diff --git a/.dockerignore b/.dockerignore
index 39b31bd2c..567609b12 100644
--- a/.dockerignore
+++ b/.dockerignore
@@ -1 +1 @@
-build/libgit2/
+build/
diff --git a/.github/dependabot.yaml b/.github/dependabot.yaml
new file mode 100644
index 000000000..f8796c21f
--- /dev/null
+++ b/.github/dependabot.yaml
@@ -0,0 +1,40 @@
+version: 2
+
+updates:
+ - package-ecosystem: "gomod"
+ directory: "/"
+ labels: ["dependencies"]
+ schedule:
+ interval: "monthly"
+ groups:
+ go-deps:
+ patterns:
+ - "*"
+ allow:
+ - dependency-type: "direct"
+ ignore:
+      # Cloud SDKs are updated manually
+ - dependency-name: "cloud.google.com/*"
+ - dependency-name: "github.com/Azure/azure-sdk-for-go/*"
+ # Kubernetes deps are updated by fluxcd/pkg/runtime
+ - dependency-name: "k8s.io/*"
+ - dependency-name: "sigs.k8s.io/*"
+ - dependency-name: "github.com/go-logr/*"
+ # OCI deps are updated by fluxcd/pkg/oci
+ - dependency-name: "github.com/docker/*"
+ - dependency-name: "github.com/distribution/*"
+ - dependency-name: "github.com/google/go-containerregistry*"
+ - dependency-name: "github.com/opencontainers/*"
+ # Helm deps are updated by fluxcd/pkg/helmtestserver
+ - dependency-name: "helm.sh/helm/*"
+ # Flux APIs are updated at release time
+ - dependency-name: "github.com/fluxcd/source-controller/api"
+ - package-ecosystem: "github-actions"
+ directory: "/"
+ labels: ["area/ci", "dependencies"]
+ groups:
+ ci:
+ patterns:
+ - "*"
+ schedule:
+ interval: "monthly"
diff --git a/.github/labels.yaml b/.github/labels.yaml
new file mode 100644
index 000000000..2f3e1d525
--- /dev/null
+++ b/.github/labels.yaml
@@ -0,0 +1,42 @@
+# Configuration file to declaratively configure labels
+# Ref: https://github.com/EndBug/label-sync#Config-files
+
+- name: area/bucket
+ description: Bucket related issues and pull requests
+ color: '#00b140'
+- name: area/git
+ description: Git related issues and pull requests
+ color: '#863faf'
+- name: area/helm
+ description: Helm related issues and pull requests
+ color: '#1673b6'
+- name: area/oci
+ description: OCI related issues and pull requests
+ color: '#c739ff'
+- name: area/storage
+ description: Storage related issues and pull requests
+ color: '#4b0082'
+- name: backport:release/v1.0.x
+ description: To be backported to release/v1.0.x
+ color: '#ffd700'
+- name: backport:release/v1.1.x
+ description: To be backported to release/v1.1.x
+ color: '#ffd700'
+- name: backport:release/v1.2.x
+ description: To be backported to release/v1.2.x
+ color: '#ffd700'
+- name: backport:release/v1.3.x
+ description: To be backported to release/v1.3.x
+ color: '#ffd700'
+- name: backport:release/v1.4.x
+ description: To be backported to release/v1.4.x
+ color: '#ffd700'
+- name: backport:release/v1.5.x
+ description: To be backported to release/v1.5.x
+ color: '#ffd700'
+- name: backport:release/v1.6.x
+ description: To be backported to release/v1.6.x
+ color: '#ffd700'
+- name: backport:release/v1.7.x
+ description: To be backported to release/v1.7.x
+ color: '#ffd700'
diff --git a/.github/workflows/backport.yaml b/.github/workflows/backport.yaml
new file mode 100644
index 000000000..108e3e2bb
--- /dev/null
+++ b/.github/workflows/backport.yaml
@@ -0,0 +1,12 @@
+name: backport
+on:
+ pull_request_target:
+ types: [closed, labeled]
+jobs:
+ backport:
+ permissions:
+ contents: write # for reading and creating branches.
+ pull-requests: write # for creating pull requests against release branches.
+ uses: fluxcd/gha-workflows/.github/workflows/backport.yaml@v0.3.0
+ secrets:
+ github-token: ${{ secrets.GITHUB_TOKEN }}
diff --git a/.github/workflows/cifuzz.yaml b/.github/workflows/cifuzz.yaml
index d13d0a241..c25086ad1 100644
--- a/.github/workflows/cifuzz.yaml
+++ b/.github/workflows/cifuzz.yaml
@@ -1,28 +1,20 @@
-name: CIFuzz
+name: fuzz
on:
pull_request:
branches:
- - main
- paths-ignore:
- - 'CHANGELOG.md'
- - 'README.md'
- - 'MAINTAINERS'
-
-permissions:
- contents: read
-
+ - 'main'
+ - 'release/**'
jobs:
- Fuzzing:
+ smoketest:
runs-on: ubuntu-latest
+ permissions:
+ contents: read # for reading the repository code.
steps:
- - name: Checkout
- uses: actions/checkout@v3
- - name: Restore Go cache
- uses: actions/cache@v3
- with:
- path: /home/runner/work/_temp/_github_home/go/pkg/mod
- key: ${{ runner.os }}-go-${{ hashFiles('**/go.sum') }}
- restore-keys: |
- ${{ runner.os }}-go-
- - name: Smoke test Fuzzers
- run: make fuzz-smoketest
+ - name: Test suite setup
+ uses: fluxcd/gha-workflows/.github/actions/setup-kubernetes@v0.3.0
+ with:
+ go-version: 1.25.x
+ - name: Smoke test Fuzzers
+ run: make fuzz-smoketest
+ env:
+ SKIP_COSIGN_VERIFICATION: true
diff --git a/.github/workflows/e2e.yaml b/.github/workflows/e2e.yaml
index bfce099be..465bb8f42 100644
--- a/.github/workflows/e2e.yaml
+++ b/.github/workflows/e2e.yaml
@@ -1,89 +1,35 @@
name: e2e
-
on:
+ workflow_dispatch:
pull_request:
- paths-ignore:
- - 'CHANGELOG.md'
- - 'README.md'
- - 'MAINTAINERS'
push:
branches:
- - main
-
-permissions:
- contents: read # for actions/checkout to fetch code
-
+ - 'main'
+ - 'release/**'
jobs:
-
kind-linux-amd64:
runs-on: ubuntu-latest
+ permissions:
+ contents: read # for reading the repository code.
steps:
- - name: Checkout
- uses: actions/checkout@v3
- - name: Setup Go
- uses: actions/setup-go@v3
+ - name: Test suite setup
+ uses: fluxcd/gha-workflows/.github/actions/setup-kubernetes@v0.3.0
with:
- go-version: 1.18.x
- - name: Restore Go cache
- uses: actions/cache@v3
- with:
- path: /home/runner/work/_temp/_github_home/go/pkg/mod
- key: ${{ runner.os }}-go-${{ hashFiles('**/go.sum') }}
- restore-keys: |
- ${{ runner.os }}-go-
+ go-version: 1.25.x
+ - name: Verify
+ run: make verify
- name: Enable integration tests
- # Only run integration tests for main branch
- if: github.ref == 'refs/heads/main'
+ # Only run integration tests for main and release branches
+ if: github.ref == 'refs/heads/main' || startsWith(github.ref, 'refs/heads/release/')
run: |
echo 'GO_TAGS=integration' >> $GITHUB_ENV
- - name: Setup Kubernetes
- uses: engineerd/setup-kind@v0.5.0
- with:
- version: v0.11.1
- image: kindest/node:v1.21.1@sha256:69860bda5563ac81e3c0057d654b5253219618a22ec3a346306239bba8cfa1a6
- - name: Setup Kustomize
- uses: fluxcd/pkg/actions/kustomize@main
- - name: Setup Helm
- uses: fluxcd/pkg/actions/helm@main
- name: Run E2E tests
env:
+ SKIP_COSIGN_VERIFICATION: true
CREATE_CLUSTER: false
run: make e2e
-
- kind-linux-arm64:
- # Hosted on Equinix
- # Docs: https://github.com/fluxcd/flux2/tree/main/.github/runners
- runs-on: [self-hosted, Linux, ARM64, equinix]
- steps:
- - name: Checkout
- uses: actions/checkout@v3
- - name: Setup Go
- uses: actions/setup-go@v3
- with:
- go-version: 1.18.x
- - name: Enable integration tests
- # Only run integration tests for main branch
- if: github.ref == 'refs/heads/main'
- run: |
- echo 'GO_TAGS=integration' >> $GITHUB_ENV
- - name: Prepare
- id: prep
- run: |
- echo ::set-output name=CLUSTER::arm64-${GITHUB_SHA:0:7}-$(date +%s)
- echo ::set-output name=CONTEXT::kind-arm64-${GITHUB_SHA:0:7}-$(date +%s)
- - name: Setup Kubernetes Kind
- run: |
- kind create cluster --name ${{ steps.prep.outputs.CLUSTER }} --kubeconfig=/tmp/${{ steps.prep.outputs.CLUSTER }}
- - name: Run e2e tests
- env:
- KIND_CLUSTER_NAME: ${{ steps.prep.outputs.CLUSTER }}
- KUBECONFIG: /tmp/${{ steps.prep.outputs.CLUSTER }}
- CREATE_CLUSTER: false
- BUILD_PLATFORM: linux/arm64
- MINIO_TAG: RELEASE.2020-09-17T04-49-20Z-arm64
- run: make e2e
- - name: Cleanup
+ - name: Print controller logs
if: always()
+ continue-on-error: true
run: |
- kind delete cluster --name ${{ steps.prep.outputs.CLUSTER }}
- rm /tmp/${{ steps.prep.outputs.CLUSTER }}
+ kubectl -n source-system logs -l app=source-controller
diff --git a/.github/workflows/nightly.yml b/.github/workflows/nightly.yml
deleted file mode 100644
index 74180547f..000000000
--- a/.github/workflows/nightly.yml
+++ /dev/null
@@ -1,36 +0,0 @@
-name: nightly
-on:
- schedule:
- - cron: '0 0 * * *'
- workflow_dispatch:
-
-env:
- REPOSITORY: ${{ github.repository }}
-
-permissions:
- contents: read # for actions/checkout to fetch code
-
-jobs:
- build:
- runs-on: ubuntu-latest
- steps:
- - uses: actions/checkout@v3
- - name: Setup QEMU
- uses: docker/setup-qemu-action@v2
- with:
- platforms: all
- - name: Setup Docker Buildx
- id: buildx
- uses: docker/setup-buildx-action@v2
- with:
- buildkitd-flags: "--debug"
- - name: Build multi-arch container image
- uses: docker/build-push-action@v3
- with:
- push: false
- builder: ${{ steps.buildx.outputs.name }}
- context: .
- file: ./Dockerfile
- platforms: linux/amd64,linux/arm/v7,linux/arm64
- tags: |
- ${{ env.REPOSITORY }}:nightly
diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml
index 95f9d0412..ffb1c3cd9 100644
--- a/.github/workflows/release.yml
+++ b/.github/workflows/release.yml
@@ -9,92 +9,58 @@ on:
description: 'image tag prefix'
default: 'rc'
required: true
-
-permissions:
- contents: write # needed to write releases
- id-token: write # needed for keyless signing
- packages: write # needed for ghcr access
-
-env:
- CONTROLLER: ${{ github.event.repository.name }}
-
jobs:
- build-push:
- runs-on: ubuntu-latest
- steps:
- - uses: actions/checkout@v3
- - name: Setup Kustomize
- uses: fluxcd/pkg/actions/kustomize@main
- - name: Prepare
- id: prep
- run: |
- VERSION="${{ github.event.inputs.tag }}-${GITHUB_SHA::8}"
- if [[ $GITHUB_REF == refs/tags/* ]]; then
- VERSION=${GITHUB_REF/refs\/tags\//}
- fi
- echo ::set-output name=BUILD_DATE::$(date -u +'%Y-%m-%dT%H:%M:%SZ')
- echo ::set-output name=VERSION::${VERSION}
- - name: Setup QEMU
- uses: docker/setup-qemu-action@v2
- - name: Setup Docker Buildx
- id: buildx
- uses: docker/setup-buildx-action@v2
- - name: Login to GitHub Container Registry
- uses: docker/login-action@v2
- with:
- registry: ghcr.io
- username: fluxcdbot
- password: ${{ secrets.GHCR_TOKEN }}
- - name: Login to Docker Hub
- uses: docker/login-action@v2
- with:
- username: fluxcdbot
- password: ${{ secrets.DOCKER_FLUXCD_PASSWORD }}
- - name: Generate images meta
- id: meta
- uses: docker/metadata-action@v4
- with:
- images: |
- fluxcd/${{ env.CONTROLLER }}
- ghcr.io/fluxcd/${{ env.CONTROLLER }}
- tags: |
- type=raw,value=${{ steps.prep.outputs.VERSION }}
- - name: Publish images
- uses: docker/build-push-action@v3
- with:
- push: true
- builder: ${{ steps.buildx.outputs.name }}
- context: .
- file: ./Dockerfile
- platforms: linux/amd64,linux/arm/v7,linux/arm64
- tags: ${{ steps.meta.outputs.tags }}
- labels: ${{ steps.meta.outputs.labels }}
- - name: Check images
- run: |
- docker buildx imagetools inspect docker.io/fluxcd/${{ env.CONTROLLER }}:${{ steps.prep.outputs.VERSION }}
- docker buildx imagetools inspect ghcr.io/fluxcd/${{ env.CONTROLLER }}:${{ steps.prep.outputs.VERSION }}
- docker pull docker.io/fluxcd/${{ env.CONTROLLER }}:${{ steps.prep.outputs.VERSION }}
- docker pull ghcr.io/fluxcd/${{ env.CONTROLLER }}:${{ steps.prep.outputs.VERSION }}
- - uses: sigstore/cosign-installer@main
- - name: Sign images
- env:
- COSIGN_EXPERIMENTAL: 1
- run: |
- cosign sign fluxcd/${{ env.CONTROLLER }}:${{ steps.prep.outputs.VERSION }}
- cosign sign ghcr.io/fluxcd/${{ env.CONTROLLER }}:${{ steps.prep.outputs.VERSION }}
- - name: Generate release artifacts
- if: startsWith(github.ref, 'refs/tags/v')
- run: |
- mkdir -p config/release
- kustomize build ./config/crd > ./config/release/${{ env.CONTROLLER }}.crds.yaml
- kustomize build ./config/manager > ./config/release/${{ env.CONTROLLER }}.deployment.yaml
- echo '[CHANGELOG](https://github.com/fluxcd/${{ env.CONTROLLER }}/blob/main/CHANGELOG.md)' > ./config/release/notes.md
- - uses: anchore/sbom-action/download-syft@v0
- - name: Create release and SBOM
- if: startsWith(github.ref, 'refs/tags/v')
- uses: goreleaser/goreleaser-action@v3
- with:
- version: latest
- args: release --release-notes=config/release/notes.md --rm-dist --skip-validate
- env:
- GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
+ release:
+ permissions:
+ contents: write # for creating the GitHub release.
+ id-token: write # for creating OIDC tokens for signing.
+ packages: write # for pushing and signing container images.
+ uses: fluxcd/gha-workflows/.github/workflows/controller-release.yaml@v0.3.0
+ with:
+ controller: ${{ github.event.repository.name }}
+ release-candidate-prefix: ${{ github.event.inputs.tag }}
+ secrets:
+ github-token: ${{ secrets.GITHUB_TOKEN }}
+ dockerhub-token: ${{ secrets.DOCKER_FLUXCD_PASSWORD }}
+ release-provenance:
+ needs: [release]
+ permissions:
+      actions: read # for detecting the GitHub Actions environment.
+ id-token: write # for creating OIDC tokens for signing.
+ contents: write # for uploading attestations to GitHub releases.
+ if: startsWith(github.ref, 'refs/tags/v')
+ uses: slsa-framework/slsa-github-generator/.github/workflows/generator_generic_slsa3.yml@v2.1.0
+ with:
+ provenance-name: "provenance.intoto.jsonl"
+ base64-subjects: "${{ needs.release.outputs.hashes }}"
+ upload-assets: true
+ dockerhub-provenance:
+ needs: [release]
+ permissions:
+ contents: read # for reading the repository code.
+      actions: read # for detecting the GitHub Actions environment.
+ id-token: write # for creating OIDC tokens for signing.
+ packages: write # for uploading attestations.
+ if: startsWith(github.ref, 'refs/tags/v')
+ uses: slsa-framework/slsa-github-generator/.github/workflows/generator_container_slsa3.yml@v2.1.0
+ with:
+ image: ${{ needs.release.outputs.image_url }}
+ digest: ${{ needs.release.outputs.image_digest }}
+ registry-username: fluxcdbot
+ secrets:
+ registry-password: ${{ secrets.DOCKER_FLUXCD_PASSWORD }}
+ ghcr-provenance:
+ needs: [release]
+ permissions:
+ contents: read # for reading the repository code.
+      actions: read # for detecting the GitHub Actions environment.
+ id-token: write # for creating OIDC tokens for signing.
+ packages: write # for uploading attestations.
+ if: startsWith(github.ref, 'refs/tags/v')
+ uses: slsa-framework/slsa-github-generator/.github/workflows/generator_container_slsa3.yml@v2.1.0
+ with:
+ image: ghcr.io/${{ needs.release.outputs.image_url }}
+ digest: ${{ needs.release.outputs.image_digest }}
+ registry-username: fluxcdbot
+ secrets:
+ registry-password: ${{ secrets.GHCR_TOKEN }}
diff --git a/.github/workflows/scan.yaml b/.github/workflows/scan.yaml
index 332baa79d..4d7f2b0f5 100644
--- a/.github/workflows/scan.yaml
+++ b/.github/workflows/scan.yaml
@@ -1,5 +1,4 @@
-name: Scan
-
+name: scan
on:
push:
branches: [ main ]
@@ -7,39 +6,12 @@ on:
branches: [ main ]
schedule:
- cron: '18 10 * * 3'
-
-permissions:
- contents: read # for actions/checkout to fetch code
- security-events: write # for codeQL to write security events
-
jobs:
- fossa:
- name: FOSSA
- runs-on: ubuntu-latest
- steps:
- - uses: actions/checkout@v3
- - name: Run FOSSA scan and upload build data
- uses: fossa-contrib/fossa-action@v1
- with:
- # FOSSA Push-Only API Token
- fossa-api-key: 5ee8bf422db1471e0bcf2bcb289185de
- github-token: ${{ github.token }}
-
- codeql:
- name: CodeQL
- runs-on: ubuntu-latest
- steps:
- - name: Checkout repository
- uses: actions/checkout@v3
- - name: Set up Go
- uses: actions/setup-go@v2
- with:
- go-version: 1.18
- - name: Initialize CodeQL
- uses: github/codeql-action/init@v2
- with:
- languages: go
- - name: Autobuild
- uses: github/codeql-action/autobuild@v2
- - name: Perform CodeQL Analysis
- uses: github/codeql-action/analyze@v2
+ analyze:
+ permissions:
+ contents: read # for reading the repository code.
+ security-events: write # for uploading the CodeQL analysis results.
+ uses: fluxcd/gha-workflows/.github/workflows/code-scan.yaml@v0.3.0
+ secrets:
+ github-token: ${{ secrets.GITHUB_TOKEN }}
+ fossa-token: ${{ secrets.FOSSA_TOKEN }}
diff --git a/.github/workflows/sync-labels.yaml b/.github/workflows/sync-labels.yaml
new file mode 100644
index 000000000..cc69156a8
--- /dev/null
+++ b/.github/workflows/sync-labels.yaml
@@ -0,0 +1,16 @@
+name: sync-labels
+on:
+ workflow_dispatch:
+ push:
+ branches:
+ - main
+ paths:
+ - .github/labels.yaml
+jobs:
+ sync-labels:
+ permissions:
+ contents: read # for reading the labels file.
+ issues: write # for creating and updating labels.
+ uses: fluxcd/gha-workflows/.github/workflows/labels-sync.yaml@v0.3.0
+ secrets:
+ github-token: ${{ secrets.GITHUB_TOKEN }}
diff --git a/.github/workflows/test.yaml b/.github/workflows/test.yaml
new file mode 100644
index 000000000..4ba71463f
--- /dev/null
+++ b/.github/workflows/test.yaml
@@ -0,0 +1,22 @@
+name: test
+on:
+ workflow_dispatch:
+ pull_request:
+ push:
+ branches:
+ - 'main'
+ - 'release/**'
+jobs:
+ test-linux-amd64:
+ runs-on: ubuntu-latest
+ steps:
+ - name: Test suite setup
+ uses: fluxcd/gha-workflows/.github/actions/setup-kubernetes@v0.3.0
+ with:
+ go-version: 1.25.x
+ - name: Run tests
+ env:
+ SKIP_COSIGN_VERIFICATION: true
+ TEST_AZURE_ACCOUNT_NAME: ${{ secrets.TEST_AZURE_ACCOUNT_NAME }}
+ TEST_AZURE_ACCOUNT_KEY: ${{ secrets.TEST_AZURE_ACCOUNT_KEY }}
+ run: make test
diff --git a/.github/workflows/tests.yaml b/.github/workflows/tests.yaml
deleted file mode 100644
index 7bb907cf6..000000000
--- a/.github/workflows/tests.yaml
+++ /dev/null
@@ -1,89 +0,0 @@
-name: tests
-
-on:
- pull_request:
- paths-ignore:
- - 'CHANGELOG.md'
- - 'README.md'
- - 'MAINTAINERS'
-
- push:
- branches:
- - main
-
-permissions:
- contents: read # for actions/checkout to fetch code
-
-jobs:
-
- test-linux-amd64:
- runs-on: ubuntu-latest
- steps:
- - name: Checkout
- uses: actions/checkout@v3
- - name: Setup Go
- uses: actions/setup-go@v3
- with:
- go-version: 1.18.x
- - name: Restore Go cache
- uses: actions/cache@v3
- with:
- path: /home/runner/work/_temp/_github_home/go/pkg/mod
- key: ${{ runner.os }}-go-${{ hashFiles('**/go.sum') }}
- restore-keys: |
- ${{ runner.os }}-go-
- - name: Run tests
- env:
- TEST_AZURE_ACCOUNT_NAME: ${{ secrets.TEST_AZURE_ACCOUNT_NAME }}
- TEST_AZURE_ACCOUNT_KEY: ${{ secrets.TEST_AZURE_ACCOUNT_KEY }}
- run: make test
-
- test-linux-arm64:
- # Hosted on Equinix
- # Docs: https://github.com/fluxcd/flux2/tree/main/.github/runners
- runs-on: [self-hosted, Linux, ARM64, equinix]
- steps:
- - name: Checkout
- uses: actions/checkout@v3
- - name: Setup Go
- uses: actions/setup-go@v3
- with:
- go-version: 1.18.x
- - name: Run tests
- env:
- TEST_AZURE_ACCOUNT_NAME: ${{ secrets.TEST_AZURE_ACCOUNT_NAME }}
- TEST_AZURE_ACCOUNT_KEY: ${{ secrets.TEST_AZURE_ACCOUNT_KEY }}
-
- # Temporarily disabling -race for arm64 as our GitHub action
- # runners don't seem to like it. The race detection was tested
- # on both Apple M1 and Linux arm64 with successful results.
- #
- # We should reenable go test -race for arm64 runners once the
- # current issue is resolved.
- GO_TEST_ARGS: ''
- run: make test
-
- # Runs 'make test' on MacOS to ensure the continuous support for contributors
- # using it as a development environment.
- darwin-amd64:
- strategy:
- matrix:
- os: [macos-11, macos-12]
- fail-fast: false
- runs-on: ${{ matrix.os }}
- steps:
- - name: Checkout
- uses: actions/checkout@v3
- - name: Setup Go
- uses: actions/setup-go@v3
- with:
- go-version: 1.18.x
- - name: Restore Go cache
- uses: actions/cache@v3
- with:
- path: /home/runner/work/_temp/_github_home/go/pkg/mod
- key: ${{ runner.os }}-go-${{ hashFiles('**/go.sum') }}
- restore-keys: |
- ${{ runner.os }}-go-
- - name: Run tests
- run: make test
diff --git a/.github/workflows/verify.yaml b/.github/workflows/verify.yaml
deleted file mode 100644
index 33210245f..000000000
--- a/.github/workflows/verify.yaml
+++ /dev/null
@@ -1,36 +0,0 @@
-name: verify
-
-on:
- pull_request:
- paths-ignore:
- - 'CHANGELOG.md'
- - 'README.md'
- - 'MAINTAINERS'
-
- push:
- branches:
- - main
-
-permissions:
- contents: read # for actions/checkout to fetch code
-
-jobs:
-
- verify-linux-amd64:
- runs-on: ubuntu-latest
- steps:
- - name: Checkout
- uses: actions/checkout@v3
- - name: Setup Go
- uses: actions/setup-go@v3
- with:
- go-version: 1.18.x
- - name: Restore Go cache
- uses: actions/cache@v3
- with:
- path: /home/runner/work/_temp/_github_home/go/pkg/mod
- key: ${{ runner.os }}-go-${{ hashFiles('**/go.sum') }}
- restore-keys: |
- ${{ runner.os }}-go-
- - name: Verify
- run: make verify
diff --git a/.goreleaser.yaml b/.goreleaser.yaml
index f1074d546..7b61ce0c1 100644
--- a/.goreleaser.yaml
+++ b/.goreleaser.yaml
@@ -4,9 +4,26 @@ builds:
- skip: true
release:
- prerelease: "true"
extra_files:
- glob: config/release/*.yaml
+ prerelease: "auto"
+ header: |
+ ## Changelog
+
+ [{{.Tag}} changelog](https://github.com/fluxcd/{{.ProjectName}}/blob/{{.Tag}}/CHANGELOG.md)
+ footer: |
+ ## Container images
+
+ - `docker.io/fluxcd/{{.ProjectName}}:{{.Tag}}`
+ - `ghcr.io/fluxcd/{{.ProjectName}}:{{.Tag}}`
+
+ Supported architectures: `linux/amd64`, `linux/arm64` and `linux/arm/v7`.
+
+ The container images are built on GitHub hosted runners and are signed with cosign and GitHub OIDC.
+ To verify the images and their provenance (SLSA level 3), please see the [security documentation](https://fluxcd.io/flux/security/).
+
+changelog:
+ disable: true
checksum:
extra_files:
@@ -32,6 +49,7 @@ signs:
certificate: "${artifact}.pem"
args:
- sign-blob
+ - "--yes"
- "--output-certificate=${certificate}"
- "--output-signature=${signature}"
- "${artifact}"
diff --git a/ATTRIBUTIONS.md b/ATTRIBUTIONS.md
deleted file mode 100644
index 696ab9fa4..000000000
--- a/ATTRIBUTIONS.md
+++ /dev/null
@@ -1,1201 +0,0 @@
-# Attributions
-
-This application uses Open Source components. You can find the source
-code of their open source projects along with license information below.
-We acknowledge and are grateful to these developers for their contributions
-to open source.
-
-## libgit2
-
-Libgit2 was obtained in source-code form from its github repository:
-https://github.com/libgit2/libgit2/
-
-No changes were made to its original source code.
-
-Copyright notice (https://raw.githubusercontent.com/libgit2/libgit2/main/COPYING):
-
- libgit2 is Copyright (C) the libgit2 contributors,
- unless otherwise stated. See the AUTHORS file for details.
-
- Note that the only valid version of the GPL as far as this project
- is concerned is _this_ particular version of the license (ie v2, not
- v2.2 or v3.x or whatever), unless explicitly otherwise stated.
-
-----------------------------------------------------------------------
-
- LINKING EXCEPTION
-
- In addition to the permissions in the GNU General Public License,
- the authors give you unlimited permission to link the compiled
- version of this library into combinations with other programs,
- and to distribute those combinations without any restriction
- coming from the use of this file. (The General Public License
- restrictions do apply in other respects; for example, they cover
- modification of the file, and distribution when not linked into
- a combined executable.)
-
-----------------------------------------------------------------------
-
- GNU GENERAL PUBLIC LICENSE
- Version 2, June 1991
-
- Copyright (C) 1989, 1991 Free Software Foundation, Inc.
- 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- Everyone is permitted to copy and distribute verbatim copies
- of this license document, but changing it is not allowed.
-
- Preamble
-
- The licenses for most software are designed to take away your
-freedom to share and change it. By contrast, the GNU General Public
-License is intended to guarantee your freedom to share and change free
-software--to make sure the software is free for all its users. This
-General Public License applies to most of the Free Software
-Foundation's software and to any other program whose authors commit to
-using it. (Some other Free Software Foundation software is covered by
-the GNU Library General Public License instead.) You can apply it to
-your programs, too.
-
- When we speak of free software, we are referring to freedom, not
-price. Our General Public Licenses are designed to make sure that you
-have the freedom to distribute copies of free software (and charge for
-this service if you wish), that you receive source code or can get it
-if you want it, that you can change the software or use pieces of it
-in new free programs; and that you know you can do these things.
-
- To protect your rights, we need to make restrictions that forbid
-anyone to deny you these rights or to ask you to surrender the rights.
-These restrictions translate to certain responsibilities for you if you
-distribute copies of the software, or if you modify it.
-
- For example, if you distribute copies of such a program, whether
-gratis or for a fee, you must give the recipients all the rights that
-you have. You must make sure that they, too, receive or can get the
-source code. And you must show them these terms so they know their
-rights.
-
- We protect your rights with two steps: (1) copyright the software, and
-(2) offer you this license which gives you legal permission to copy,
-distribute and/or modify the software.
-
- Also, for each author's protection and ours, we want to make certain
-that everyone understands that there is no warranty for this free
-software. If the software is modified by someone else and passed on, we
-want its recipients to know that what they have is not the original, so
-that any problems introduced by others will not reflect on the original
-authors' reputations.
-
- Finally, any free program is threatened constantly by software
-patents. We wish to avoid the danger that redistributors of a free
-program will individually obtain patent licenses, in effect making the
-program proprietary. To prevent this, we have made it clear that any
-patent must be licensed for everyone's free use or not licensed at all.
-
- The precise terms and conditions for copying, distribution and
-modification follow.
-
- GNU GENERAL PUBLIC LICENSE
- TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION
-
- 0. This License applies to any program or other work which contains
-a notice placed by the copyright holder saying it may be distributed
-under the terms of this General Public License. The "Program", below,
-refers to any such program or work, and a "work based on the Program"
-means either the Program or any derivative work under copyright law:
-that is to say, a work containing the Program or a portion of it,
-either verbatim or with modifications and/or translated into another
-language. (Hereinafter, translation is included without limitation in
-the term "modification".) Each licensee is addressed as "you".
-
-Activities other than copying, distribution and modification are not
-covered by this License; they are outside its scope. The act of
-running the Program is not restricted, and the output from the Program
-is covered only if its contents constitute a work based on the
-Program (independent of having been made by running the Program).
-Whether that is true depends on what the Program does.
-
- 1. You may copy and distribute verbatim copies of the Program's
-source code as you receive it, in any medium, provided that you
-conspicuously and appropriately publish on each copy an appropriate
-copyright notice and disclaimer of warranty; keep intact all the
-notices that refer to this License and to the absence of any warranty;
-and give any other recipients of the Program a copy of this License
-along with the Program.
-
-You may charge a fee for the physical act of transferring a copy, and
-you may at your option offer warranty protection in exchange for a fee.
-
- 2. You may modify your copy or copies of the Program or any portion
-of it, thus forming a work based on the Program, and copy and
-distribute such modifications or work under the terms of Section 1
-above, provided that you also meet all of these conditions:
-
- a) You must cause the modified files to carry prominent notices
- stating that you changed the files and the date of any change.
-
- b) You must cause any work that you distribute or publish, that in
- whole or in part contains or is derived from the Program or any
- part thereof, to be licensed as a whole at no charge to all third
- parties under the terms of this License.
-
- c) If the modified program normally reads commands interactively
- when run, you must cause it, when started running for such
- interactive use in the most ordinary way, to print or display an
- announcement including an appropriate copyright notice and a
- notice that there is no warranty (or else, saying that you provide
- a warranty) and that users may redistribute the program under
- these conditions, and telling the user how to view a copy of this
- License. (Exception: if the Program itself is interactive but
- does not normally print such an announcement, your work based on
- the Program is not required to print an announcement.)
-
-These requirements apply to the modified work as a whole. If
-identifiable sections of that work are not derived from the Program,
-and can be reasonably considered independent and separate works in
-themselves, then this License, and its terms, do not apply to those
-sections when you distribute them as separate works. But when you
-distribute the same sections as part of a whole which is a work based
-on the Program, the distribution of the whole must be on the terms of
-this License, whose permissions for other licensees extend to the
-entire whole, and thus to each and every part regardless of who wrote it.
-
-Thus, it is not the intent of this section to claim rights or contest
-your rights to work written entirely by you; rather, the intent is to
-exercise the right to control the distribution of derivative or
-collective works based on the Program.
-
-In addition, mere aggregation of another work not based on the Program
-with the Program (or with a work based on the Program) on a volume of
-a storage or distribution medium does not bring the other work under
-the scope of this License.
-
- 3. You may copy and distribute the Program (or a work based on it,
-under Section 2) in object code or executable form under the terms of
-Sections 1 and 2 above provided that you also do one of the following:
-
- a) Accompany it with the complete corresponding machine-readable
- source code, which must be distributed under the terms of Sections
- 1 and 2 above on a medium customarily used for software interchange; or,
-
- b) Accompany it with a written offer, valid for at least three
- years, to give any third party, for a charge no more than your
- cost of physically performing source distribution, a complete
- machine-readable copy of the corresponding source code, to be
- distributed under the terms of Sections 1 and 2 above on a medium
- customarily used for software interchange; or,
-
- c) Accompany it with the information you received as to the offer
- to distribute corresponding source code. (This alternative is
- allowed only for noncommercial distribution and only if you
- received the program in object code or executable form with such
- an offer, in accord with Subsection b above.)
-
-The source code for a work means the preferred form of the work for
-making modifications to it. For an executable work, complete source
-code means all the source code for all modules it contains, plus any
-associated interface definition files, plus the scripts used to
-control compilation and installation of the executable. However, as a
-special exception, the source code distributed need not include
-anything that is normally distributed (in either source or binary
-form) with the major components (compiler, kernel, and so on) of the
-operating system on which the executable runs, unless that component
-itself accompanies the executable.
-
-If distribution of executable or object code is made by offering
-access to copy from a designated place, then offering equivalent
-access to copy the source code from the same place counts as
-distribution of the source code, even though third parties are not
-compelled to copy the source along with the object code.
-
- 4. You may not copy, modify, sublicense, or distribute the Program
-except as expressly provided under this License. Any attempt
-otherwise to copy, modify, sublicense or distribute the Program is
-void, and will automatically terminate your rights under this License.
-However, parties who have received copies, or rights, from you under
-this License will not have their licenses terminated so long as such
-parties remain in full compliance.
-
- 5. You are not required to accept this License, since you have not
-signed it. However, nothing else grants you permission to modify or
-distribute the Program or its derivative works. These actions are
-prohibited by law if you do not accept this License. Therefore, by
-modifying or distributing the Program (or any work based on the
-Program), you indicate your acceptance of this License to do so, and
-all its terms and conditions for copying, distributing or modifying
-the Program or works based on it.
-
- 6. Each time you redistribute the Program (or any work based on the
-Program), the recipient automatically receives a license from the
-original licensor to copy, distribute or modify the Program subject to
-these terms and conditions. You may not impose any further
-restrictions on the recipients' exercise of the rights granted herein.
-You are not responsible for enforcing compliance by third parties to
-this License.
-
- 7. If, as a consequence of a court judgment or allegation of patent
-infringement or for any other reason (not limited to patent issues),
-conditions are imposed on you (whether by court order, agreement or
-otherwise) that contradict the conditions of this License, they do not
-excuse you from the conditions of this License. If you cannot
-distribute so as to satisfy simultaneously your obligations under this
-License and any other pertinent obligations, then as a consequence you
-may not distribute the Program at all. For example, if a patent
-license would not permit royalty-free redistribution of the Program by
-all those who receive copies directly or indirectly through you, then
-the only way you could satisfy both it and this License would be to
-refrain entirely from distribution of the Program.
-
-If any portion of this section is held invalid or unenforceable under
-any particular circumstance, the balance of the section is intended to
-apply and the section as a whole is intended to apply in other
-circumstances.
-
-It is not the purpose of this section to induce you to infringe any
-patents or other property right claims or to contest validity of any
-such claims; this section has the sole purpose of protecting the
-integrity of the free software distribution system, which is
-implemented by public license practices. Many people have made
-generous contributions to the wide range of software distributed
-through that system in reliance on consistent application of that
-system; it is up to the author/donor to decide if he or she is willing
-to distribute software through any other system and a licensee cannot
-impose that choice.
-
-This section is intended to make thoroughly clear what is believed to
-be a consequence of the rest of this License.
-
- 8. If the distribution and/or use of the Program is restricted in
-certain countries either by patents or by copyrighted interfaces, the
-original copyright holder who places the Program under this License
-may add an explicit geographical distribution limitation excluding
-those countries, so that distribution is permitted only in or among
-countries not thus excluded. In such case, this License incorporates
-the limitation as if written in the body of this License.
-
- 9. The Free Software Foundation may publish revised and/or new versions
-of the General Public License from time to time. Such new versions will
-be similar in spirit to the present version, but may differ in detail to
-address new problems or concerns.
-
-Each version is given a distinguishing version number. If the Program
-specifies a version number of this License which applies to it and "any
-later version", you have the option of following the terms and conditions
-either of that version or of any later version published by the Free
-Software Foundation. If the Program does not specify a version number of
-this License, you may choose any version ever published by the Free Software
-Foundation.
-
- 10. If you wish to incorporate parts of the Program into other free
-programs whose distribution conditions are different, write to the author
-to ask for permission. For software which is copyrighted by the Free
-Software Foundation, write to the Free Software Foundation; we sometimes
-make exceptions for this. Our decision will be guided by the two goals
-of preserving the free status of all derivatives of our free software and
-of promoting the sharing and reuse of software generally.
-
- NO WARRANTY
-
- 11. BECAUSE THE PROGRAM IS LICENSED FREE OF CHARGE, THERE IS NO WARRANTY
-FOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW. EXCEPT WHEN
-OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES
-PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED
-OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
-MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS
-TO THE QUALITY AND PERFORMANCE OF THE PROGRAM IS WITH YOU. SHOULD THE
-PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING,
-REPAIR OR CORRECTION.
-
- 12. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
-WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY AND/OR
-REDISTRIBUTE THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES,
-INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING
-OUT OF THE USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED
-TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY
-YOU OR THIRD PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER
-PROGRAMS), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE
-POSSIBILITY OF SUCH DAMAGES.
-
- END OF TERMS AND CONDITIONS
-
- How to Apply These Terms to Your New Programs
-
- If you develop a new program, and you want it to be of the greatest
-possible use to the public, the best way to achieve this is to make it
-free software which everyone can redistribute and change under these terms.
-
- To do so, attach the following notices to the program. It is safest
-to attach them to the start of each source file to most effectively
-convey the exclusion of warranty; and each file should have at least
-the "copyright" line and a pointer to where the full notice is found.
-
-
- Copyright (C)
-
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 2 of the License, or
- (at your option) any later version.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program; if not, write to the Free Software
- Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
-
-
-Also add information on how to contact you by electronic and paper mail.
-
-If the program is interactive, make it output a short notice like this
-when it starts in an interactive mode:
-
- Gnomovision version 69, Copyright (C) year name of author
- Gnomovision comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
- This is free software, and you are welcome to redistribute it
- under certain conditions; type `show c' for details.
-
-The hypothetical commands `show w' and `show c' should show the appropriate
-parts of the General Public License. Of course, the commands you use may
-be called something other than `show w' and `show c'; they could even be
-mouse-clicks or menu items--whatever suits your program.
-
-You should also get your employer (if you work as a programmer) or your
-school, if any, to sign a "copyright disclaimer" for the program, if
-necessary. Here is a sample; alter the names:
-
- Yoyodyne, Inc., hereby disclaims all copyright interest in the program
- `Gnomovision' (which makes passes at compilers) written by James Hacker.
-
- , 1 April 1989
- Ty Coon, President of Vice
-
-This General Public License does not permit incorporating your program into
-proprietary programs. If your program is a subroutine library, you may
-consider it more useful to permit linking proprietary applications with the
-library. If this is what you want to do, use the GNU Library General
-Public License instead of this License.
-
-----------------------------------------------------------------------
-
-The bundled ZLib code is licensed under the ZLib license:
-
-Copyright (C) 1995-2010 Jean-loup Gailly and Mark Adler
-
- This software is provided 'as-is', without any express or implied
- warranty. In no event will the authors be held liable for any damages
- arising from the use of this software.
-
- Permission is granted to anyone to use this software for any purpose,
- including commercial applications, and to alter it and redistribute it
- freely, subject to the following restrictions:
-
- 1. The origin of this software must not be misrepresented; you must not
- claim that you wrote the original software. If you use this software
- in a product, an acknowledgment in the product documentation would be
- appreciated but is not required.
- 2. Altered source versions must be plainly marked as such, and must not be
- misrepresented as being the original software.
- 3. This notice may not be removed or altered from any source distribution.
-
- Jean-loup Gailly Mark Adler
- jloup@gzip.org madler@alumni.caltech.edu
-
-----------------------------------------------------------------------
-
-The Clar framework is licensed under the ISC license:
-
-Copyright (c) 2011-2015 Vicent Marti
-
-Permission to use, copy, modify, and/or distribute this software for any
-purpose with or without fee is hereby granted, provided that the above
-copyright notice and this permission notice appear in all copies.
-
-THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
-WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
-MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
-ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
-WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
-ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
-OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
-
-----------------------------------------------------------------------
-
-The bundled PCRE implementation (deps/pcre/) is licensed under the BSD
-license.
-
-Redistribution and use in source and binary forms, with or without
-modification, are permitted provided that the following conditions are met:
-
- * Redistributions of source code must retain the above copyright notice,
- this list of conditions and the following disclaimer.
-
- * Redistributions in binary form must reproduce the above copyright
- notice, this list of conditions and the following disclaimer in the
- documentation and/or other materials provided with the distribution.
-
- * Neither the name of the University of Cambridge nor the name of Google
- Inc. nor the names of their contributors may be used to endorse or
- promote products derived from this software without specific prior
- written permission.
-
-THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
-AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
-ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
-LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
-CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
-SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
-INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
-CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
-ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
-POSSIBILITY OF SUCH DAMAGE.
-
-----------------------------------------------------------------------
-
-The bundled winhttp definition files (deps/winhttp/) are licensed under
-the GNU LGPL (available at the end of this file).
-
-Copyright (C) 2007 Francois Gouget
-
-This library is free software; you can redistribute it and/or
-modify it under the terms of the GNU Lesser General Public
-License as published by the Free Software Foundation; either
-version 2.1 of the License, or (at your option) any later version.
-
-This library is distributed in the hope that it will be useful,
-but WITHOUT ANY WARRANTY; without even the implied warranty of
-MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
-Lesser General Public License for more details.
-
-You should have received a copy of the GNU Lesser General Public
-License along with this library; if not, write to the Free Software
-Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA
-
-----------------------------------------------------------------------
-
- GNU LESSER GENERAL PUBLIC LICENSE
- Version 2.1, February 1999
-
- Copyright (C) 1991, 1999 Free Software Foundation, Inc.
- 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
- Everyone is permitted to copy and distribute verbatim copies
- of this license document, but changing it is not allowed.
-
-[This is the first released version of the Lesser GPL. It also counts
- as the successor of the GNU Library Public License, version 2, hence
- the version number 2.1.]
-
- Preamble
-
- The licenses for most software are designed to take away your
-freedom to share and change it. By contrast, the GNU General Public
-Licenses are intended to guarantee your freedom to share and change
-free software--to make sure the software is free for all its users.
-
- This license, the Lesser General Public License, applies to some
-specially designated software packages--typically libraries--of the
-Free Software Foundation and other authors who decide to use it. You
-can use it too, but we suggest you first think carefully about whether
-this license or the ordinary General Public License is the better
-strategy to use in any particular case, based on the explanations below.
-
- When we speak of free software, we are referring to freedom of use,
-not price. Our General Public Licenses are designed to make sure that
-you have the freedom to distribute copies of free software (and charge
-for this service if you wish); that you receive source code or can get
-it if you want it; that you can change the software and use pieces of
-it in new free programs; and that you are informed that you can do
-these things.
-
- To protect your rights, we need to make restrictions that forbid
-distributors to deny you these rights or to ask you to surrender these
-rights. These restrictions translate to certain responsibilities for
-you if you distribute copies of the library or if you modify it.
-
- For example, if you distribute copies of the library, whether gratis
-or for a fee, you must give the recipients all the rights that we gave
-you. You must make sure that they, too, receive or can get the source
-code. If you link other code with the library, you must provide
-complete object files to the recipients, so that they can relink them
-with the library after making changes to the library and recompiling
-it. And you must show them these terms so they know their rights.
-
- We protect your rights with a two-step method: (1) we copyright the
-library, and (2) we offer you this license, which gives you legal
-permission to copy, distribute and/or modify the library.
-
- To protect each distributor, we want to make it very clear that
-there is no warranty for the free library. Also, if the library is
-modified by someone else and passed on, the recipients should know
-that what they have is not the original version, so that the original
-author's reputation will not be affected by problems that might be
-introduced by others.
-
- Finally, software patents pose a constant threat to the existence of
-any free program. We wish to make sure that a company cannot
-effectively restrict the users of a free program by obtaining a
-restrictive license from a patent holder. Therefore, we insist that
-any patent license obtained for a version of the library must be
-consistent with the full freedom of use specified in this license.
-
- Most GNU software, including some libraries, is covered by the
-ordinary GNU General Public License. This license, the GNU Lesser
-General Public License, applies to certain designated libraries, and
-is quite different from the ordinary General Public License. We use
-this license for certain libraries in order to permit linking those
-libraries into non-free programs.
-
- When a program is linked with a library, whether statically or using
-a shared library, the combination of the two is legally speaking a
-combined work, a derivative of the original library. The ordinary
-General Public License therefore permits such linking only if the
-entire combination fits its criteria of freedom. The Lesser General
-Public License permits more lax criteria for linking other code with
-the library.
-
- We call this license the "Lesser" General Public License because it
-does Less to protect the user's freedom than the ordinary General
-Public License. It also provides other free software developers Less
-of an advantage over competing non-free programs. These disadvantages
-are the reason we use the ordinary General Public License for many
-libraries. However, the Lesser license provides advantages in certain
-special circumstances.
-
- For example, on rare occasions, there may be a special need to
-encourage the widest possible use of a certain library, so that it becomes
-a de-facto standard. To achieve this, non-free programs must be
-allowed to use the library. A more frequent case is that a free
-library does the same job as widely used non-free libraries. In this
-case, there is little to gain by limiting the free library to free
-software only, so we use the Lesser General Public License.
-
- In other cases, permission to use a particular library in non-free
-programs enables a greater number of people to use a large body of
-free software. For example, permission to use the GNU C Library in
-non-free programs enables many more people to use the whole GNU
-operating system, as well as its variant, the GNU/Linux operating
-system.
-
- Although the Lesser General Public License is Less protective of the
-users' freedom, it does ensure that the user of a program that is
-linked with the Library has the freedom and the wherewithal to run
-that program using a modified version of the Library.
-
- The precise terms and conditions for copying, distribution and
-modification follow. Pay close attention to the difference between a
-"work based on the library" and a "work that uses the library". The
-former contains code derived from the library, whereas the latter must
-be combined with the library in order to run.
-
- GNU LESSER GENERAL PUBLIC LICENSE
- TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION
-
- 0. This License Agreement applies to any software library or other
-program which contains a notice placed by the copyright holder or
-other authorized party saying it may be distributed under the terms of
-this Lesser General Public License (also called "this License").
-Each licensee is addressed as "you".
-
- A "library" means a collection of software functions and/or data
-prepared so as to be conveniently linked with application programs
-(which use some of those functions and data) to form executables.
-
- The "Library", below, refers to any such software library or work
-which has been distributed under these terms. A "work based on the
-Library" means either the Library or any derivative work under
-copyright law: that is to say, a work containing the Library or a
-portion of it, either verbatim or with modifications and/or translated
-straightforwardly into another language. (Hereinafter, translation is
-included without limitation in the term "modification".)
-
- "Source code" for a work means the preferred form of the work for
-making modifications to it. For a library, complete source code means
-all the source code for all modules it contains, plus any associated
-interface definition files, plus the scripts used to control compilation
-and installation of the library.
-
- Activities other than copying, distribution and modification are not
-covered by this License; they are outside its scope. The act of
-running a program using the Library is not restricted, and output from
-such a program is covered only if its contents constitute a work based
-on the Library (independent of the use of the Library in a tool for
-writing it). Whether that is true depends on what the Library does
-and what the program that uses the Library does.
-
- 1. You may copy and distribute verbatim copies of the Library's
-complete source code as you receive it, in any medium, provided that
-you conspicuously and appropriately publish on each copy an
-appropriate copyright notice and disclaimer of warranty; keep intact
-all the notices that refer to this License and to the absence of any
-warranty; and distribute a copy of this License along with the
-Library.
-
- You may charge a fee for the physical act of transferring a copy,
-and you may at your option offer warranty protection in exchange for a
-fee.
-
- 2. You may modify your copy or copies of the Library or any portion
-of it, thus forming a work based on the Library, and copy and
-distribute such modifications or work under the terms of Section 1
-above, provided that you also meet all of these conditions:
-
- a) The modified work must itself be a software library.
-
- b) You must cause the files modified to carry prominent notices
- stating that you changed the files and the date of any change.
-
- c) You must cause the whole of the work to be licensed at no
- charge to all third parties under the terms of this License.
-
- d) If a facility in the modified Library refers to a function or a
- table of data to be supplied by an application program that uses
- the facility, other than as an argument passed when the facility
- is invoked, then you must make a good faith effort to ensure that,
- in the event an application does not supply such function or
- table, the facility still operates, and performs whatever part of
- its purpose remains meaningful.
-
- (For example, a function in a library to compute square roots has
- a purpose that is entirely well-defined independent of the
- application. Therefore, Subsection 2d requires that any
- application-supplied function or table used by this function must
- be optional: if the application does not supply it, the square
- root function must still compute square roots.)
-
-These requirements apply to the modified work as a whole. If
-identifiable sections of that work are not derived from the Library,
-and can be reasonably considered independent and separate works in
-themselves, then this License, and its terms, do not apply to those
-sections when you distribute them as separate works. But when you
-distribute the same sections as part of a whole which is a work based
-on the Library, the distribution of the whole must be on the terms of
-this License, whose permissions for other licensees extend to the
-entire whole, and thus to each and every part regardless of who wrote
-it.
-
-Thus, it is not the intent of this section to claim rights or contest
-your rights to work written entirely by you; rather, the intent is to
-exercise the right to control the distribution of derivative or
-collective works based on the Library.
-
-In addition, mere aggregation of another work not based on the Library
-with the Library (or with a work based on the Library) on a volume of
-a storage or distribution medium does not bring the other work under
-the scope of this License.
-
- 3. You may opt to apply the terms of the ordinary GNU General Public
-License instead of this License to a given copy of the Library. To do
-this, you must alter all the notices that refer to this License, so
-that they refer to the ordinary GNU General Public License, version 2,
-instead of to this License. (If a newer version than version 2 of the
-ordinary GNU General Public License has appeared, then you can specify
-that version instead if you wish.) Do not make any other change in
-these notices.
-
- Once this change is made in a given copy, it is irreversible for
-that copy, so the ordinary GNU General Public License applies to all
-subsequent copies and derivative works made from that copy.
-
- This option is useful when you wish to copy part of the code of
-the Library into a program that is not a library.
-
- 4. You may copy and distribute the Library (or a portion or
-derivative of it, under Section 2) in object code or executable form
-under the terms of Sections 1 and 2 above provided that you accompany
-it with the complete corresponding machine-readable source code, which
-must be distributed under the terms of Sections 1 and 2 above on a
-medium customarily used for software interchange.
-
- If distribution of object code is made by offering access to copy
-from a designated place, then offering equivalent access to copy the
-source code from the same place satisfies the requirement to
-distribute the source code, even though third parties are not
-compelled to copy the source along with the object code.
-
- 5. A program that contains no derivative of any portion of the
-Library, but is designed to work with the Library by being compiled or
-linked with it, is called a "work that uses the Library". Such a
-work, in isolation, is not a derivative work of the Library, and
-therefore falls outside the scope of this License.
-
- However, linking a "work that uses the Library" with the Library
-creates an executable that is a derivative of the Library (because it
-contains portions of the Library), rather than a "work that uses the
-library". The executable is therefore covered by this License.
-Section 6 states terms for distribution of such executables.
-
- When a "work that uses the Library" uses material from a header file
-that is part of the Library, the object code for the work may be a
-derivative work of the Library even though the source code is not.
-Whether this is true is especially significant if the work can be
-linked without the Library, or if the work is itself a library. The
-threshold for this to be true is not precisely defined by law.
-
- If such an object file uses only numerical parameters, data
-structure layouts and accessors, and small macros and small inline
-functions (ten lines or less in length), then the use of the object
-file is unrestricted, regardless of whether it is legally a derivative
-work. (Executables containing this object code plus portions of the
-Library will still fall under Section 6.)
-
- Otherwise, if the work is a derivative of the Library, you may
-distribute the object code for the work under the terms of Section 6.
-Any executables containing that work also fall under Section 6,
-whether or not they are linked directly with the Library itself.
-
- 6. As an exception to the Sections above, you may also combine or
-link a "work that uses the Library" with the Library to produce a
-work containing portions of the Library, and distribute that work
-under terms of your choice, provided that the terms permit
-modification of the work for the customer's own use and reverse
-engineering for debugging such modifications.
-
- You must give prominent notice with each copy of the work that the
-Library is used in it and that the Library and its use are covered by
-this License. You must supply a copy of this License. If the work
-during execution displays copyright notices, you must include the
-copyright notice for the Library among them, as well as a reference
-directing the user to the copy of this License. Also, you must do one
-of these things:
-
- a) Accompany the work with the complete corresponding
- machine-readable source code for the Library including whatever
- changes were used in the work (which must be distributed under
- Sections 1 and 2 above); and, if the work is an executable linked
- with the Library, with the complete machine-readable "work that
- uses the Library", as object code and/or source code, so that the
- user can modify the Library and then relink to produce a modified
- executable containing the modified Library. (It is understood
- that the user who changes the contents of definitions files in the
- Library will not necessarily be able to recompile the application
- to use the modified definitions.)
-
- b) Use a suitable shared library mechanism for linking with the
- Library. A suitable mechanism is one that (1) uses at run time a
- copy of the library already present on the user's computer system,
- rather than copying library functions into the executable, and (2)
- will operate properly with a modified version of the library, if
- the user installs one, as long as the modified version is
- interface-compatible with the version that the work was made with.
-
- c) Accompany the work with a written offer, valid for at
- least three years, to give the same user the materials
- specified in Subsection 6a, above, for a charge no more
- than the cost of performing this distribution.
-
- d) If distribution of the work is made by offering access to copy
- from a designated place, offer equivalent access to copy the above
- specified materials from the same place.
-
- e) Verify that the user has already received a copy of these
- materials or that you have already sent this user a copy.
-
- For an executable, the required form of the "work that uses the
-Library" must include any data and utility programs needed for
-reproducing the executable from it. However, as a special exception,
-the materials to be distributed need not include anything that is
-normally distributed (in either source or binary form) with the major
-components (compiler, kernel, and so on) of the operating system on
-which the executable runs, unless that component itself accompanies
-the executable.
-
- It may happen that this requirement contradicts the license
-restrictions of other proprietary libraries that do not normally
-accompany the operating system. Such a contradiction means you cannot
-use both them and the Library together in an executable that you
-distribute.
-
- 7. You may place library facilities that are a work based on the
-Library side-by-side in a single library together with other library
-facilities not covered by this License, and distribute such a combined
-library, provided that the separate distribution of the work based on
-the Library and of the other library facilities is otherwise
-permitted, and provided that you do these two things:
-
- a) Accompany the combined library with a copy of the same work
- based on the Library, uncombined with any other library
- facilities. This must be distributed under the terms of the
- Sections above.
-
- b) Give prominent notice with the combined library of the fact
- that part of it is a work based on the Library, and explaining
- where to find the accompanying uncombined form of the same work.
-
- 8. You may not copy, modify, sublicense, link with, or distribute
-the Library except as expressly provided under this License. Any
-attempt otherwise to copy, modify, sublicense, link with, or
-distribute the Library is void, and will automatically terminate your
-rights under this License. However, parties who have received copies,
-or rights, from you under this License will not have their licenses
-terminated so long as such parties remain in full compliance.
-
- 9. You are not required to accept this License, since you have not
-signed it. However, nothing else grants you permission to modify or
-distribute the Library or its derivative works. These actions are
-prohibited by law if you do not accept this License. Therefore, by
-modifying or distributing the Library (or any work based on the
-Library), you indicate your acceptance of this License to do so, and
-all its terms and conditions for copying, distributing or modifying
-the Library or works based on it.
-
- 10. Each time you redistribute the Library (or any work based on the
-Library), the recipient automatically receives a license from the
-original licensor to copy, distribute, link with or modify the Library
-subject to these terms and conditions. You may not impose any further
-restrictions on the recipients' exercise of the rights granted herein.
-You are not responsible for enforcing compliance by third parties with
-this License.
-
- 11. If, as a consequence of a court judgment or allegation of patent
-infringement or for any other reason (not limited to patent issues),
-conditions are imposed on you (whether by court order, agreement or
-otherwise) that contradict the conditions of this License, they do not
-excuse you from the conditions of this License. If you cannot
-distribute so as to satisfy simultaneously your obligations under this
-License and any other pertinent obligations, then as a consequence you
-may not distribute the Library at all. For example, if a patent
-license would not permit royalty-free redistribution of the Library by
-all those who receive copies directly or indirectly through you, then
-the only way you could satisfy both it and this License would be to
-refrain entirely from distribution of the Library.
-
-If any portion of this section is held invalid or unenforceable under any
-particular circumstance, the balance of the section is intended to apply,
-and the section as a whole is intended to apply in other circumstances.
-
-It is not the purpose of this section to induce you to infringe any
-patents or other property right claims or to contest validity of any
-such claims; this section has the sole purpose of protecting the
-integrity of the free software distribution system which is
-implemented by public license practices. Many people have made
-generous contributions to the wide range of software distributed
-through that system in reliance on consistent application of that
-system; it is up to the author/donor to decide if he or she is willing
-to distribute software through any other system and a licensee cannot
-impose that choice.
-
-This section is intended to make thoroughly clear what is believed to
-be a consequence of the rest of this License.
-
- 12. If the distribution and/or use of the Library is restricted in
-certain countries either by patents or by copyrighted interfaces, the
-original copyright holder who places the Library under this License may add
-an explicit geographical distribution limitation excluding those countries,
-so that distribution is permitted only in or among countries not thus
-excluded. In such case, this License incorporates the limitation as if
-written in the body of this License.
-
- 13. The Free Software Foundation may publish revised and/or new
-versions of the Lesser General Public License from time to time.
-Such new versions will be similar in spirit to the present version,
-but may differ in detail to address new problems or concerns.
-
-Each version is given a distinguishing version number. If the Library
-specifies a version number of this License which applies to it and
-"any later version", you have the option of following the terms and
-conditions either of that version or of any later version published by
-the Free Software Foundation. If the Library does not specify a
-license version number, you may choose any version ever published by
-the Free Software Foundation.
-
- 14. If you wish to incorporate parts of the Library into other free
-programs whose distribution conditions are incompatible with these,
-write to the author to ask for permission. For software which is
-copyrighted by the Free Software Foundation, write to the Free
-Software Foundation; we sometimes make exceptions for this. Our
-decision will be guided by the two goals of preserving the free status
-of all derivatives of our free software and of promoting the sharing
-and reuse of software generally.
-
- NO WARRANTY
-
- 15. BECAUSE THE LIBRARY IS LICENSED FREE OF CHARGE, THERE IS NO
-WARRANTY FOR THE LIBRARY, TO THE EXTENT PERMITTED BY APPLICABLE LAW.
-EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR
-OTHER PARTIES PROVIDE THE LIBRARY "AS IS" WITHOUT WARRANTY OF ANY
-KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE
-IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
-PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE
-LIBRARY IS WITH YOU. SHOULD THE LIBRARY PROVE DEFECTIVE, YOU ASSUME
-THE COST OF ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
-
- 16. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN
-WRITING WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY
-AND/OR REDISTRIBUTE THE LIBRARY AS PERMITTED ABOVE, BE LIABLE TO YOU
-FOR DAMAGES, INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR
-CONSEQUENTIAL DAMAGES ARISING OUT OF THE USE OR INABILITY TO USE THE
-LIBRARY (INCLUDING BUT NOT LIMITED TO LOSS OF DATA OR DATA BEING
-RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD PARTIES OR A
-FAILURE OF THE LIBRARY TO OPERATE WITH ANY OTHER SOFTWARE), EVEN IF
-SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH
-DAMAGES.
-
- END OF TERMS AND CONDITIONS
-
- How to Apply These Terms to Your New Libraries
-
- If you develop a new library, and you want it to be of the greatest
-possible use to the public, we recommend making it free software that
-everyone can redistribute and change. You can do so by permitting
-redistribution under these terms (or, alternatively, under the terms of the
-ordinary General Public License).
-
- To apply these terms, attach the following notices to the library. It is
-safest to attach them to the start of each source file to most effectively
-convey the exclusion of warranty; and each file should have at least the
-"copyright" line and a pointer to where the full notice is found.
-
-
- Copyright (C)
-
- This library is free software; you can redistribute it and/or
- modify it under the terms of the GNU Lesser General Public
- License as published by the Free Software Foundation; either
- version 2.1 of the License, or (at your option) any later version.
-
- This library is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- Lesser General Public License for more details.
-
- You should have received a copy of the GNU Lesser General Public
- License along with this library; if not, write to the Free Software
- Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
-
-Also add information on how to contact you by electronic and paper mail.
-
-You should also get your employer (if you work as a programmer) or your
-school, if any, to sign a "copyright disclaimer" for the library, if
-necessary. Here is a sample; alter the names:
-
- Yoyodyne, Inc., hereby disclaims all copyright interest in the
- library `Frob' (a library for tweaking knobs) written by James Random Hacker.
-
- , 1 April 1990
- Ty Coon, President of Vice
-
-That's all there is to it!
-
-----------------------------------------------------------------------
-
-The bundled SHA1 collision detection code is licensed under the MIT license:
-
-MIT License
-
-Copyright (c) 2017:
- Marc Stevens
- Cryptology Group
- Centrum Wiskunde & Informatica
- P.O. Box 94079, 1090 GB Amsterdam, Netherlands
- marc@marc-stevens.nl
-
- Dan Shumow
- Microsoft Research
- danshu@microsoft.com
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in all
-copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
-SOFTWARE.
-
-----------------------------------------------------------------------
-
-The bundled wildmatch code is licensed under the BSD license:
-
-Copyright Rich Salz.
-All rights reserved.
-
-Redistribution and use in any form are permitted provided that the
-following restrictions are are met:
-
-1. Source distributions must retain this entire copyright notice
- and comment.
-2. Binary distributions must include the acknowledgement ``This
- product includes software developed by Rich Salz'' in the
- documentation or other materials provided with the
- distribution. This must not be represented as an endorsement
- or promotion without specific prior written permission.
-3. The origin of this software must not be misrepresented, either
- by explicit claim or by omission. Credits must appear in the
- source and documentation.
-4. Altered versions must be plainly marked as such in the source
- and documentation and must not be misrepresented as being the
- original software.
-
-THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED
-WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF
-MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
-
-----------------------------------------------------------------------
-
-Portions of the OpenSSL headers are included under the OpenSSL license:
-
-Copyright (C) 1995-1998 Eric Young (eay@cryptsoft.com)
-All rights reserved.
-
-This package is an SSL implementation written
-by Eric Young (eay@cryptsoft.com).
-The implementation was written so as to conform with Netscapes SSL.
-
-This library is free for commercial and non-commercial use as long as
-the following conditions are aheared to. The following conditions
-apply to all code found in this distribution, be it the RC4, RSA,
-lhash, DES, etc., code; not just the SSL code. The SSL documentation
-included with this distribution is covered by the same copyright terms
-except that the holder is Tim Hudson (tjh@cryptsoft.com).
-
-Copyright remains Eric Young's, and as such any Copyright notices in
-the code are not to be removed.
-If this package is used in a product, Eric Young should be given attribution
-as the author of the parts of the library used.
-This can be in the form of a textual message at program startup or
-in documentation (online or textual) provided with the package.
-
-Redistribution and use in source and binary forms, with or without
-modification, are permitted provided that the following conditions
-are met:
-1. Redistributions of source code must retain the copyright
- notice, this list of conditions and the following disclaimer.
-2. Redistributions in binary form must reproduce the above copyright
- notice, this list of conditions and the following disclaimer in the
- documentation and/or other materials provided with the distribution.
-3. All advertising materials mentioning features or use of this software
- must display the following acknowledgement:
- "This product includes cryptographic software written by
- Eric Young (eay@cryptsoft.com)"
- The word 'cryptographic' can be left out if the rouines from the library
- being used are not cryptographic related :-).
-4. If you include any Windows specific code (or a derivative thereof) from
- the apps directory (application code) you must include an acknowledgement:
- "This product includes software written by Tim Hudson (tjh@cryptsoft.com)"
-
-THIS SOFTWARE IS PROVIDED BY ERIC YOUNG ``AS IS'' AND
-ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
-ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
-FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
-DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
-OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
-HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
-LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
-OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
-SUCH DAMAGE.
-
-The licence and distribution terms for any publically available version or
-derivative of this code cannot be changed. i.e. this code cannot simply be
-copied and put under another distribution licence
-[including the GNU Public Licence.]
-
-====================================================================
-Copyright (c) 1998-2007 The OpenSSL Project. All rights reserved.
-
-Redistribution and use in source and binary forms, with or without
-modification, are permitted provided that the following conditions
-are met:
-
-1. Redistributions of source code must retain the above copyright
- notice, this list of conditions and the following disclaimer.
-
-2. Redistributions in binary form must reproduce the above copyright
- notice, this list of conditions and the following disclaimer in
- the documentation and/or other materials provided with the
- distribution.
-
-3. All advertising materials mentioning features or use of this
- software must display the following acknowledgment:
- "This product includes software developed by the OpenSSL Project
- for use in the OpenSSL Toolkit. (http://www.openssl.org/)"
-
-4. The names "OpenSSL Toolkit" and "OpenSSL Project" must not be used to
- endorse or promote products derived from this software without
- prior written permission. For written permission, please contact
- openssl-core@openssl.org.
-
-5. Products derived from this software may not be called "OpenSSL"
- nor may "OpenSSL" appear in their names without prior written
- permission of the OpenSSL Project.
-
-6. Redistributions of any form whatsoever must retain the following
- acknowledgment:
- "This product includes software developed by the OpenSSL Project
- for use in the OpenSSL Toolkit (http://www.openssl.org/)"
-
-THIS SOFTWARE IS PROVIDED BY THE OpenSSL PROJECT ``AS IS'' AND ANY
-EXPRESSED OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
-PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE OpenSSL PROJECT OR
-ITS CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
-NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
-LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
-HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
-STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
-ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
-OF THE POSSIBILITY OF SUCH DAMAGE.
-
-----------------------------------------------------------------------
-
-The xoroshiro256** implementation is licensed in the public domain:
-
-Written in 2018 by David Blackman and Sebastiano Vigna (vigna@acm.org)
-
-To the extent possible under law, the author has dedicated all copyright
-and related and neighboring rights to this software to the public domain
-worldwide. This software is distributed without any warranty.
-
-See .
-
-----------------------------------------------------------------------
-
-The built-in SHA256 support (src/hash/rfc6234) is taken from RFC 6234
-under the following license:
-
-Copyright (c) 2011 IETF Trust and the persons identified as
-authors of the code. All rights reserved.
-
-Redistribution and use in source and binary forms, with or
-without modification, are permitted provided that the following
-conditions are met:
-
-- Redistributions of source code must retain the above
- copyright notice, this list of conditions and
- the following disclaimer.
-
-- Redistributions in binary form must reproduce the above
- copyright notice, this list of conditions and the following
- disclaimer in the documentation and/or other materials provided
- with the distribution.
-
-- Neither the name of Internet Society, IETF or IETF Trust, nor
- the names of specific contributors, may be used to endorse or
- promote products derived from this software without specific
- prior written permission.
-
-THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
-CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
-INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
-MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
-CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
-NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
-LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
-HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
-CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
-OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
-EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/CHANGELOG.md b/CHANGELOG.md
index a16e34ba9..74cb010a9 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -2,6 +2,1410 @@
All notable changes to this project are documented in this file.
+## 1.7.0
+
+**Release date:** 2025-09-15
+
+This minor release comes with new features, improvements and bug fixes.
+
+### ExternalArtifact
+
+A new [ExternalArtifact](https://github.com/fluxcd/source-controller/blob/main/docs/spec/v1/externalartifacts.md) API has been added to the `source.toolkit.fluxcd.io` group. This API enables advanced source composition and decomposition patterns implemented by the [source-watcher](https://github.com/fluxcd/source-watcher) controller.
+
+### GitRepository
+
+GitRepository controller now includes fixes for stalling issues and improved error handling. Multi-tenant workload identity support has been added for Azure repositories when the `ObjectLevelWorkloadIdentity` feature gate is enabled. TLS configuration support has been added for GitHub App authentication.
+
+### Bucket
+
+Bucket controller now supports multi-tenant workload identity for AWS, Azure and GCP providers when the `ObjectLevelWorkloadIdentity` feature gate is enabled. A default service account flag has been added for lockdown scenarios.
+
+### General updates
+
+The controller now supports system certificate pools for improved CA compatibility, and TLS ServerName pinning has been removed from TLS configuration for better flexibility. A `--default-service-account` flag was introduced for workload identity multi-tenancy lockdown.
+
+In addition, the Kubernetes dependencies have been updated to v1.34, Helm
+has been updated to v3.19 and various other controller dependencies have
+been updated to their latest version. The controller is now built with
+Go 1.25.
+
+Fixes:
+- Fix GitRepository controller stalling when it shouldn't
+ [#1865](https://github.com/fluxcd/source-controller/pull/1865)
+
+Improvements:
+- [RFC-0010] Add multi-tenant workload identity support for GCP Bucket
+ [#1862](https://github.com/fluxcd/source-controller/pull/1862)
+- [RFC-0010] Add multi-tenant workload identity support for AWS Bucket
+ [#1868](https://github.com/fluxcd/source-controller/pull/1868)
+- [RFC-0010] Add multi-tenant workload identity support for Azure GitRepository
+ [#1871](https://github.com/fluxcd/source-controller/pull/1871)
+- [RFC-0010] Add default-service-account for lockdown
+ [#1872](https://github.com/fluxcd/source-controller/pull/1872)
+- [RFC-0010] Add multi-tenant workload identity support for Azure Blob Storage
+ [#1875](https://github.com/fluxcd/source-controller/pull/1875)
+- [RFC-0012] Add ExternalArtifact API documentation
+ [#1881](https://github.com/fluxcd/source-controller/pull/1881)
+- [RFC-0012] Refactor controller to use `fluxcd/pkg/artifact`
+ [#1883](https://github.com/fluxcd/source-controller/pull/1883)
+- Migrate OCIRepository controller to runtime/secrets
+ [#1851](https://github.com/fluxcd/source-controller/pull/1851)
+- Migrate Bucket controller to runtime/secrets
+ [#1852](https://github.com/fluxcd/source-controller/pull/1852)
+- Add TLS config for GitHub App authentication
+ [#1860](https://github.com/fluxcd/source-controller/pull/1860)
+- Remove ServerName pinning from TLS config
+ [#1870](https://github.com/fluxcd/source-controller/pull/1870)
+- Extract storage operations to a dedicated package
+ [#1864](https://github.com/fluxcd/source-controller/pull/1864)
+- Remove deprecated APIs in group `source.toolkit.fluxcd.io/v1beta1`
+ [#1861](https://github.com/fluxcd/source-controller/pull/1861)
+- Migrate tests from gotest to gomega
+ [#1876](https://github.com/fluxcd/source-controller/pull/1876)
+- Update dependencies
+ [#1888](https://github.com/fluxcd/source-controller/pull/1888)
+ [#1880](https://github.com/fluxcd/source-controller/pull/1880)
+ [#1878](https://github.com/fluxcd/source-controller/pull/1878)
+ [#1876](https://github.com/fluxcd/source-controller/pull/1876)
+ [#1874](https://github.com/fluxcd/source-controller/pull/1874)
+ [#1850](https://github.com/fluxcd/source-controller/pull/1850)
+ [#1844](https://github.com/fluxcd/source-controller/pull/1844)
+
+## 1.6.2
+
+**Release date:** 2025-06-27
+
+This patch release comes with a fix for `rsa-sha2-512` and `rsa-sha2-256` algorithms
+not being prioritized for `ssh-rsa` host keys.
+
+Fixes:
+- Fix: Prioritize sha2-512 and sha2-256 for ssh-rsa host keys
+ [#1839](https://github.com/fluxcd/source-controller/pull/1839)
+
+## 1.6.1
+
+**Release date:** 2025-06-13
+
+This patch release comes with a fix for the `knownhosts: key mismatch`
+error in the `GitRepository` API when using SSH authentication, and
+a fix for authentication with
+[public ECR repositories](https://fluxcd.io/flux/integrations/aws/#for-amazon-public-elastic-container-registry)
+in the `OCIRepository` API.
+
+Fix:
+- Fix authentication for public ECR
+ [#1825](https://github.com/fluxcd/source-controller/pull/1825)
+- Fix `knownhosts key mismatch` regression bug
+ [#1829](https://github.com/fluxcd/source-controller/pull/1829)
+
+## 1.6.0
+
+**Release date:** 2025-05-27
+
+This minor release promotes the OCIRepository API to GA, and comes with new features,
+improvements and bug fixes.
+
+### OCIRepository
+
+The `OCIRepository` API has been promoted from `v1beta2` to `v1` (GA).
+The `v1` API is backwards compatible with `v1beta2`.
+
+OCIRepository API now supports object-level workload identity by setting
+`.spec.provider` to one of `aws`, `azure`, or `gcp`, and setting
+`.spec.serviceAccountName` to the name of a service account in the same
+namespace that has been configured with appropriate cloud permissions.
+For this feature to work, the controller feature gate
+`ObjectLevelWorkloadIdentity` must be enabled. See a complete guide
+[here](https://fluxcd.io/flux/integrations/).
+
+OCIRepository API now caches registry credentials for cloud providers
+by default. This behavior can be disabled or fine-tuned by adjusting the
+token cache controller flags (see [docs](https://fluxcd.io/flux/components/source/options/)).
+The token cache also exposes metrics that are documented
+[here](https://fluxcd.io/flux/monitoring/metrics/#controller-metrics).
+
+### GitRepository
+
+GitRepository API now supports sparse checkout by setting a list
+of directories in the `.spec.sparseCheckout` field. This allows
+for optimizing the amount of data fetched from the Git repository.
+
+GitRepository API now supports mTLS authentication for HTTPS Git repositories
+by setting the fields `tls.crt`, `tls.key`, and `ca.crt` in the `.data` field
+of the referenced Secret in `.spec.secretRef`.
+
+GitRepository API now caches credentials for non-`generic` providers by default.
+This behavior can be disabled or fine-tuned by adjusting the
+token cache controller flags (see [docs](https://fluxcd.io/flux/components/source/options/)).
+The token cache also exposes metrics that are documented
+[here](https://fluxcd.io/flux/monitoring/metrics/#controller-metrics).
+
+### General updates
+
+In addition, the Kubernetes dependencies have been updated to v1.33 and
+various other controller dependencies have been updated to their latest
+version. The controller is now built with Go 1.24.
+
+Fixes:
+- Downgrade `Masterminds/semver` to v3.3.0
+ [#1785](https://github.com/fluxcd/source-controller/pull/1785)
+
+Improvements:
+- Promote OCIRepository API to v1 (GA)
+ [#1794](https://github.com/fluxcd/source-controller/pull/1794)
+- [RFC-0010] Introduce object-level workload identity for container registry APIs and cache credentials
+ [#1790](https://github.com/fluxcd/source-controller/pull/1790)
+ [#1802](https://github.com/fluxcd/source-controller/pull/1802)
+ [#1811](https://github.com/fluxcd/source-controller/pull/1811)
+- Implement Sparse Checkout for `GitRepository`
+ [#1774](https://github.com/fluxcd/source-controller/pull/1774)
+- Add Mutual TLS support to `GitRepository`
+ [#1778](https://github.com/fluxcd/source-controller/pull/1778)
+- Introduce token cache for `GitRepository`
+ [#1745](https://github.com/fluxcd/source-controller/pull/1745)
+ [#1788](https://github.com/fluxcd/source-controller/pull/1788)
+ [#1789](https://github.com/fluxcd/source-controller/pull/1789)
+- Build controller without CGO
+ [#1725](https://github.com/fluxcd/source-controller/pull/1725)
+- Various dependency updates
+ [#1812](https://github.com/fluxcd/source-controller/pull/1812)
+ [#1800](https://github.com/fluxcd/source-controller/pull/1800)
+ [#1810](https://github.com/fluxcd/source-controller/pull/1810)
+ [#1806](https://github.com/fluxcd/source-controller/pull/1806)
+ [#1782](https://github.com/fluxcd/source-controller/pull/1782)
+ [#1783](https://github.com/fluxcd/source-controller/pull/1783)
+ [#1775](https://github.com/fluxcd/source-controller/pull/1775)
+ [#1728](https://github.com/fluxcd/source-controller/pull/1728)
+ [#1722](https://github.com/fluxcd/source-controller/pull/1722)
+
+## 1.5.0
+
+**Release date:** 2025-02-13
+
+This minor release comes with various bug fixes and improvements.
+
+### GitRepository
+
+The GitRepository API now supports authenticating through GitHub App
+for GitHub repositories. See
+[docs](https://fluxcd.io/flux/components/source/gitrepositories/#github).
+
+In addition, the Kubernetes dependencies have been updated to v1.32.1, Helm has
+been updated to v3.17.0 and various other controller dependencies have been
+updated to their latest version.
+
+Fixes:
+- Remove deprecated object metrics from controllers
+ [#1686](https://github.com/fluxcd/source-controller/pull/1686)
+
+Improvements:
+- [RFC-0007] Implement GitHub App authentication for git repositories.
+ [#1647](https://github.com/fluxcd/source-controller/pull/1647)
+- Various dependency updates
+ [#1684](https://github.com/fluxcd/source-controller/pull/1684)
+ [#1689](https://github.com/fluxcd/source-controller/pull/1689)
+ [#1693](https://github.com/fluxcd/source-controller/pull/1693)
+ [#1705](https://github.com/fluxcd/source-controller/pull/1705)
+ [#1708](https://github.com/fluxcd/source-controller/pull/1708)
+ [#1709](https://github.com/fluxcd/source-controller/pull/1709)
+ [#1713](https://github.com/fluxcd/source-controller/pull/1713)
+ [#1716](https://github.com/fluxcd/source-controller/pull/1716)
+
+## 1.4.1
+
+**Release date:** 2024-09-26
+
+This patch release comes with a fix to the `GitRepository` API to keep it
+backwards compatible by removing the default value for `.spec.provider` field
+when not set in the API. The controller will internally consider an empty value
+for the provider as the `generic` provider.
+
+Fix:
+- GitRepo: Remove provider default value from API
+ [#1626](https://github.com/fluxcd/source-controller/pull/1626)
+
+## 1.4.0
+
+**Release date:** 2024-09-25
+
+This minor release promotes the Bucket API to GA, and comes with new features,
+improvements and bug fixes.
+
+### Bucket
+
+The `Bucket` API has been promoted from `v1beta2` to `v1` (GA).
+The `v1` API is backwards compatible with `v1beta2`.
+
+Bucket API now supports proxy through the field `.spec.proxySecretRef` and custom TLS client certificate and CA through the field `.spec.certSecretRef`.
+
+Bucket API now also supports specifying a custom STS configuration through the field `.spec.sts`. This is currently only supported for the providers `generic` and `aws`. When specifying a custom STS configuration one must specify which STS provider to use. For the `generic` bucket provider we support the `ldap` STS provider, and for the `aws` bucket provider we support the `aws` STS provider. For the `aws` STS provider, one may use the default main STS endpoint, or the regional STS endpoints, or even an interface endpoint.
+
+### OCIRepository
+
+OCIRepository API now supports proxy through the field `.spec.proxySecretRef`.
+
+**Warning**: Proxy is not supported for cosign keyless verification.
+
+### GitRepository
+
+GitRepository API now supports OIDC authentication for Azure DevOps repositories through the field `.spec.provider` using the value `azure`. See the docs for details [here](https://fluxcd.io/flux/components/source/gitrepositories/#provider).
+
+In addition, the Kubernetes dependencies have been updated to v1.31.1, Helm has
+been updated to v3.16.1 and various other controller dependencies have been
+updated to their latest version. The controller is now built with Go 1.23.
+
+Fixes:
+- helm: Use the default transport pool to preserve proxy settings
+ [#1490](https://github.com/fluxcd/source-controller/pull/1490)
+- Fix incorrect use of format strings with the conditions package.
+ [#1529](https://github.com/fluxcd/source-controller/pull/1529)
+- Fix HelmChart local dependency resolution for name-based path
+ [#1539](https://github.com/fluxcd/source-controller/pull/1539)
+- Fix Helm index validation for Artifactory
+ [#1516](https://github.com/fluxcd/source-controller/pull/1516)
+
+Improvements:
+- Promote Bucket API to v1
+ [#1592](https://github.com/fluxcd/source-controller/pull/1592)
+- Add .spec.certSecretRef to Bucket API
+ [#1475](https://github.com/fluxcd/source-controller/pull/1475)
+- Run ARM64 tests on GitHub runners
+ [#1512](https://github.com/fluxcd/source-controller/pull/1512)
+- Add support for .spec.proxySecretRef for generic provider of Bucket API
+ [#1500](https://github.com/fluxcd/source-controller/pull/1500)
+- Improve invalid proxy error message for Bucket API
+ [#1550](https://github.com/fluxcd/source-controller/pull/1550)
+- Add support for AWS STS endpoint in the Bucket API
+ [#1552](https://github.com/fluxcd/source-controller/pull/1552)
+- Add proxy support for GCS buckets
+ [#1565](https://github.com/fluxcd/source-controller/pull/1565)
+- azure-blob: Fix VisitObjects() in integration test
+ [#1574](https://github.com/fluxcd/source-controller/pull/1574)
+- Add proxy support for Azure buckets
+ [#1567](https://github.com/fluxcd/source-controller/pull/1567)
+- Add proxy support for AWS S3 buckets
+ [#1568](https://github.com/fluxcd/source-controller/pull/1568)
+- Add proxy support for OCIRepository API
+ [#1536](https://github.com/fluxcd/source-controller/pull/1536)
+- Add LDAP provider for Bucket STS API
+ [#1585](https://github.com/fluxcd/source-controller/pull/1585)
+- Introduce Bucket provider constants with the common part as a prefix
+ [#1589](https://github.com/fluxcd/source-controller/pull/1589)
+- OCIRepository: Configure proxy for OIDC auth
+ [#1607](https://github.com/fluxcd/source-controller/pull/1607)
+- [RFC-0007] Enable Azure OIDC for Azure DevOps repositories
+ [#1591](https://github.com/fluxcd/source-controller/pull/1591)
+- Build with Go 1.23
+ [#1582](https://github.com/fluxcd/source-controller/pull/1582)
+- Various dependency updates
+ [#1507](https://github.com/fluxcd/source-controller/pull/1507)
+ [#1576](https://github.com/fluxcd/source-controller/pull/1576)
+ [#1578](https://github.com/fluxcd/source-controller/pull/1578)
+ [#1579](https://github.com/fluxcd/source-controller/pull/1579)
+ [#1583](https://github.com/fluxcd/source-controller/pull/1583)
+ [#1588](https://github.com/fluxcd/source-controller/pull/1588)
+ [#1603](https://github.com/fluxcd/source-controller/pull/1603)
+ [#1610](https://github.com/fluxcd/source-controller/pull/1610)
+ [#1614](https://github.com/fluxcd/source-controller/pull/1614)
+ [#1618](https://github.com/fluxcd/source-controller/pull/1618)
+
+## 1.3.0
+
+**Release date:** 2024-05-03
+
+This minor release promotes the Helm APIs to GA, and comes with new features,
+improvements and bug fixes.
+
+### HelmRepository
+
+The `HelmRepository` API has been promoted from `v1beta2` to `v1` (GA).
+The `v1` API is backwards compatible with `v1beta2`.
+
+For `HelmRepository` of type `oci`, the `.spec.insecure` field allows connecting
+over HTTP to an insecure non-TLS container registry.
+
+To upgrade from `v1beta2`, after deploying the new CRD and controller,
+set `apiVersion: source.toolkit.fluxcd.io/v1` in the YAML files that
+contain `HelmRepository` definitions.
+Bumping the API version in manifests can be done gradually.
+It is advised not to delay this procedure as the beta versions will be removed after 6 months.
+
+### HelmChart
+
+The `HelmChart` API has been promoted from `v1beta2` to `v1` (GA).
+The `v1` API is backwards compatible with `v1beta2`, with the exception
+of the removal of the deprecated field `.spec.valuesFile` which was replaced with `.spec.valuesFiles`.
+
+The `HelmChart` API was extended with support for
+[Notation signature verification](https://github.com/fluxcd/source-controller/blob/release/v1.3.x/docs/spec/v1/helmcharts.md#notation)
+of Helm OCI charts.
+
+A new optional field `.spec.ignoreMissingValuesFiles` has been added,
+which allows the controller to ignore missing values files rather than failing to reconcile the `HelmChart`.
+
+### OCIRepository
+
+The `OCIRepository` API was extended with support for
+[Notation signature verification](https://github.com/fluxcd/source-controller/blob/release/v1.3.x/docs/spec/v1beta2/ocirepositories.md#notation)
+of OCI artifacts.
+
+A new optional field `.spec.ref.semverFilter` has been added,
+which allows the controller to filter the tags based on regular expressions
+before applying the semver range. This allows
+[picking the latest release candidate](https://github.com/fluxcd/source-controller/blob/release/v1.3.x/docs/spec/v1beta2/ocirepositories.md#semverfilter-example)
+instead of the latest stable release.
+
+In addition, the controller has been updated to Kubernetes v1.30.0,
+Helm v3.14.4, and various other dependencies to their latest version
+to patch upstream CVEs.
+
+Improvements:
+- Promote Helm APIs to `source.toolkit.fluxcd.io/v1` (GA)
+ [#1428](https://github.com/fluxcd/source-controller/pull/1428)
+- Add `.spec.ignoreMissingValuesFiles` to HelmChart API
+ [#1447](https://github.com/fluxcd/source-controller/pull/1447)
+- Implement `.spec.ref.semverFilter` in OCIRepository API
+ [#1407](https://github.com/fluxcd/source-controller/pull/1407)
+- Helm: Allow insecure registry login
+  [#1412](https://github.com/fluxcd/source-controller/pull/1412)
+- Add support for Notation verification to HelmChart and OCIRepository
+ [#1075](https://github.com/fluxcd/source-controller/pull/1075)
+- Various dependency updates
+ [#1442](https://github.com/fluxcd/source-controller/pull/1442)
+ [#1450](https://github.com/fluxcd/source-controller/pull/1450)
+ [#1469](https://github.com/fluxcd/source-controller/pull/1469)
+ [#1378](https://github.com/fluxcd/source-controller/pull/1378)
+
+Fixes:
+- Bind cached helm index to the maximum index size
+ [#1457](https://github.com/fluxcd/source-controller/pull/1457)
+- Remove `genclient:Namespaced` tag
+ [#1386](https://github.com/fluxcd/source-controller/pull/1386)
+
+## 1.2.5
+
+**Release date:** 2024-04-04
+
+This patch release comes with improvements to the `HelmChart` name validation
+and adds logging sanitization of connection error messages for `Bucket` sources.
+
+Fixes:
+- Improve chart name validation
+ [#1377](https://github.com/fluxcd/source-controller/pull/1377)
+- Sanitize URLs for bucket fetch error messages
+ [#1430](https://github.com/fluxcd/source-controller/pull/1430)
+
+Improvements:
+- Update controller-gen to v0.14.0
+ [#1399](https://github.com/fluxcd/source-controller/pull/1399)
+
+## 1.2.4
+
+**Release date:** 2024-02-01
+
+This patch release updates the Kubernetes dependencies to v1.28.6 and various
+other dependencies to their latest version to patch upstream CVEs.
+
+Improvements:
+- Various dependency updates
+ [#1362](https://github.com/fluxcd/source-controller/pull/1362)
+ [#1357](https://github.com/fluxcd/source-controller/pull/1357)
+ [#1353](https://github.com/fluxcd/source-controller/pull/1353)
+ [#1347](https://github.com/fluxcd/source-controller/pull/1347)
+ [#1343](https://github.com/fluxcd/source-controller/pull/1343)
+ [#1340](https://github.com/fluxcd/source-controller/pull/1340)
+ [#1338](https://github.com/fluxcd/source-controller/pull/1338)
+ [#1336](https://github.com/fluxcd/source-controller/pull/1336)
+ [#1334](https://github.com/fluxcd/source-controller/pull/1334)
+
+## 1.2.3
+
+**Release date:** 2023-12-14
+
+This patch release updates the controller's Helm dependency to v3.13.3.
+
+Improvements:
+- Update Helm to v3.13.3
+ [#1325](https://github.com/fluxcd/source-controller/pull/1325)
+- helmrepo: Remove migration log/event
+ [#1324](https://github.com/fluxcd/source-controller/pull/1324)
+
+## 1.2.2
+
+**Release date:** 2023-12-11
+
+This patch release addresses an issue with AWS ECR authentication introduced in
+v1.2.0.
+
+In addition, a variety of dependencies have been updated. Including an update
+of the container base image to Alpine v3.19.
+
+Fixes:
+- Address issue with authenticating towards AWS ECR
+ [#1318](https://github.com/fluxcd/source-controller/pull/1318)
+  [#1321](https://github.com/fluxcd/source-controller/pull/1321)
+
+Improvements:
+
+- Update dependencies
+ [#1314](https://github.com/fluxcd/source-controller/pull/1314)
+ [#1318](https://github.com/fluxcd/source-controller/pull/1318)
+ [#1321](https://github.com/fluxcd/source-controller/pull/1321)
+- build: update Alpine to 3.19
+ [#1316](https://github.com/fluxcd/source-controller/pull/1316)
+
+## 1.2.1
+
+**Release date:** 2023-12-08
+
+This patch release ensures the controller is built with the latest Go `1.21.x`
+release, to mitigate multiple security vulnerabilities which were published
+shortly after the release of v1.2.0.
+
+In addition, a small number of dependencies have been updated to their latest
+version.
+
+Improvements:
+- Update dependencies
+ [#1309](https://github.com/fluxcd/source-controller/pull/1309)
+
+## 1.2.0
+
+**Release date:** 2023-12-05
+
+This minor release comes with API changes, bug fixes and several new features.
+
+### Bucket
+
+A new field, `.spec.prefix`, has been added to the Bucket API, which enables
+server-side filtering of files if the object's `.spec.provider` is set to
+`generic`/`aws`/`gcp`.
+
+### OCIRepository and HelmChart
+
+Two new fields, `.spec.verify.matchOIDCIdentity.issuer` and
+`.spec.verify.matchOIDCIdentity.subject` have been added to the HelmChart and
+OCIRepository APIs. If the image has been keylessly signed via Cosign, these
+fields can be used to verify the OIDC issuer of the Fulcio certificate and the
+OIDC identity's subject respectively.
+
+### HelmRepository
+
+A new boolean field, `.spec.insecure`, has been introduced to the HelmRepository
+API, which allows connecting to a non-TLS HTTP container registry. It is only
+considered if the object's `.spec.type` is set to `oci`.
+
+From this release onwards, HelmRepository objects of type OCI are treated as
+static objects, i.e. they have an empty status.
+Existing objects undergo a one-time automatic migration and new objects
+will undergo a one-time reconciliation to remove any status fields.
+
+Additionally, the controller now performs a shallow clone if the
+`.spec.ref.name` of the GitRepository object points to a branch or a tag.
+
+Furthermore, a bug has been fixed, where the controller would try to
+authenticate against public OCI registries if the HelmRepository object has a
+reference to a Secret containing a CA certificate.
+
+Lastly, dependencies have been updated to their latest version, including an
+update of Kubernetes to v1.28.4.
+
+Fixes:
+- Address miscellaneous issues throughout code base
+ [#1257](https://github.com/fluxcd/source-controller/pull/1257)
+- helmrepo: only configure tls login option when required
+ [#1289](https://github.com/fluxcd/source-controller/pull/1289)
+- oci: rename `OCIChartRepository.insecure` to `insecureHTTP`
+ [#1299](https://github.com/fluxcd/source-controller/pull/1299)
+- Use bitnami Minio oci chart for e2e
+ [#1301](https://github.com/fluxcd/source-controller/pull/1301)
+
+Improvements:
+- build(deps): bump Go dependencies
+ [#1260](https://github.com/fluxcd/source-controller/pull/1260)
+ [#1261](https://github.com/fluxcd/source-controller/pull/1261)
+ [#1269](https://github.com/fluxcd/source-controller/pull/1269)
+ [#1291](https://github.com/fluxcd/source-controller/pull/1291)
+- build(deps): bump the ci group dependencies
+ [#1265](https://github.com/fluxcd/source-controller/pull/1265)
+ [#1266](https://github.com/fluxcd/source-controller/pull/1266)
+ [#1272](https://github.com/fluxcd/source-controller/pull/1272)
+ [#1277](https://github.com/fluxcd/source-controller/pull/1277)
+ [#1281](https://github.com/fluxcd/source-controller/pull/1281)
+ [#1285](https://github.com/fluxcd/source-controller/pull/1285)
+ [#1296](https://github.com/fluxcd/source-controller/pull/1296)
+ [#1303](https://github.com/fluxcd/source-controller/pull/1303)
+- bucket: Add prefix filtering capability
+ [#1228](https://github.com/fluxcd/source-controller/pull/1228)
+- Static HelmRepository OCI
+ [#1243](https://github.com/fluxcd/source-controller/pull/1243)
+- cosign: allow identity matching for keyless verification
+ [#1250](https://github.com/fluxcd/source-controller/pull/1250)
+- Upgrade `go-git` to v5.10.0
+ [#1271](https://github.com/fluxcd/source-controller/pull/1271)
+- storage: change default file permissions
+ [#1276](https://github.com/fluxcd/source-controller/pull/1276)
+- Update dependencies to Kubernetes v1.28
+ [#1286](https://github.com/fluxcd/source-controller/pull/1286)
+- Add `.spec.insecure` to `HelmRepository` for `type: oci`
+ [#1288](https://github.com/fluxcd/source-controller/pull/1288)
+- Update Git dependencies
+ [#1300](https://github.com/fluxcd/source-controller/pull/1300)
+- Update Go dependencies
+ [#1304](https://github.com/fluxcd/source-controller/pull/1304)
+
+## 1.1.2
+
+**Release date:** 2023-10-11
+
+This patch release fixes a bug where OCIRepository objects can't be consumed
+when the OCI image layer contains symlinks.
+
+Fixes:
+- oci: Skip symlinks found in upstream artifacts
+ [#1246](https://github.com/fluxcd/source-controller/pull/1246/)
+
+Improvements:
+- build(deps): bump the ci group with 1 update
+ [#1256](https://github.com/fluxcd/source-controller/pull/1256)
+
+## 1.1.1
+
+**Release date:** 2023-09-18
+
+This is a patch release that fixes a regression introduced in v1.1.0 where
+HelmRepository objects would not be reconciled if they provided a TLS Secret
+using `.spec.secretRef` with a type other than `Opaque` or `kubernetes.io/tls`.
+
+In addition, the URL lookup strategy for Buckets has been changed from path to
+auto, to widen support for S3-compatible object storage services.
+
+Lastly, several dependencies have been updated to their latest versions.
+
+Fixes:
+- bucket: use auto lookup type
+ [#1222](https://github.com/fluxcd/source-controller/pull/1222)
+- helmrepo: fix Secret type check for TLS via `.spec.secretRef`
+ [#1225](https://github.com/fluxcd/source-controller/pull/1225)
+- Upgrade github.com/fluxcd/pkg/{git,git/gogit}
+ [#1236](https://github.com/fluxcd/source-controller/pull/1236)
+
+Improvements:
+- build(deps): bump the ci group dependencies
+ [#1213](https://github.com/fluxcd/source-controller/pull/1213)
+ [#1224](https://github.com/fluxcd/source-controller/pull/1224)
+ [#1230](https://github.com/fluxcd/source-controller/pull/1230)
+ [#1235](https://github.com/fluxcd/source-controller/pull/1235)
+- docs: Add missing pem-encoding reference
+ [#1216](https://github.com/fluxcd/source-controller/pull/1216)
+- build(deps): bump github.com/cyphar/filepath-securejoin from 0.2.3 to 0.2.4
+ [#1227](https://github.com/fluxcd/source-controller/pull/1227)
+
+## 1.1.0
+
+**Release date:** 2023-08-23
+
+This minor release comes with API changes, bug fixes and several new features.
+
+All APIs that accept TLS data have been modified to adopt Secrets of type
+`kubernetes.io/tls`. This includes:
+* HelmRepository: The field `.spec.secretRef` has been __deprecated__ in favor
+of a new field [`.spec.certSecretRef`](https://github.com/fluxcd/source-controller/blob/v1.1.0/docs/spec/v1beta2/helmrepositories.md#cert-secret-reference).
+ This field is also supported by OCI HelmRepositories.
+* OCIRepository: Support for the `caFile`, `keyFile` and `certFile` keys in the
+ Secret specified in [`.spec.certSecretRef`](https://github.com/fluxcd/source-controller/blob/v1.1.0/docs/spec/v1beta2/ocirepositories.md#cert-secret-reference)
+ have been __deprecated__ in favor of `ca.crt`, `tls.key` and `tls.crt`.
+  Also, the Secret now must be of type `Opaque` or `kubernetes.io/tls`.
+* GitRepository: CA certificate can now be provided in the Secret specified in
+ `.spec.secretRef` using the `ca.crt` key, which takes precedence over the
+ existing `caFile` key.
+
+Furthermore, GitRepository has a couple of new features:
+* Proxy support: A new field [`.spec.proxySecretRef`](https://github.com/fluxcd/source-controller/blob/v1.1.0/docs/spec/v1/gitrepositories.md#proxy-secret-reference)
+ has been introduced which can be used to specify the proxy configuration to
+ use for all remote Git operations related to the particular object.
+* Tag verification: The field [`.spec.verification.mode`](https://github.com/fluxcd/source-controller/blob/v1.1.0/docs/spec/v1/gitrepositories.md#verification)
+ now supports the following values:
+ * HEAD: Verify the HEAD of the Git repository.
+ * Tag: Verify the tag specified in `.spec.ref`
+ * TagAndHead: Verify the tag specified in `.spec.ref` and the commit it
+ points to.
+
+Starting with this version, the controller now stops exporting an object's
+metrics as soon as the object has been deleted.
+
+In addition, the controller now consumes significantly less CPU and memory when
+reconciling Helm repository indexes.
+
+Lastly, a new flag `--interval-jitter-percentage` has been introduced which can
+be used to specify a jitter to the reconciliation interval in order to
+distribute the load more evenly when multiple objects are set up with the same
+interval.
+
+Improvements:
+- gitrepo: Add support for specifying proxy per `GitRepository`
+ [#1109](https://github.com/fluxcd/source-controller/pull/1109)
+- helmrepo: add `.spec.certSecretRef` for specifying TLS auth data
+ [#1160](https://github.com/fluxcd/source-controller/pull/1160)
+- Update docs on Azure identity
+ [#1167](https://github.com/fluxcd/source-controller/pull/1167)
+- gitrepo: document limitation of `spec.ref.name` with Azure Devops
+ [#1175](https://github.com/fluxcd/source-controller/pull/1175)
+- ocirepo: add cosign support for insecure HTTP registries
+ [#1176](https://github.com/fluxcd/source-controller/pull/1176)
+- Handle delete before adding finalizer
+ [#1177](https://github.com/fluxcd/source-controller/pull/1177)
+- Store Helm indexes in JSON format
+ [#1178](https://github.com/fluxcd/source-controller/pull/1178)
+- Unpin go-git and update to v5.8.1
+ [#1179](https://github.com/fluxcd/source-controller/pull/1179)
+- controller: jitter requeue interval
+ [#1184](https://github.com/fluxcd/source-controller/pull/1184)
+- cache: ensure new expiration is persisted
+ [#1185](https://github.com/fluxcd/source-controller/pull/1185)
+- gitrepo: add support for Git tag verification
+ [#1187](https://github.com/fluxcd/source-controller/pull/1187)
+- Update dependencies
+ [#1191](https://github.com/fluxcd/source-controller/pull/1191)
+- Adopt Kubernetes style TLS Secrets
+ [#1194](https://github.com/fluxcd/source-controller/pull/1194)
+- Update dependencies
+ [#1196](https://github.com/fluxcd/source-controller/pull/1196)
+- Helm OCI: Add support for TLS registries with self-signed certs
+ [#1197](https://github.com/fluxcd/source-controller/pull/1197)
+- Update dependencies
+ [#1202](https://github.com/fluxcd/source-controller/pull/1202)
+- Preserve url encoded path in normalized helm repository URL
+ [#1203](https://github.com/fluxcd/source-controller/pull/1203)
+- Fix link ref in API docs
+ [#1204](https://github.com/fluxcd/source-controller/pull/1204)
+
+Fixes:
+- Fix the helm cache arguments
+ [#1170](https://github.com/fluxcd/source-controller/pull/1170)
+- Delete stale metrics on object delete
+ [#1183](https://github.com/fluxcd/source-controller/pull/1183)
+- Disable system-wide git config in tests
+ [#1192](https://github.com/fluxcd/source-controller/pull/1192)
+- Fix links in API docs
+ [#1200](https://github.com/fluxcd/source-controller/pull/1200)
+
+## 1.0.1
+
+**Release date:** 2023-07-10
+
+This is a patch release that fixes the AWS authentication for cross-region ECR repositories.
+
+Fixes:
+- Update `fluxcd/pkg/oci` to fix ECR cross-region auth
+ [#1158](https://github.com/fluxcd/source-controller/pull/1158)
+
+## 1.0.0
+
+**Release date:** 2023-07-03
+
+This is the first stable release of the controller. From now on, this controller
+follows the [Flux 2 release cadence and support pledge](https://fluxcd.io/flux/releases/).
+
+Starting with this version, the build, release and provenance portions of the
+Flux project supply chain [provisionally meet SLSA Build Level 3](https://fluxcd.io/flux/security/slsa-assessment/).
+
+This release includes several minor changes that primarily focus on addressing
+forgotten and obsolete bits in the logic related to GitRepository objects.
+
+Including a removal of the `OptimizedGitClones` feature flag. If your
+Deployment is configured to disable this flag, you should remove it.
+
+In addition, dependencies have been updated to their latest version, including
+an update of Kubernetes to v1.27.3.
+
+For a comprehensive list of changes since `v0.36.x`, please refer to the
+changelog for [v1.0.0-rc.1](#100-rc1), [v1.0.0-rc.3](#100-rc3) and
+[v1.0.0-rc.4](#100-rc4).
+
+Improvements:
+- gitrepo: remove `OptimizedGitClones` as a feature gate
+ [#1124](https://github.com/fluxcd/source-controller/pull/1124)
+ [#1126](https://github.com/fluxcd/source-controller/pull/1126)
+- Update dependencies
+ [#1127](https://github.com/fluxcd/source-controller/pull/1127)
+ [#1147](https://github.com/fluxcd/source-controller/pull/1147)
+- Update Cosign to v2.1.0
+ [#1132](https://github.com/fluxcd/source-controller/pull/1132)
+- Align `go.mod` version with Kubernetes (Go 1.20)
+ [#1134](https://github.com/fluxcd/source-controller/pull/1134)
+- Add the verification key to the GitRepository verified status condition
+  [#1136](https://github.com/fluxcd/source-controller/pull/1136)
+- gitrepo: remove obsolete proxy docs
+ [#1144](https://github.com/fluxcd/source-controller/pull/1144)
+
+## 1.0.0-rc.5
+
+**Release date:** 2023-06-01
+
+This release candidate fixes a regression introduced in `1.0.0-rc.4` where
+support for Git servers that exclusively use v2 of the wire protocol like Azure
+DevOps and AWS CodeCommit was broken.
+
+Lastly, the controller's dependencies were updated to mitigate CVE-2023-33199.
+
+Improvements:
+- build(deps): bump github.com/sigstore/rekor from 1.1.1 to 1.2.0
+ [#1107](https://github.com/fluxcd/source-controller/pull/1107)
+
+Fixes:
+- Bump `fluxcd/pkg/git/gogit` to v0.12.0
+ [#1111](https://github.com/fluxcd/source-controller/pull/1111)
+
+## 1.0.0-rc.4
+
+**Release date:** 2023-05-26
+
+This release candidate comes with support for Kubernetes v1.27 and Cosign v2.
+It also enables the use of annotated Git tags with `.spec.ref.name` in
+`GitRepository`. Furthermore, it fixes a bug related to accessing Helm OCI
+charts on ACR using OIDC auth.
+
+Improvements:
+- build(deps): bump helm/kind-action from 1.5.0 to 1.7.0
+ [#1100](https://github.com/fluxcd/source-controller/pull/1100)
+- build(deps): bump sigstore/cosign-installer from 3.0.3 to 3.0.5
+ [#1101](https://github.com/fluxcd/source-controller/pull/1101)
+- build(deps): bump actions/setup-go from 4.0.0 to 4.0.1
+ [#1102](https://github.com/fluxcd/source-controller/pull/1102)
+- Update cosign to v2
+ [#1096](https://github.com/fluxcd/source-controller/pull/1096)
+- build(deps): bump github.com/sigstore/rekor from 0.12.1-0.20220915152154-4bb6f441c1b2 to 1.1.1
+ [#1083](https://github.com/fluxcd/source-controller/pull/1083)
+- Update controller-runtime and Kubernetes dependencies
+ [#1104](https://github.com/fluxcd/source-controller/pull/1104)
+- Update dependencies; switch to `go-git/go-git` and `pkg/tar`
+ [#1105](https://github.com/fluxcd/source-controller/pull/1105)
+
+## 1.0.0-rc.3
+
+**Release date:** 2023-05-12
+
+This release candidate introduces the verification of the Artifact digest in
+storage during reconciliation. This ensures that the Artifact is not tampered
+with after it was written to storage. When the digest does not match, the
+controller will emit a warning event and remove the file from storage, forcing
+the Artifact to be re-downloaded.
+
+In addition, files with executable permissions are now archived with their mode
+set to `0o744` instead of `0o644`. Allowing the extracted file to be executable
+by the user.
+
+Lastly, the controller's dependencies were updated to mitigate CVE-2023-1732
+and CVE-2023-2253, and the controller base image was updated to Alpine 3.18.
+
+Improvements:
+- Verify digest of Artifact in Storage
+ [#1088](https://github.com/fluxcd/source-controller/pull/1088)
+- build(deps): bump github.com/cloudflare/circl from 1.3.2 to 1.3.3
+ [#1092](https://github.com/fluxcd/source-controller/pull/1092)
+- build(deps): bump github.com/docker/distribution from 2.8.1+incompatible to 2.8.2+incompatible
+ [#1093](https://github.com/fluxcd/source-controller/pull/1093)
+- storage: set `0o744` for files with exec mode set
+ [#1094](https://github.com/fluxcd/source-controller/pull/1094)
+
+## 1.0.0-rc.2
+
+**Release date:** 2023-05-09
+
+This release candidate comes with various updates to the controller's dependencies,
+most notable, Helm was updated to v3.11.3.
+
+Improvements:
+- Update dependencies
+ [#1086](https://github.com/fluxcd/source-controller/pull/1086)
+- Set RecoverPanic globally across controllers
+ [#1077](https://github.com/fluxcd/source-controller/pull/1077)
+- Move controllers to internal/controller
+ [#1076](https://github.com/fluxcd/source-controller/pull/1076)
+
+## 1.0.0-rc.1
+
+**Release date:** 2023-03-30
+
+This release candidate promotes the `GitRepository` API from `v1beta2` to `v1`.
+The controller now supports horizontal scaling using
+sharding based on a label selector.
+
+In addition, support for Azure Workload Identity was added to
+`OCIRepositories`, `Buckets` and `HelmRepositories` when using `provider: azure`.
+
+### Highlights
+
+#### API changes
+
+The `GitRepository` kind was promoted from v1beta2 to v1 (GA) and deprecated fields were removed.
+
+The common types `Artifact`, `Conditions` and the `Source` interface were promoted to v1.
+
+The `gitrepositories.source.toolkit.fluxcd.io` CRD contains the following versions:
+- v1 (storage version)
+- v1beta2 (deprecated)
+- v1beta1 (deprecated)
+
+#### Upgrade procedure
+
+The `GitRepository` v1 API is backwards compatible with v1beta2, except for the following:
+- the deprecated field `.spec.gitImplementation` was removed
+- the unused field `.spec.accessFrom` was removed
+- the deprecated field `.status.contentConfigChecksum` was removed
+- the deprecated field `.status.artifact.checksum` was removed
+- the `.status.url` was removed in favor of the absolute `.status.artifact.url`
+
+To upgrade from v1beta2, after deploying the new CRD and controller,
+set `apiVersion: source.toolkit.fluxcd.io/v1` in the YAML files that
+contain `GitRepository` definitions and remove the deprecated fields if any.
+Bumping the API version in manifests can be done gradually.
+It is advised to not delay this procedure as the beta versions will be removed after 6 months.
+
+#### Sharding
+
+Starting with this release, the controller can be configured with
+`--watch-label-selector`, after which only objects with this label will
+be reconciled by the controller.
+
+This allows for horizontal scaling, where source-controller
+can be deployed multiple times with a unique label selector
+which is used as the sharding key.
+
+Note that this also requires configuration of the `--storage-adv-addr`
+to a unique address (in combination with a proper Service definition).
+This to ensure the Artifacts handled by the sharding controller point
+to a unique endpoint.
+
+In addition, Source object kinds which have a dependency on another
+kind (i.e. a HelmChart on a HelmRepository) need to have the same
+labels applied to work as expected.
+
+### Full changelog
+
+Improvements:
+- GA: Promote `GitRepository` API to `source.toolkit.fluxcd.io/v1`
+ [#1056](https://github.com/fluxcd/source-controller/pull/1056)
+- Add reconciler sharding capability based on label selector
+ [#1059](https://github.com/fluxcd/source-controller/pull/1059)
+- Support Azure Workload Identity
+ [#1048](https://github.com/fluxcd/source-controller/pull/1048)
+- Update dependencies
+ [#1062](https://github.com/fluxcd/source-controller/pull/1062)
+- Update workflows
+ [#1054](https://github.com/fluxcd/source-controller/pull/1054)
+
+## 0.36.1
+
+**Release date:** 2023-03-20
+
+This release fixes a bug where after reading a `.sourceignore` file in a
+subdirectory, the controller could start to ignore files from directories next
+to the directory the `.sourceignore` file was placed in.
+
+Fixes:
+- Update sourceignore to fix pattern domain bug
+ [#1050](https://github.com/fluxcd/source-controller/pull/1050)
+
+## 0.36.0
+
+**Release date:** 2023-03-08
+
+This release changes the format of the Artifact `Revision` field when using a
+GitRepository with a `.spec.ref.name` set (introduced in [`v0.35.0`](#0350)),
+changing it from `sha1:<commit SHA>` to `<ref name>@sha1:<commit SHA>`,
+offering a more precise reflection of the revision the Artifact was created
+from.
+
+In addition, `klog` is now configured to log using the same logger as the rest
+of the controller (providing a consistent log format).
+
+Lastly, the controller is now built using Go `1.20`, and the dependencies have
+been updated to their latest versions.
+
+Improvements:
+- Advertise absolute reference in Artifact for GitRepository name ref
+ [#1036](https://github.com/fluxcd/source-controller/pull/1036)
+- Update Go to 1.20
+ [#1040](https://github.com/fluxcd/source-controller/pull/1040)
+- Update dependencies
+ [#1040](https://github.com/fluxcd/source-controller/pull/1040)
+ [#1041](https://github.com/fluxcd/source-controller/pull/1041)
+ [#1043](https://github.com/fluxcd/source-controller/pull/1043)
+- Use `logger.SetLogger` to also configure `klog`
+ [#1044](https://github.com/fluxcd/source-controller/pull/1044)
+
+## 0.35.2
+
+**Release date:** 2023-02-23
+
+This release reduces the amount of memory consumed by the controller when
+reconciling HelmRepositories, by using only the digest of the YAML file as the
+Revision of the Artifact instead of the stable sorted version of the entire
+index. This aligns with the behavior before `v0.35.0`, and is therefore
+considered a bug fix.
+
+In addition, the dependencies have been updated to include some minor security
+patches.
+
+Note that `v0.35.0` contains breaking changes. Please refer to the [changelog
+entry](#0350) for more information.
+
+Fixes:
+- helm: only use Digest to calculate index revision
+ [#1035](https://github.com/fluxcd/source-controller/pull/1035)
+
+Improvements:
+- Update dependencies
+ [#1036](https://github.com/fluxcd/source-controller/pull/1036)
+
+## 0.35.1
+
+**Release date:** 2023-02-17
+
+This release addresses a hypothetical issue with the Artifact `Digest` field
+validation, where a patch of the Artifact could fail to be applied to an object
+due to the lack of an `omitempty` tag on the optional field. In reality, this
+issue is not possible to encounter, as the `Digest` field is always set when
+the Artifact is created.
+
+Note that `v0.35.0` contains breaking changes. Please refer to the [changelog
+entry](#0350) for more information.
+
+Fixes:
+- api: omit empty Digest in Artifact
+ [#1031](https://github.com/fluxcd/source-controller/pull/1031)
+
+## 0.35.0
+
+**Release date:** 2023-02-16
+
+This release introduces a new format for the Artifact `Revision`, and deprecates
+the `Checksum` field in favor of a new `Digest` field. In addition, it adds
+support for Git reference names in a GitRepository, and comes with the usual
+collection of dependency updates.
+
+### Highlights
+
+#### Support for Git reference names
+
+Starting with this version, it is possible to define a [Git Reference](https://git-scm.com/book/en/v2/Git-Internals-Git-References)
+in a GitRepository using `.spec.ref.name`.
+
+This opens the door to a range of functionalities not available before, as it
+for example allows the controller to follow pull (`refs/pull/<id>/head`) or
+merge (`refs/merge-requests/<id>/head`) requests, and allows a transition from
+the HEAD of a branch (`refs/heads/main`) to a tag (`refs/tags/v0.1.0`) by
+changing a single field value.
+
+Refer to the [GitRepository specification](https://github.com/fluxcd/source-controller/blob/v0.35.0/docs/spec/v1beta2/gitrepositories.md#name-example)
+for more details.
+
+#### Introduction of Artifact Digest
+
+The Artifact of a Source will now advertise a `Digest` field containing the
+checksum of the file advertised in the `Path`, and the alias of the algorithm
+used to calculate it, creating a "digest" in the format of `<algo>:<checksum>`.
+
+The algorithm is configurable using the newly introduced `--artifact-digest-algo`
+flag, which allows configuration of other algorithms (`sha384`, `sha512`, and
+`blake3`) than the hardcoded `sha256` default of the [now deprecated `Checksum`
+field](#deprecation-of-artifact-checksum).
+
+Please note that until the `Checksum` is fully deprecated, changing the
+algorithm is not yet advised (albeit supported), as this will result in a
+double computation.
+
+### :warning: Breaking changes
+
+#### Artifact Revision format
+
+The `Revision` format for an Artifact consisting of a named pointer (a Git
+branch or tag) and/or a specific revision (a Git commit SHA or other calculated
+checksum) has changed to contain an `@` separator opposed to `/`, and includes
+the algorithm alias as a prefix to a checksum (creating a "digest").
+In addition, `HEAD` is no longer used as a named pointer for exact commit
+references, but will now only advertise the commit itself.
+
+For example:
+
+- `main/1eabc9a41ca088515cab83f1cce49eb43e84b67f` => `main@sha1:1eabc9a41ca088515cab83f1cce49eb43e84b67f`
+- `HEAD/5394cb7f48332b2de7c17dd8b8384bbc84b7e738` => `sha1:5394cb7f48332b2de7c17dd8b8384bbc84b7e738`
+- `tag/55609ff9d959589ed917ce32e6bc0f0a36809565f308602c15c3668965979edc` => `tag@sha256:55609ff9d959589ed917ce32e6bc0f0a36809565f308602c15c3668965979edc`
+- `8fb62a09c9e48ace5463bf940dc15e85f525be4f230e223bbceef6e13024110c` => `sha256:8fb62a09c9e48ace5463bf940dc15e85f525be4f230e223bbceef6e13024110c`
+
+When the storage of the controller is backed by a Persistent Volume, the
+rollout of this new format happens for the next new revision the controller
+encounters. Otherwise, the new revision will be advertised as soon as the
+Artifact has been reproduced after the controller is deployed.
+
+Other Flux controllers making use of an Artifact are aware of the change in
+format, and work with it in a backwards compatible manner, avoiding observing
+a change of revision when this is actually just a change of format. If you
+programmatically make use of the Revision, please refer to [the
+`TransformLegacyRevision` helper](https://github.com/fluxcd/source-controller/blob/api/v0.35.0/api/v1beta2/artifact_types.go#L121)
+to allow a transition period in your application.
+
+For more information around this change, refer to
+[RFC-0005](https://github.com/fluxcd/flux2/tree/main/rfcs/0005-artifact-revision-and-digest#establish-an-artifact-revision-format).
+
+#### Deprecation of Artifact Checksum
+
+The `Checksum` field of an Artifact has been deprecated in favor of the newly
+introduced `Digest`. Until the deprecated field is removed in the next version
+of the API, the controller will continue to produce the SHA-256 checksum in
+addition to the digest. Changing the algorithm used to produce the digest using
+`--artifact-digest-algo` is therefore not yet advised (albeit supported), as
+this will result in a double computation.
+
+For more information around this change, refer to
+[RFC-0005](https://github.com/fluxcd/flux2/tree/main/rfcs/0005-artifact-revision-and-digest#introduce-a-digest-field).
+
+### Full changelog
+
+Improvements:
+- Introduction of Digest and change of Revision format
+ [#1001](https://github.com/fluxcd/source-controller/pull/1001)
+- Improve HelmRepository type switching from default to oci
+ [#1016](https://github.com/fluxcd/source-controller/pull/1016)
+- Apply default permission mode to all files/dirs in an artifact archive
+ [#1020](https://github.com/fluxcd/source-controller/pull/1020)
+- Add support for checking out Git references
+ [#1026](https://github.com/fluxcd/source-controller/pull/1026)
+- Update dependencies
+ [#1025](https://github.com/fluxcd/source-controller/pull/1025)
+ [#1028](https://github.com/fluxcd/source-controller/pull/1028)
+ [#1030](https://github.com/fluxcd/source-controller/pull/1030)
+
+Fixes:
+- Normalize Helm repository URL with query params properly
+ [#1015](https://github.com/fluxcd/source-controller/pull/1015)
+- Prevent panic when cloning empty Git repository
+ [#1021](https://github.com/fluxcd/source-controller/pull/1021)
+
+## 0.34.0
+
+**Release date:** 2023-01-31
+
+This prerelease comes with support for HTTPS bearer token authentication for Git
+repositories. The GitRepository authentication Secret is expected to contain the
+bearer token in `.data.bearerToken`.
+
+The caching of Secret and ConfigMap resources is disabled by
+default to improve memory usage. To opt-out from this behavior, start the
+controller with: `--feature-gates=CacheSecretsAndConfigMaps=true`.
+
+All the Source kinds now support progressive status updates. The progress made
+by the controller during reconciliation of a Source is reported immediately in
+the status of the Source object.
+
+In addition, the controller dependencies have been updated to Kubernetes v1.26.
+
+:warning: **Breaking change:** When using SSH authentication in GitRepository,
+if the referenced Secret contained `.data.username`, it was used as the SSH
+user. With this version, SSH user will be the username in the SSH address. For
+example, if the Git repository address is `ssh://flux@example.com`, `flux` will
+be used as the SSH user during SSH authentication. When no username is
+specified, `git` remains the default SSH user.
+
+Improvements:
+- Garbage collection lock file ignore tests
+ [#992](https://github.com/fluxcd/source-controller/pull/992)
+- purge minio test container at the end of tests
+ [#993](https://github.com/fluxcd/source-controller/pull/993)
+- Introduce Progressive status
+ [#974](https://github.com/fluxcd/source-controller/pull/974)
+- build(deps): bump github.com/containerd/containerd from 1.6.10 to 1.6.12
+ [#997](https://github.com/fluxcd/source-controller/pull/997)
+- fix typo in helmRepo secretRef spec CRD
+ [#996](https://github.com/fluxcd/source-controller/pull/996)
+- Fix OCIRepository testdata permissions
+ [#998](https://github.com/fluxcd/source-controller/pull/998)
+- Set rate limiter option in test reconcilers
+ [#999](https://github.com/fluxcd/source-controller/pull/999)
+- Update git dependencies for bearer token support
+ [#1003](https://github.com/fluxcd/source-controller/pull/1003)
+- Document support for bearer token authentication over https in gitrepositories
+ [#1000](https://github.com/fluxcd/source-controller/pull/1000)
+- Disable caching of secrets and configmaps
+ [#989](https://github.com/fluxcd/source-controller/pull/989)
+- Update dependencies
+ [#1008](https://github.com/fluxcd/source-controller/pull/1008)
+- build: Enable SBOM and SLSA Provenance
+ [#1009](https://github.com/fluxcd/source-controller/pull/1009)
+- Add note about sourceignore recursion
+ [#1007](https://github.com/fluxcd/source-controller/pull/1007)
+- CI: Replace engineerd/setup-kind with helm/kind-action
+ [#1010](https://github.com/fluxcd/source-controller/pull/1010)
+- helm/oci: Add context to chart download failure
+ [#1013](https://github.com/fluxcd/source-controller/pull/1013)
+
+## 0.33.0
+
+**Release date:** 2022-12-20
+
+This prerelease comes with a dedicated mux for the controller's fileserver. All
+code references to `libgit2` were removed, and the `spec.gitImplementation`
+field is no longer being honored, but rather `go-git` is used.
+For more information, refer to version 0.32.0's changelog, which started `libgit2`'s
+deprecation process.
+
+The controller's garbage collection now takes into consideration
+lock files.
+
+The feature gate `ForceGoGitImplementation` was removed, users passing it as their
+controller's startup args will need to remove it before upgrading.
+
+Fixes:
+- git: Fix issue with recurseSubmodules
+ [#975](https://github.com/fluxcd/source-controller/pull/975)
+- Fix aliased chart dependencies resolution
+ [#988](https://github.com/fluxcd/source-controller/pull/988)
+
+Improvements:
+- fileserver: Use new ServeMux
+ [#972](https://github.com/fluxcd/source-controller/pull/972)
+- Remove libgit2 and git2go from codebase
+ [#977](https://github.com/fluxcd/source-controller/pull/977)
+- Use Event v1 API metadata keys in notifications
+ [#990](https://github.com/fluxcd/source-controller/pull/990)
+- storage: take lock files into consideration while garbage collecting
+ [#991](https://github.com/fluxcd/source-controller/pull/991)
+- Migrate to Go Native fuzz and improve reliability
+ [#965](https://github.com/fluxcd/source-controller/pull/965)
+- build: Add tidy to make verify
+ [#966](https://github.com/fluxcd/source-controller/pull/966)
+- build: Add postbuild script for fuzzing
+ [#968](https://github.com/fluxcd/source-controller/pull/968)
+- build: Link libgit2 via LIB_FUZZING_ENGINE
+ [#969](https://github.com/fluxcd/source-controller/pull/969)
+- GitRepo: git impl. deprecation test cleanup
+ [#980](https://github.com/fluxcd/source-controller/pull/980)
+- minio: use container image for tests
+ [#981](https://github.com/fluxcd/source-controller/pull/981)
+- helm: Update SDK to v3.10.3
+ [#982](https://github.com/fluxcd/source-controller/pull/982)
+- Update fluxcd/pkg/oci dependency
+ [#983](https://github.com/fluxcd/source-controller/pull/983)
+- Update dependencies
+ [#985](https://github.com/fluxcd/source-controller/pull/985)
+
+## 0.32.1
+
+**Release date:** 2022-11-18
+
+This prerelease rectifies the `v0.32.0` release by retracting the previous Go
+version, bumping the controller api version and the controller deployment.
+
+## 0.32.0
+
+**Release date:** 2022-11-17
+
+This prerelease comes with a major refactoring of the controller's Git operations.
+The `go-git` implementation now supports all Git servers, including
+Azure DevOps, which previously was only supported by `libgit2`.
+
+This version initiates the soft deprecation of the `libgit2` implementation.
+The motivation for removing support for `libgit2` being:
+- Reliability: over the past months we managed to substantially reduce the
+issues users experienced, but there are still crashes happening when the controller
+runs over longer periods of time, or when under intense GC pressure.
+- Performance: due to the inherent nature of the `libgit2` implementation, which
+is a C library called via CGO through `git2go`, it will never perform as well as
+a pure Go implementation. At scale, memory pressure ensues, which then triggers
+the reliability issues above.
+- Lack of Shallow Clone Support.
+- Maintainability: supporting two Git implementations is a big task, even more
+so when one of them is in a completely different tech stack. Given its nature, to
+support `libgit2`, we have to maintain an additional repository. Statically built
+`libgit2` libraries need to be cross-compiled for all our supported platforms.
+And a lot of "unnecessary" code has to be in place to make building, testing and
+fuzzing work seamlessly.
+
+As a result the field `spec.gitImplementation` is ignored and the
+reconciliations will use `go-git`. To opt-out from this behaviour, start
+the controller with: `--feature-gates=ForceGoGitImplementation=false`.
+
+Users having any issues with `go-git` should report it to the Flux team,
+so any issues can be resolved before support for `libgit2` is completely
+removed from the codebase.
+
+Improvements:
+- Refactor Git operations and introduce go-git support for Azure DevOps and AWS CodeCommit
+ [#944](https://github.com/fluxcd/source-controller/pull/944)
+- Use Flux Event API v1beta1
+ [#952](https://github.com/fluxcd/source-controller/pull/952)
+- gogit: Add new ForceGoGitImplementation FeatureGate
+ [#945](https://github.com/fluxcd/source-controller/pull/945)
+- Remove nsswitch.conf creation from Dockerfile
+ [#958](https://github.com/fluxcd/source-controller/pull/958)
+- Update dependencies
+ [#960](https://github.com/fluxcd/source-controller/pull/960)
+ [#950](https://github.com/fluxcd/source-controller/pull/950)
+ [#959](https://github.com/fluxcd/source-controller/pull/959)
+- Upgrade to azure-sdk-for-go/storage/azblob v0.5.1
+ [#931](https://github.com/fluxcd/source-controller/pull/931)
+
+## 0.31.0
+
+**Release date:** 2022-10-21
+
+This prerelease comes with support for Cosign verification of Helm charts.
+The signatures verification can be configured by setting `HelmChart.spec.verify` with
+`provider` as `cosign` and a `secretRef` to a secret containing the public key.
+Cosign keyless verification is also supported, please see the
+[HelmChart API documentation](https://github.com/fluxcd/source-controller/blob/api/v0.31.0/docs/spec/v1beta2/helmcharts.md#verification)
+for more details.
+
+In addition, the controller dependencies have been updated
+to Kubernetes v1.25.3 and Helm v3.10.1.
+
+Improvements:
+- Implement Cosign verification for HelmCharts
+ [#925](https://github.com/fluxcd/source-controller/pull/925)
+- Update dependencies
+ [#942](https://github.com/fluxcd/source-controller/pull/942)
+
+Fixes:
+- Allow deleting suspended objects
+ [#937](https://github.com/fluxcd/source-controller/pull/937)
+
+## 0.30.1
+
+**Release date:** 2022-10-10
+
+This prerelease enables the use of container-level SAS tokens when using `Bucket` objects
+to access Azure Storage. The Azure SDK error message has also been enriched to hint Flux
+users the potential reasons in case of failure.
+
+Improvements:
+- List objects when checking if bucket exists to allow use of container-level SAS token
+ [#906](https://github.com/fluxcd/source-controller/pull/906)
+
+## 0.30.0
+
+**Release date:** 2022-09-29
+
+This prerelease adds support for Cosign verification in `OCIRepository` source.
+The signatures verification can be configured by setting `OCIRepository.spec.verify` with
+`provider` as `cosign` and a `secretRef` to a secret containing the public key.
+Cosign keyless verification is also supported, please see the
+[OCIRepository API documentation](https://github.com/fluxcd/source-controller/blob/api/v0.30.0/docs/spec/v1beta2/ocirepositories.md#verification)
+for more details.
+
+It also comes with strict validation rules for API fields which define a
+(time) duration. Effectively, this means values without a time unit (e.g. `ms`,
+`s`, `m`, `h`) will now be rejected by the API server. To stimulate sane
+configurations, the units `ns`, `us` and `µs` can no longer be configured, nor
+can `h` be set for fields defining a timeout value.
+
+In addition, the controller dependencies have been updated
+to Kubernetes controller-runtime v0.13.
+
+:warning: **Breaking changes:**
+- `.spec.interval` new validation pattern is `"^([0-9]+(\\.[0-9]+)?(ms|s|m|h))+$"`
+- `.spec.timeout` new validation pattern is `"^([0-9]+(\\.[0-9]+)?(ms|s|m))+$"`
+
+Improvements:
+- api: add custom validation for v1.Duration types
+ [#903](https://github.com/fluxcd/source-controller/pull/903)
+- [RFC-0003] Implement OCIRepository verification using Cosign
+ [#876](https://github.com/fluxcd/source-controller/pull/876)
+- Consider bipolarity conditions in Ready condition summarization
+ [#907](https://github.com/fluxcd/source-controller/pull/907)
+- Update Bucket related SDK dependencies
+ [#911](https://github.com/fluxcd/source-controller/pull/911)
+- Add custom CA certificates to system certificates
+ [#904](https://github.com/fluxcd/source-controller/pull/904)
+- [OCIRepository] Optimise OCI artifacts reconciliation
+ [#913](https://github.com/fluxcd/source-controller/pull/913)
+- Update dependencies
+ [#919](https://github.com/fluxcd/source-controller/pull/919)
+- Build with Go 1.19
+ [#920](https://github.com/fluxcd/source-controller/pull/920)
+- Bump libgit2 image and disable cosign verification for CI
+ [#921](https://github.com/fluxcd/source-controller/pull/921)
+- OCIRepositoryReconciler no-op improvements
+ [#917](https://github.com/fluxcd/source-controller/pull/917)
+- Accept a slice of remote.Option for cosign verification
+ [#916](https://github.com/fluxcd/source-controller/pull/916)
+- Update pkg/oci to v0.11.0
+ [#922](https://github.com/fluxcd/source-controller/pull/922)
+
+Fixes:
+- Handle nil OCI authenticator with malformed registry
+ [#897](https://github.com/fluxcd/source-controller/pull/897)
+
+## 0.29.0
+
+**Release date:** 2022-09-09
+
+This prerelease adds support for non-TLS container registries such
+as [Kubernetes Kind Docker Registry](https://kind.sigs.k8s.io/docs/user/local-registry/).
+Connecting to an in-cluster registry over plain HTTP,
+requires setting the `OCIRepository.spec.insecure` field to `true`.
+
+:warning: **Breaking change:** The controller logs have been aligned
+with the Kubernetes structured logging. For more details on the new logging
+structure please see: [fluxcd/flux2#3051](https://github.com/fluxcd/flux2/issues/3051).
+
+Improvements:
+- Align controller logs to Kubernetes structured logging
+ [#882](https://github.com/fluxcd/source-controller/pull/882)
+- [OCIRepository] Add support for non-TLS insecure container registries
+ [#881](https://github.com/fluxcd/source-controller/pull/881)
+- Fuzz optimisations
+ [#886](https://github.com/fluxcd/source-controller/pull/886)
+
+Fixes:
+- [OCI] Static credentials should take precedence over the OIDC provider
+ [#884](https://github.com/fluxcd/source-controller/pull/884)
+
+## 0.28.0
+
+**Release date:** 2022-08-29
+
+This prerelease adds support for contextual login to container registries when pulling
+Helm charts from Azure Container Registry, Amazon Elastic Container Registry
+and Google Artifact Registry. Contextual login for `HelmRepository`
+objects can be enabled by setting the `spec.provider` field to `azure`, `aws` or `gcp`.
+
+Selecting the OCI layer containing Kubernetes manifests is now possible
+when defining `OCIRepository` objects by setting the `spec.layerSelector.mediaType` field.
+
+In addition, the controller dependencies have been updated to Kubernetes v1.25.0 and Helm v3.9.4.
+
+Improvements:
+- [HelmRepository] Enable contextual login for OCI
+ [#873](https://github.com/fluxcd/source-controller/pull/873)
+- [OCIRepository] Select layer by media type
+ [#871](https://github.com/fluxcd/source-controller/pull/871)
+- Update Kubernetes packages to v1.25.0
+ [#875](https://github.com/fluxcd/source-controller/pull/875)
+- Update dependencies
+ [#869](https://github.com/fluxcd/source-controller/pull/869)
+- Ensure Go 1.18 for fuzz image
+ [#872](https://github.com/fluxcd/source-controller/pull/872)
+
+## 0.27.0
+
+**Release date:** 2022-08-17
+
+This prerelease adds support for SAS Keys when authenticating against Azure Blob Storage
+and improves the documentation for `OCIRepository`.
+
+The package `sourceignore`, which is used for excluding files from Flux internal artifacts,
+has been moved to `fluxcd/pkg/sourceignore`.
+
+Improvements:
+- OCIRepo docs: auto-login setup details
+ [#862](https://github.com/fluxcd/source-controller/pull/862)
+- Add Support for SAS keys in Azure Blob
+ [#738](https://github.com/fluxcd/source-controller/pull/738)
+- Use sourceignore from fluxcd/pkg/sourceignore
+ [#864](https://github.com/fluxcd/source-controller/pull/864)
+- Update dependencies
+ [#869](https://github.com/fluxcd/source-controller/pull/869)
+
## 0.26.1
**Release date:** 2022-08-11
@@ -170,7 +1574,7 @@ Improvements:
This prerelease fixes a regression for SSH host key verification
and fixes semver sorting for Helm OCI charts.
-In addition, the controller dependencies where update to Kubernetes v1.24.1.
+In addition, the controller dependencies have been updated to Kubernetes v1.24.1.
Fixes:
- helm: Fix sorting semver from OCI repository tags
@@ -1816,7 +3220,7 @@ using the [notification.fluxcd.io API](https://github.com/fluxcd/notification-co
**Release date:** 2020-06-24
This is the first prerelease ready for public testing. To get started
-testing, see the [GitOps Toolkit guide](https://fluxcd.io/docs/get-started/).
+testing, see the [GitOps Toolkit guide](https://fluxcd.io/flux/get-started/).
## 0.0.1-beta.2
diff --git a/DEVELOPMENT.md b/DEVELOPMENT.md
index 18b5829c9..11d05ad83 100644
--- a/DEVELOPMENT.md
+++ b/DEVELOPMENT.md
@@ -1,6 +1,6 @@
# Development
-> **Note:** Please take a look at
+> **Note:** Please take a look at
> to find out about how to contribute to Flux and how to interact with the
> Flux Development team.
@@ -13,30 +13,9 @@ There are a number of dependencies required to be able to run the controller and
- [Install Docker](https://docs.docker.com/engine/install/)
- (Optional) [Install Kubebuilder](https://book.kubebuilder.io/quick-start.html#installation)
-The [libgit2](https://libgit2.org/) dependency is now automatically managed by the Makefile logic.
-However, it depends on [pkg-config](https://freedesktop.org/wiki/Software/pkg-config/) being installed:
-
-### macOS
-
-```console
-$ # Ensure pkg-config is installed
-$ brew install pkg-config
-```
-
-### Linux
-
-```console
-$ # Ensure pkg-config is installed
-$ pacman -S pkgconf
-```
-
-**Note:** Example shown is for Arch Linux, but likewise procedure can be
-followed using any other package manager. Some distributions may have slight
-variation of package names (e.g. `apt install -y pkg-config`).
-
In addition to the above, the following dependencies are also used by some of the `make` targets:
-- `controller-gen` (v0.7.0)
+- `controller-gen` (v0.19.0)
- `gen-crd-api-reference-docs` (v0.3.0)
- `setup-envtest` (latest)
@@ -45,7 +24,7 @@ If any of the above dependencies are not present on your system, the first invoc
## How to run the test suite
Prerequisites:
-* Go >= 1.18
+* Go >= 1.25
You can run the test suite by simply doing
@@ -79,7 +58,7 @@ make run
### Building the container image
-Set the name of the container image to be created from the source code. This will be used
+Set the name of the container image to be created from the source code. This will be used
when building, pushing and referring to the image on YAML files:
```sh
@@ -100,7 +79,7 @@ make docker-push
```
Alternatively, the three steps above can be done in a single line:
-
+
```sh
IMG=registry-path/source-controller TAG=latest BUILD_ARGS=--push \
make docker-build
@@ -149,18 +128,12 @@ Create a `.vscode/launch.json` file:
"type": "go",
"request": "launch",
"mode": "auto",
- "envFile": "${workspaceFolder}/build/.env",
- "program": "${workspaceFolder}/main.go"
+ "program": "${workspaceFolder}/main.go",
+ "args": ["--storage-adv-addr=:0", "--storage-path=${workspaceFolder}/bin/data"]
}
]
}
```
-Create the environment file containing details on how to load
-`libgit2` dependencies:
-```bash
-make env
-```
-
Start debugging by either clicking `Run` > `Start Debugging` or using
the relevant shortcut.
diff --git a/Dockerfile b/Dockerfile
index 0c5f645d7..0f7c6f849 100644
--- a/Dockerfile
+++ b/Dockerfile
@@ -1,28 +1,15 @@
-ARG BASE_VARIANT=alpine
-ARG GO_VERSION=1.18
-ARG XX_VERSION=1.1.2
-
-ARG LIBGIT2_IMG=ghcr.io/fluxcd/golang-with-libgit2-only
-ARG LIBGIT2_TAG=v0.2.0
-
-FROM ${LIBGIT2_IMG}:${LIBGIT2_TAG} AS libgit2-libs
+ARG GO_VERSION=1.25
+ARG XX_VERSION=1.6.1
FROM --platform=$BUILDPLATFORM tonistiigi/xx:${XX_VERSION} AS xx
-FROM --platform=$BUILDPLATFORM golang:${GO_VERSION}-${BASE_VARIANT} as gostable
-
-FROM gostable AS go-linux
-
-# Build-base consists of build platform dependencies and xx.
-# These will be used at current arch to yield execute the cross compilations.
-FROM go-${TARGETOS} AS build-base
-
-RUN apk add --no-cache clang lld pkgconfig
+# Docker buildkit multi-arch build requires golang alpine
+FROM --platform=$BUILDPLATFORM golang:${GO_VERSION}-alpine AS builder
+# Copy the build utilities.
COPY --from=xx / /
-# build-go-mod can still be cached at build platform architecture.
-FROM build-base as build-go-mod
+ARG TARGETPLATFORM
# Configure workspace
WORKDIR /workspace
@@ -37,56 +24,24 @@ COPY go.sum go.sum
# Cache modules
RUN go mod download
-
-# Build stage install per target platform
-# dependency and effectively cross compile the application.
-FROM build-go-mod as build
-
-ARG TARGETPLATFORM
-
-COPY --from=libgit2-libs /usr/local/ /usr/local/
-
-# Some dependencies have to installed
-# for the target platform: https://github.com/tonistiigi/xx#go--cgo
-RUN xx-apk add musl-dev gcc clang lld
-
-WORKDIR /workspace
-
# Copy source code
COPY main.go main.go
-COPY controllers/ controllers/
-COPY pkg/ pkg/
COPY internal/ internal/
ARG TARGETPLATFORM
ARG TARGETARCH
-ENV CGO_ENABLED=1
-
-# Instead of using xx-go, (cross) compile with vanilla go leveraging musl tool chain.
-RUN export PKG_CONFIG_PATH="/usr/local/$(xx-info triple)/lib/pkgconfig" && \
- export CGO_LDFLAGS="$(pkg-config --static --libs --cflags libgit2) -static -fuse-ld=lld" && \
- xx-go build \
- -ldflags "-s -w" \
- -tags 'netgo,osusergo,static_build' \
- -o /source-controller -trimpath main.go;
-# Ensure that the binary was cross-compiled correctly to the target platform.
-RUN xx-verify --static /source-controller
+# build without specifying the arch
+ENV CGO_ENABLED=0
+RUN xx-go build -trimpath -a -o source-controller main.go
-
-FROM alpine:3.16
+FROM alpine:3.22
ARG TARGETPLATFORM
RUN apk --no-cache add ca-certificates \
&& update-ca-certificates
-# Create minimal nsswitch.conf file to prioritize the usage of /etc/hosts over DNS queries.
-# https://github.com/gliderlabs/docker-alpine/issues/367#issuecomment-354316460
-RUN [ ! -e /etc/nsswitch.conf ] && echo 'hosts: files dns' > /etc/nsswitch.conf
-
-# Copy over binary from build
-COPY --from=build /source-controller /usr/local/bin/
-COPY ATTRIBUTIONS.md /
+COPY --from=builder /workspace/source-controller /usr/local/bin/
USER 65534:65534
ENTRYPOINT [ "source-controller" ]
diff --git a/MAINTAINERS b/MAINTAINERS
index 7b896b063..3a1bb4156 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -7,6 +7,4 @@ from the main Flux v2 git repository, as listed in
https://github.com/fluxcd/flux2/blob/main/MAINTAINERS
-In alphabetical order:
-
-Paulo Gomes, Weaveworks (github: @pjbgf, slack: pjbgf)
+Dipti Pai, Microsoft (github: @dipti-pai, slack: Dipti Pai)
diff --git a/Makefile b/Makefile
index 47b44a0a4..28226af5d 100644
--- a/Makefile
+++ b/Makefile
@@ -2,16 +2,15 @@
IMG ?= fluxcd/source-controller
TAG ?= latest
-# Base image used to build the Go binary
-LIBGIT2_IMG ?= ghcr.io/fluxcd/golang-with-libgit2-only
-LIBGIT2_TAG ?= v0.2.0
-
# Allows for defining additional Go test args, e.g. '-tags integration'.
GO_TEST_ARGS ?= -race
# Allows for filtering tests based on the specified prefix
GO_TEST_PREFIX ?=
+# Defines whether cosign verification should be skipped.
+SKIP_COSIGN_VERIFICATION ?= false
+
# Allows for defining additional Docker buildx arguments,
# e.g. '--push'.
BUILD_ARGS ?=
@@ -30,21 +29,17 @@ REPOSITORY_ROOT := $(shell git rev-parse --show-toplevel)
BUILD_DIR := $(REPOSITORY_ROOT)/build
# Other dependency versions
-ENVTEST_BIN_VERSION ?= 1.19.2
+ENVTEST_BIN_VERSION ?= 1.24.0
-# Caches libgit2 versions per tag, "forcing" rebuild only when needed.
-LIBGIT2_PATH := $(BUILD_DIR)/libgit2/$(LIBGIT2_TAG)
-LIBGIT2_LIB_PATH := $(LIBGIT2_PATH)/lib
-LIBGIT2 := $(LIBGIT2_LIB_PATH)/libgit2.a
+# FUZZ_TIME defines the max amount of time, in Go Duration,
+# each fuzzer should run for.
+FUZZ_TIME ?= 1m
-export CGO_ENABLED=1
-export PKG_CONFIG_PATH=$(LIBGIT2_LIB_PATH)/pkgconfig
-export CGO_LDFLAGS=$(shell PKG_CONFIG_PATH=$(PKG_CONFIG_PATH) pkg-config --libs --static --cflags libgit2 2>/dev/null)
GO_STATIC_FLAGS=-ldflags "-s -w" -tags 'netgo,osusergo,static_build$(addprefix ,,$(GO_TAGS))'
# API (doc) generation utilities
-CONTROLLER_GEN_VERSION ?= v0.7.0
-GEN_API_REF_DOCS_VERSION ?= v0.3.0
+CONTROLLER_GEN_VERSION ?= v0.19.0
+GEN_API_REF_DOCS_VERSION ?= e327d0730470cbd61b06300f81c5fcf91c23c113
# If gobin not set, create one on ./build and add to path.
ifeq (,$(shell go env GOBIN))
@@ -66,40 +61,38 @@ ifeq ($(shell uname -s),Darwin)
ENVTEST_ARCH=amd64
endif
-all: build
+all: manager
-build: check-deps $(LIBGIT2) ## Build manager binary
+# Build manager binary
+manager: generate fmt vet
go build $(GO_STATIC_FLAGS) -o $(BUILD_DIR)/bin/manager main.go
KUBEBUILDER_ASSETS?="$(shell $(ENVTEST) --arch=$(ENVTEST_ARCH) use -i $(ENVTEST_KUBERNETES_VERSION) --bin-dir=$(ENVTEST_ASSETS_DIR) -p path)"
-test: $(LIBGIT2) install-envtest test-api check-deps ## Run all tests
+test: install-envtest test-api ## Run all tests
HTTPS_PROXY="" HTTP_PROXY="" \
KUBEBUILDER_ASSETS=$(KUBEBUILDER_ASSETS) \
GIT_CONFIG_GLOBAL=/dev/null \
+ GIT_CONFIG_NOSYSTEM=true \
go test $(GO_STATIC_FLAGS) \
./... \
$(GO_TEST_ARGS) \
-coverprofile cover.out
-test-ctrl: $(LIBGIT2) install-envtest test-api check-deps ## Run controller tests
+test-ctrl: install-envtest test-api ## Run controller tests
HTTPS_PROXY="" HTTP_PROXY="" \
KUBEBUILDER_ASSETS=$(KUBEBUILDER_ASSETS) \
GIT_CONFIG_GLOBAL=/dev/null \
go test $(GO_STATIC_FLAGS) \
-run "^$(GO_TEST_PREFIX).*" \
- -v ./controllers \
+ -v ./internal/controller \
-coverprofile cover.out
-check-deps:
-ifeq ($(shell uname -s),Darwin)
- if ! command -v pkg-config &> /dev/null; then echo "pkg-config is required"; exit 1; fi
-endif
-
test-api: ## Run api tests
cd api; go test $(GO_TEST_ARGS) ./... -coverprofile cover.out
-run: $(LIBGIT2) generate fmt vet manifests ## Run against the configured Kubernetes cluster in ~/.kube/config
- go run $(GO_STATIC_FLAGS) ./main.go
+run: generate fmt vet manifests ## Run against the configured Kubernetes cluster in ~/.kube/config
+ @mkdir -p $(PWD)/bin/data
+ go run $(GO_STATIC_FLAGS) ./main.go --storage-adv-addr=:0 --storage-path=$(PWD)/bin/data
install: manifests ## Install CRDs into a cluster
kustomize build config/crd | kubectl apply -f -
@@ -122,18 +115,17 @@ manifests: controller-gen ## Generate manifests, e.g. CRD, RBAC, etc.
cd api; $(CONTROLLER_GEN) $(CRD_OPTIONS) rbac:roleName=manager-role paths="./..." output:crd:artifacts:config="../config/crd/bases"
api-docs: gen-crd-api-reference-docs ## Generate API reference documentation
- $(GEN_CRD_API_REFERENCE_DOCS) -api-dir=./api/v1beta2 -config=./hack/api-docs/config.json -template-dir=./hack/api-docs/template -out-file=./docs/api/source.md
+ $(GEN_CRD_API_REFERENCE_DOCS) -api-dir=./api/v1 -config=./hack/api-docs/config.json -template-dir=./hack/api-docs/template -out-file=./docs/api/v1/source.md
tidy: ## Run go mod tidy
- cd api; rm -f go.sum; go mod tidy -compat=1.18
- rm -f go.sum; go mod tidy -compat=1.18
+ cd api; rm -f go.sum; go mod tidy -compat=1.25
+ rm -f go.sum; go mod tidy -compat=1.25
fmt: ## Run go fmt against code
go fmt ./...
cd api; go fmt ./...
- cd tests/fuzz; go fmt .
-vet: $(LIBGIT2) ## Run go vet against code
+vet: ## Run go vet against code
go vet ./...
cd api; go vet ./...
@@ -142,8 +134,6 @@ generate: controller-gen ## Generate API code
docker-build: ## Build the Docker image
docker buildx build \
- --build-arg LIBGIT2_IMG=$(LIBGIT2_IMG) \
- --build-arg LIBGIT2_TAG=$(LIBGIT2_TAG) \
--platform=$(BUILD_PLATFORMS) \
-t $(IMG):$(TAG) \
$(BUILD_ARGS) .
@@ -155,13 +145,13 @@ docker-push: ## Push Docker image
CONTROLLER_GEN = $(GOBIN)/controller-gen
.PHONY: controller-gen
controller-gen: ## Download controller-gen locally if necessary.
- $(call go-install-tool,$(CONTROLLER_GEN),sigs.k8s.io/controller-tools/cmd/controller-gen@v0.7.0)
+ $(call go-install-tool,$(CONTROLLER_GEN),sigs.k8s.io/controller-tools/cmd/controller-gen@$(CONTROLLER_GEN_VERSION))
# Find or download gen-crd-api-reference-docs
GEN_CRD_API_REFERENCE_DOCS = $(GOBIN)/gen-crd-api-reference-docs
.PHONY: gen-crd-api-reference-docs
gen-crd-api-reference-docs: ## Download gen-crd-api-reference-docs locally if necessary
- $(call go-install-tool,$(GEN_CRD_API_REFERENCE_DOCS),github.com/ahmetb/gen-crd-api-reference-docs@3f29e6853552dcf08a8e846b1225f275ed0f3e3b)
+ $(call go-install-tool,$(GEN_CRD_API_REFERENCE_DOCS),github.com/ahmetb/gen-crd-api-reference-docs@$(GEN_API_REF_DOCS_VERSION))
ENVTEST = $(GOBIN)/setup-envtest
.PHONY: envtest
@@ -176,40 +166,14 @@ install-envtest: setup-envtest ## Download envtest binaries locally.
# setup-envtest sets anything below k8s to 0555
chmod -R u+w $(BUILD_DIR)/testbin
-libgit2: $(LIBGIT2) ## Detect or download libgit2 library
-
-COSIGN = $(GOBIN)/cosign
-$(LIBGIT2):
- $(call go-install-tool,$(COSIGN),github.com/sigstore/cosign/cmd/cosign@latest)
-
- IMG=$(LIBGIT2_IMG) TAG=$(LIBGIT2_TAG) PATH=$(PATH):$(GOBIN) ./hack/install-libraries.sh
-
-
.PHONY: help
help: ## Display this help menu
@awk 'BEGIN {FS = ":.*##"; printf "\nUsage:\n make \033[36m\033[0m\n"} /^[a-zA-Z_0-9-]+:.*?##/ { printf " \033[36m%-20s\033[0m %s\n", $$1, $$2 } /^##@/ { printf "\n\033[1m%s\033[0m\n", substr($$0, 5) } ' $(MAKEFILE_LIST)
-update-attributions:
- ./hack/update-attributions.sh
-
e2e:
./hack/ci/e2e.sh
-verify: update-attributions fmt vet manifests api-docs
-ifneq ($(shell grep -o 'LIBGIT2_IMG ?= \w.*' Makefile | cut -d ' ' -f 3):$(shell grep -o 'LIBGIT2_TAG ?= \w.*' Makefile | cut -d ' ' -f 3), \
- $(shell grep -o "LIBGIT2_IMG=\w.*" Dockerfile | cut -d'=' -f2):$(shell grep -o "LIBGIT2_TAG=\w.*" Dockerfile | cut -d'=' -f2))
- @{ \
- echo "LIBGIT2_IMG and LIBGIT2_TAG must match in both Makefile and Dockerfile"; \
- exit 1; \
- }
-endif
-ifneq ($(shell grep -o 'LIBGIT2_TAG ?= \w.*' Makefile | cut -d ' ' -f 3), $(shell grep -o "LIBGIT2_TAG=.*" tests/fuzz/oss_fuzz_build.sh | sed 's;LIBGIT2_TAG="$${LIBGIT2_TAG:-;;g' | sed 's;}";;g'))
- @{ \
- echo "LIBGIT2_TAG must match in both Makefile and tests/fuzz/oss_fuzz_build.sh"; \
- exit 1; \
- }
-endif
-
+verify: fmt vet manifests api-docs tidy
@if [ ! "$$(git status --porcelain --untracked-files=no)" = "" ]; then \
echo "working directory is dirty:"; \
git --no-pager diff; \
@@ -224,38 +188,33 @@ TMP_DIR=$$(mktemp -d) ;\
cd $$TMP_DIR ;\
go mod init tmp ;\
echo "Downloading $(2)" ;\
-env -i bash -c "GOBIN=$(GOBIN) PATH=$(PATH) GOPATH=$(shell go env GOPATH) GOCACHE=$(shell go env GOCACHE) go install $(2)" ;\
+env -i bash -c "GOBIN=$(GOBIN) PATH=\"$(PATH)\" GOPATH=$(shell go env GOPATH) GOCACHE=$(shell go env GOCACHE) go install $(2)" ;\
rm -rf $$TMP_DIR ;\
}
endef
-# Build fuzzers
-fuzz-build: $(LIBGIT2)
- rm -rf $(BUILD_DIR)/fuzz/
- mkdir -p $(BUILD_DIR)/fuzz/out/
+# Build fuzzers used by oss-fuzz.
+fuzz-build:
+ rm -rf $(shell pwd)/build/fuzz/
+ mkdir -p $(shell pwd)/build/fuzz/out/
docker build . --tag local-fuzzing:latest -f tests/fuzz/Dockerfile.builder
docker run --rm \
-e FUZZING_LANGUAGE=go -e SANITIZER=address \
-e CIFUZZ_DEBUG='True' -e OSS_FUZZ_PROJECT_NAME=fluxcd \
- -v "$(BUILD_DIR)/fuzz/out":/out \
+ -v "$(shell pwd)/build/fuzz/out":/out \
local-fuzzing:latest
+# Run each fuzzer once to ensure they will work when executed by oss-fuzz.
fuzz-smoketest: fuzz-build
docker run --rm \
- -v "$(BUILD_DIR)/fuzz/out":/out \
+ -v "$(shell pwd)/build/fuzz/out":/out \
-v "$(shell pwd)/tests/fuzz/oss_fuzz_run.sh":/runner.sh \
local-fuzzing:latest \
bash -c "/runner.sh"
-# Creates an env file that can be used to load all source-controller's dependencies
-# this is handy when you want to run adhoc debug sessions on tests or start the
-# controller in a new debug session.
-env: $(LIBGIT2)
- echo 'GO_ENABLED="1"' > $(BUILD_DIR)/.env
- echo 'PKG_CONFIG_PATH="$(PKG_CONFIG_PATH)"' >> $(BUILD_DIR)/.env
- echo 'LIBRARY_PATH="$(LIBRARY_PATH)"' >> $(BUILD_DIR)/.env
- echo 'CGO_CFLAGS="$(CGO_CFLAGS)"' >> $(BUILD_DIR)/.env
- echo 'CGO_LDFLAGS="$(CGO_LDFLAGS)"' >> $(BUILD_DIR)/.env
- echo 'KUBEBUILDER_ASSETS=$(KUBEBUILDER_ASSETS)' >> $(BUILD_DIR)/.env
- echo 'GIT_CONFIG_GLOBAL=/dev/null' >> $(BUILD_DIR)/.env
+# Run fuzz tests for the duration set in FUZZ_TIME.
+fuzz-native:
+ KUBEBUILDER_ASSETS=$(KUBEBUILDER_ASSETS) \
+ FUZZ_TIME=$(FUZZ_TIME) \
+ ./tests/fuzz/native_go_run.sh
diff --git a/PROJECT b/PROJECT
index 10d980ac1..9d89d81be 100644
--- a/PROJECT
+++ b/PROJECT
@@ -1,12 +1,21 @@
domain: toolkit.fluxcd.io
repo: github.com/fluxcd/source-controller
resources:
+- group: source
+ kind: GitRepository
+ version: v1
- group: source
kind: GitRepository
version: v1beta2
+- group: source
+ kind: HelmRepository
+ version: v1
- group: source
kind: HelmRepository
version: v1beta2
+- group: source
+ kind: HelmChart
+ version: v1
- group: source
kind: HelmChart
version: v1beta2
@@ -28,4 +37,13 @@ resources:
- group: source
kind: OCIRepository
version: v1beta2
+- group: source
+ kind: Bucket
+ version: v1
+- group: source
+ kind: OCIRepository
+ version: v1
+- group: source
+ kind: ExternalArtifact
+ version: v1
version: "2"
diff --git a/README.md b/README.md
index 5f9a3f930..6f07b2e00 100644
--- a/README.md
+++ b/README.md
@@ -5,23 +5,49 @@
[](https://goreportcard.com/report/github.com/fluxcd/source-controller)
[](https://github.com/fluxcd/source-controller/blob/main/LICENSE)
[](https://github.com/fluxcd/source-controller/releases)
-
+
The source-controller is a Kubernetes operator, specialised in artifacts acquisition
-from external sources such as Git, Helm repositories and S3 buckets.
+from external sources such as Git, OCI, Helm repositories and S3-compatible buckets.
The source-controller implements the
-[source.toolkit.fluxcd.io](https://github.com/fluxcd/source-controller/tree/main/docs/spec/v1beta2) API
-and is a core component of the [GitOps toolkit](https://fluxcd.io/docs/components/).
+[source.toolkit.fluxcd.io](docs/spec/README.md) API
+and is a core component of the [GitOps toolkit](https://fluxcd.io/flux/components/).

-Features:
+## APIs
+
+| Kind | API Version |
+|----------------------------------------------------|-------------------------------|
+| [GitRepository](docs/spec/v1/gitrepositories.md) | `source.toolkit.fluxcd.io/v1` |
+| [OCIRepository](docs/spec/v1/ocirepositories.md) | `source.toolkit.fluxcd.io/v1` |
+| [HelmRepository](docs/spec/v1/helmrepositories.md) | `source.toolkit.fluxcd.io/v1` |
+| [HelmChart](docs/spec/v1/helmcharts.md) | `source.toolkit.fluxcd.io/v1` |
+| [Bucket](docs/spec/v1/buckets.md) | `source.toolkit.fluxcd.io/v1` |
+
+## Features
-* authenticates to sources (SSH, user/password, API token)
-* validates source authenticity (PGP)
+* authenticates to sources (SSH, user/password, API token, Workload Identity)
+* validates source authenticity (PGP, Cosign, Notation)
* detects source changes based on update policies (semver)
* fetches resources on-demand and on-a-schedule
* packages the fetched resources into a well-known format (tar.gz, yaml)
* makes the artifacts addressable by their source identifier (sha, version, ts)
* makes the artifacts available in-cluster to interested 3rd parties
* notifies interested 3rd parties of source changes and availability (status conditions, events, hooks)
-* reacts to Git push and Helm chart upload events (via [notification-controller](https://github.com/fluxcd/notification-controller))
+* reacts to Git, Helm and OCI artifacts push events (via [notification-controller](https://github.com/fluxcd/notification-controller))
+
+## Guides
+
+* [Get started with Flux](https://fluxcd.io/flux/get-started/)
+* [Setup Webhook Receivers](https://fluxcd.io/flux/guides/webhook-receivers/)
+* [Setup Notifications](https://fluxcd.io/flux/guides/notifications/)
+* [How to build, publish and consume OCI Artifacts with Flux](https://fluxcd.io/flux/cheatsheets/oci-artifacts/)
+
+## Roadmap
+
+The roadmap for the Flux family of projects can be found at .
+
+## Contributing
+
+This project is Apache 2.0 licensed and accepts contributions via GitHub pull requests.
+To start contributing please see the [development guide](DEVELOPMENT.md).
diff --git a/api/go.mod b/api/go.mod
index 790a076de..3d821f349 100644
--- a/api/go.mod
+++ b/api/go.mod
@@ -1,30 +1,34 @@
module github.com/fluxcd/source-controller/api
-go 1.18
+go 1.25.0
require (
- github.com/fluxcd/pkg/apis/acl v0.0.3
- github.com/fluxcd/pkg/apis/meta v0.14.2
- k8s.io/apimachinery v0.24.1
- sigs.k8s.io/controller-runtime v0.11.2
+ github.com/fluxcd/pkg/apis/acl v0.9.0
+ github.com/fluxcd/pkg/apis/meta v1.21.0
+ k8s.io/apimachinery v0.34.0
+ sigs.k8s.io/controller-runtime v0.22.1
)
// Fix CVE-2022-28948
replace gopkg.in/yaml.v3 => gopkg.in/yaml.v3 v3.0.1
require (
- github.com/go-logr/logr v1.2.2 // indirect
+ github.com/fxamacker/cbor/v2 v2.9.0 // indirect
+ github.com/go-logr/logr v1.4.2 // indirect
github.com/gogo/protobuf v1.3.2 // indirect
- github.com/google/gofuzz v1.2.0 // indirect
github.com/json-iterator/go v1.1.12 // indirect
+ github.com/kr/pretty v0.3.1 // indirect
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
- github.com/modern-go/reflect2 v1.0.2 // indirect
- golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd // indirect
- golang.org/x/text v0.3.7 // indirect
+ github.com/modern-go/reflect2 v1.0.3-0.20250322232337-35a7c28c31ee // indirect
+ github.com/x448/float16 v0.8.4 // indirect
+ go.yaml.in/yaml/v2 v2.4.2 // indirect
+ golang.org/x/net v0.43.0 // indirect
+ golang.org/x/text v0.28.0 // indirect
+ gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c // indirect
gopkg.in/inf.v0 v0.9.1 // indirect
- gopkg.in/yaml.v2 v2.4.0 // indirect
- k8s.io/klog/v2 v2.60.1 // indirect
- k8s.io/utils v0.0.0-20220210201930-3a6ce19ff2f9 // indirect
- sigs.k8s.io/json v0.0.0-20211208200746-9f7c6b3444d2 // indirect
- sigs.k8s.io/structured-merge-diff/v4 v4.2.1 // indirect
+ k8s.io/klog/v2 v2.130.1 // indirect
+ k8s.io/utils v0.0.0-20250604170112-4c0f3b243397 // indirect
+ sigs.k8s.io/json v0.0.0-20241014173422-cfa47c3a1cc8 // indirect
+ sigs.k8s.io/randfill v1.0.0 // indirect
+ sigs.k8s.io/structured-merge-diff/v6 v6.3.0 // indirect
)
diff --git a/api/go.sum b/api/go.sum
index b311f6dce..1aa815d66 100644
--- a/api/go.sum
+++ b/api/go.sum
@@ -1,252 +1,118 @@
-cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
-github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
-github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46/go.mod h1:3wb06e3pkSAbeQ52E9H9iFoQsEEwGN64994WTCIhntQ=
-github.com/PuerkitoBio/purell v1.1.1/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0=
-github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE=
-github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5/go.mod h1:wHh0iHkYZB8zMSxRWpUBQtwG5a7fFgvEO+odwuTv2gs=
-github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY=
-github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
-github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
-github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815/go.mod h1:WwZ+bS3ebgob9U8Nd0kOddGdZWjyMGR8Wziv+TBNwSE=
-github.com/elazarl/goproxy v0.0.0-20180725130230-947c36da3153/go.mod h1:/Zj4wYkgs4iZTTu3o/KG3Itv/qCCa8VVMlb3i9OVuzc=
-github.com/emicklei/go-restful v0.0.0-20170410110728-ff4f55a20633/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs=
-github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
-github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
-github.com/evanphx/json-patch v4.12.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk=
-github.com/fluxcd/pkg/apis/acl v0.0.3 h1:Lw0ZHdpnO4G7Zy9KjrzwwBmDZQuy4qEjaU/RvA6k1lc=
-github.com/fluxcd/pkg/apis/acl v0.0.3/go.mod h1:XPts6lRJ9C9fIF9xVWofmQwftvhY25n1ps7W9xw0XLU=
-github.com/fluxcd/pkg/apis/meta v0.14.2 h1:/Hf7I/Vz01vv3m7Qx7DtQvrzAL1oVt0MJcLb/I1Y1HE=
-github.com/fluxcd/pkg/apis/meta v0.14.2/go.mod h1:ijZ61VG/8T3U17gj0aFL3fdtZL+mulD6V8VrLLUCAgM=
-github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
-github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ=
-github.com/fsnotify/fsnotify v1.5.1 h1:mZcQUHVQUQWoPXXtuf9yuEXKudkV2sx1E06UadKWpgI=
-github.com/getkin/kin-openapi v0.76.0/go.mod h1:660oXbgy5JFMKreazJaQTw7o+X00qeSyhcnluiMv+Xg=
-github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
-github.com/go-logr/logr v0.1.0/go.mod h1:ixOQHD9gLJUVQQ2ZOR7zLEifBX6tGkNJF4QyIY7sIas=
-github.com/go-logr/logr v0.2.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU=
-github.com/go-logr/logr v1.2.0/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
-github.com/go-logr/logr v1.2.2 h1:ahHml/yUpnlb96Rp8HCvtYVPY8ZYpxq3g7UYchIYwbs=
-github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
-github.com/go-openapi/jsonpointer v0.19.3/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg=
-github.com/go-openapi/jsonpointer v0.19.5/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg=
-github.com/go-openapi/jsonreference v0.19.3/go.mod h1:rjx6GuL8TTa9VaixXglHmQmIL98+wF9xc8zWvFonSJ8=
-github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk=
+github.com/fluxcd/pkg/apis/acl v0.9.0 h1:wBpgsKT+jcyZEcM//OmZr9RiF8klL3ebrDp2u2ThsnA=
+github.com/fluxcd/pkg/apis/acl v0.9.0/go.mod h1:TttNS+gocsGLwnvmgVi3/Yscwqrjc17+vhgYfqkfrV4=
+github.com/fluxcd/pkg/apis/meta v1.21.0 h1:R+bN02chcs0HUmyVDQhqe/FHmYLjipVDMLnyYfNX850=
+github.com/fluxcd/pkg/apis/meta v1.21.0/go.mod h1:XUAEUgT4gkWDAEN79E141tmL+v4SV50tVZ/Ojpc/ueg=
+github.com/fxamacker/cbor/v2 v2.9.0 h1:NpKPmjDBgUfBms6tr6JZkTHtfFGcMKsw3eGcmD/sapM=
+github.com/fxamacker/cbor/v2 v2.9.0/go.mod h1:vM4b+DJCtHn+zz7h3FFp/hDAI9WNWCsZj23V5ytsSxQ=
+github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY=
+github.com/go-logr/logr v1.4.2/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
+github.com/go-task/slim-sprig/v3 v3.0.0 h1:sUs3vkvUymDpBKi3qH1YSqBQk9+9D/8M2mN1vB6EwHI=
+github.com/go-task/slim-sprig/v3 v3.0.0/go.mod h1:W848ghGpv3Qj3dhTPRyJypKRiqCdHZiAzKg9hl15HA8=
github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q=
github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q=
-github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
-github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
-github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
-github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
-github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8=
-github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA=
-github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs=
-github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w=
-github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0=
-github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8=
-github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
-github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk=
-github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=
-github.com/google/gnostic v0.5.7-v3refs/go.mod h1:73MKFl6jIHelAJNaBGFzt3SPtZULs9dYrGFt8OiIsHQ=
-github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
-github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
-github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
-github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
-github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
-github.com/google/go-cmp v0.5.6 h1:BKbKCqvP6I+rmFHt06ZmyQtvB8xAkWdhFyr0ZUNZcxQ=
+github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8=
+github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU=
github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
-github.com/google/gofuzz v1.1.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
-github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0=
-github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
-github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
-github.com/gorilla/mux v1.8.0/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So=
-github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
-github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
-github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU=
+github.com/google/pprof v0.0.0-20241029153458-d1b30febd7db h1:097atOisP2aRj7vFgYQBbFN4U4JNXUNYpxael3UzMyo=
+github.com/google/pprof v0.0.0-20241029153458-d1b30febd7db/go.mod h1:vavhavw2zAxS5dIdcRluK6cSGGPlZynqzFM8NdvU144=
github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM=
github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo=
github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8=
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
-github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
-github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
+github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
+github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE=
+github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk=
github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
-github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
-github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
-github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
-github.com/moby/spdystream v0.2.0/go.mod h1:f7i0iNDQJ059oMTcWxx8MA/zKFIuD/lY+0GqbN2Wy8c=
github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg=
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
-github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
-github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M=
github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk=
-github.com/munnerz/goautoneg v0.0.0-20120707110453-a547fc61f48d/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ=
-github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw=
-github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e h1:fD57ERR4JtEqsWbfPhv4DMiApHyliiK5xCTNVSPiaAs=
-github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno=
-github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A=
-github.com/nxadm/tail v1.4.8 h1:nPr65rt6Y5JFSKQO7qToXr7pePgD6Gwiw05lkbyAQTE=
-github.com/onsi/ginkgo v0.0.0-20170829012221-11459a886d9c/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
-github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
-github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk=
-github.com/onsi/ginkgo v1.14.0/go.mod h1:iSB4RoI2tjJc9BBv4NKIKWKya62Rps+oPG/Lv9klQyY=
-github.com/onsi/ginkgo v1.16.5 h1:8xi0RTUf59SOSfEtZMvwTvXYMzG4gV23XVHOZiXNtnE=
-github.com/onsi/gomega v0.0.0-20170829124025-dcabb60a477c/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA=
-github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY=
-github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo=
-github.com/onsi/gomega v1.17.0 h1:9Luw4uT5HTjHTN8+aNcSThgH1vdXnmdJ8xIfZ4wyTRE=
-github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
+github.com/modern-go/reflect2 v1.0.3-0.20250322232337-35a7c28c31ee h1:W5t00kpgFdJifH4BDsTlE89Zl93FEloxaWZfGcifgq8=
+github.com/modern-go/reflect2 v1.0.3-0.20250322232337-35a7c28c31ee/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk=
+github.com/onsi/ginkgo/v2 v2.22.0 h1:Yed107/8DjTr0lKCNt7Dn8yQ6ybuDRQoMGrNFKzMfHg=
+github.com/onsi/ginkgo/v2 v2.22.0/go.mod h1:7Du3c42kxCUegi0IImZ1wUQzMBVecgIHjR1C+NkhLQo=
+github.com/onsi/gomega v1.36.1 h1:bJDPBO7ibjxcbHMgSCoo4Yj18UWbKDlLwX1x9sybDcw=
+github.com/onsi/gomega v1.36.1/go.mod h1:PvZbdDc8J6XJEpDK4HCuRBm8a6Fzp9/DmhC9C7yFlog=
+github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsKMEsNbx1VGcRFpLqf3715MtcvvzbA=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
-github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
-github.com/spf13/afero v1.2.2/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk=
-github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA=
-github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
-github.com/stoewer/go-strcase v1.2.0/go.mod h1:IBiWB2sKIp3wVVQ3Y035++gc+knqhUQag1KpM8ahLw8=
+github.com/rogpeppe/go-internal v1.9.0/go.mod h1:WtVeX8xhTBvf0smdhujwtBcq4Qrzq/fJaraNFVN+nFs=
+github.com/rogpeppe/go-internal v1.13.1 h1:KvO1DLK/DRN07sQ1LQKScxyZJuNnedQ5/wKSR38lUII=
+github.com/rogpeppe/go-internal v1.13.1/go.mod h1:uMEvuHeurkdAXX61udpOXGD/AzZDWNMNyH2VO9fmH0o=
+github.com/spf13/pflag v1.0.6 h1:jFzHGLGAlb3ruxLB8MhbI6A8+AQX/2eW4qeyNZXNp2o=
+github.com/spf13/pflag v1.0.6/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
-github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA=
-github.com/stretchr/testify v1.7.0 h1:nwc3DEeHmmLAfoZucVR881uASk0Mfjw8xYJ99tb5CcY=
-github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
+github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA=
+github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
+github.com/x448/float16 v0.8.4 h1:qLwI1I70+NjRFUR3zs1JPUCgaCXSh3SW62uAKT1mSBM=
+github.com/x448/float16 v0.8.4/go.mod h1:14CWIYCyZA/cWjXOioeEpHeN/83MdbZDRQHoFcYsOfg=
github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
-github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k=
+go.yaml.in/yaml/v2 v2.4.2 h1:DzmwEr2rDGHl7lsFgAHxmNz/1NlQ7xLIrlN2h5d1eGI=
+go.yaml.in/yaml/v2 v2.4.2/go.mod h1:081UH+NErpNdqlCXm3TtEran0rJZGxAYx9hb/ELlsPU=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
-golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
-golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
-golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU=
-golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
-golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
-golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
-golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
-golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
-golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
-golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
-golang.org/x/net v0.0.0-20190827160401-ba9fcec4b297/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
-golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
-golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM=
-golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd h1:O7DYs+zxREGLKzKoMQrtrEacpb0ZVXA5rIwylE2Xchk=
-golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
-golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
-golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/net v0.43.0 h1:lat02VYK2j4aLzMzecihNvTlJNQUq316m2Mr9rnM6YE=
+golang.org/x/net v0.43.0/go.mod h1:vhO1fvI4dGsIjh73sWfUVjj3N7CA9WkKJNQm2svM6Jg=
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
-golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200519105757-fe76b779f299/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.0.0-20220209214540-3681064d5158 h1:rm+CHSpPEEW2IsXUib1ThaHIjuBVZjxNgSKmBLFfD4c=
-golang.org/x/sys v0.0.0-20220209214540-3681064d5158/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
-golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
+golang.org/x/sys v0.35.0 h1:vz1N37gP5bs89s7He8XuIYXpyY0+QlsKmzipCbUtyxI=
+golang.org/x/sys v0.35.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
-golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
-golang.org/x/text v0.3.7 h1:olpwvP2KacW1ZWvsR7uQhoyTYvKAupfQrRGBFM352Gk=
-golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
+golang.org/x/text v0.28.0 h1:rhazDwis8INMIwQ4tpjLDzUhx6RlXqZNPEM0huQojng=
+golang.org/x/text v0.28.0/go.mod h1:U8nCwOR8jO/marOQ0QbDiOngZVEBB7MAiitBuMjXiNU=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
-golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
-golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY=
-golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
-golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
-golang.org/x/tools v0.0.0-20200505023115-26f46d2f7ef8/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
-golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
+golang.org/x/tools v0.35.0 h1:mBffYraMEf7aa0sB+NuKnuCy8qI/9Bughn8dC2Gu5r0=
+golang.org/x/tools v0.35.0/go.mod h1:NKdj5HkL/73byiZSJjqJgKn3ep7KjFkBOkR/Hps3VPw=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
-golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 h1:go1bK/D/BFZV2I8cIQd1NKEZ+0owSTG1fDTci4IqFcE=
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
-google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
-google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
-google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
-google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
-google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo=
-google.golang.org/genproto v0.0.0-20201019141844-1ed22bb0c154/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
-google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
-google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
-google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
-google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
-google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
-google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=
-google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE=
-google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo=
-google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
-google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
-google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
-google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4=
-google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw=
-google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
-google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
-gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
-gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
-gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f h1:BLraFXnmrev5lT+xlilqcH8XK9/i0At2xKjWk4p6zsU=
-gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
-gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys=
+gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
+gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc=
gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw=
-gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ=
-gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw=
-gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
-gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
-gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
-gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
-gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
-gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY=
-gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
-honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
-honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
-k8s.io/api v0.23.5 h1:zno3LUiMubxD/V1Zw3ijyKO3wxrhbUF1Ck+VjBvfaoA=
-k8s.io/apimachinery v0.24.1 h1:ShD4aDxTQKN5zNf8K1RQ2u98ELLdIW7jEnlO9uAMX/I=
-k8s.io/apimachinery v0.24.1/go.mod h1:82Bi4sCzVBdpYjyI4jY6aHX+YCUchUIrZrXKedjd2UM=
-k8s.io/gengo v0.0.0-20210813121822-485abfe95c7c/go.mod h1:FiNAH4ZV3gBg2Kwh89tzAEV2be7d5xI0vBa/VySYy3E=
-k8s.io/klog/v2 v2.0.0/go.mod h1:PBfzABfn139FHAV07az/IF9Wp1bkk3vpT2XSJ76fSDE=
-k8s.io/klog/v2 v2.2.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y=
-k8s.io/klog/v2 v2.60.1 h1:VW25q3bZx9uE3vvdL6M8ezOX79vA2Aq1nEWLqNQclHc=
-k8s.io/klog/v2 v2.60.1/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0=
-k8s.io/kube-openapi v0.0.0-20220328201542-3ee0da9b0b42/go.mod h1:Z/45zLw8lUo4wdiUkI+v/ImEGAvu3WatcZl3lPMR4Rk=
-k8s.io/utils v0.0.0-20210802155522-efc7438f0176/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA=
-k8s.io/utils v0.0.0-20220210201930-3a6ce19ff2f9 h1:HNSDgDCrr/6Ly3WEGKZftiE7IY19Vz2GdbOCyI4qqhc=
-k8s.io/utils v0.0.0-20220210201930-3a6ce19ff2f9/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA=
-sigs.k8s.io/controller-runtime v0.11.2 h1:H5GTxQl0Mc9UjRJhORusqfJCIjBO8UtUxGggCwL1rLA=
-sigs.k8s.io/controller-runtime v0.11.2/go.mod h1:P6QCzrEjLaZGqHsfd+os7JQ+WFZhvB8MRFsn4dWF7O4=
-sigs.k8s.io/json v0.0.0-20211208200746-9f7c6b3444d2 h1:kDi4JBNAsJWfz1aEXhO8Jg87JJaPNLh5tIzYHgStQ9Y=
-sigs.k8s.io/json v0.0.0-20211208200746-9f7c6b3444d2/go.mod h1:B+TnT182UBxE84DiCz4CVE26eOSDAeYCpfDnC2kdKMY=
-sigs.k8s.io/structured-merge-diff/v4 v4.0.2/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw=
-sigs.k8s.io/structured-merge-diff/v4 v4.2.1 h1:bKCqE9GvQ5tiVHn5rfn1r+yao3aLQEaLzkkmAkf+A6Y=
-sigs.k8s.io/structured-merge-diff/v4 v4.2.1/go.mod h1:j/nl6xW8vLS49O8YvXW1ocPhZawJtm+Yrr7PPRQ0Vg4=
-sigs.k8s.io/yaml v1.2.0/go.mod h1:yfXDCHCao9+ENCvLSE62v9VSji2MKu5jeNfTrofGhJc=
-sigs.k8s.io/yaml v1.3.0 h1:a2VclLzOGrwOHDiV8EfBGhvjHvP46CtW5j6POvhYGGo=
+k8s.io/api v0.34.0 h1:L+JtP2wDbEYPUeNGbeSa/5GwFtIA662EmT2YSLOkAVE=
+k8s.io/api v0.34.0/go.mod h1:YzgkIzOOlhl9uwWCZNqpw6RJy9L2FK4dlJeayUoydug=
+k8s.io/apimachinery v0.34.0 h1:eR1WO5fo0HyoQZt1wdISpFDffnWOvFLOOeJ7MgIv4z0=
+k8s.io/apimachinery v0.34.0/go.mod h1:/GwIlEcWuTX9zKIg2mbw0LRFIsXwrfoVxn+ef0X13lw=
+k8s.io/klog/v2 v2.130.1 h1:n9Xl7H1Xvksem4KFG4PYbdQCQxqc/tTUyrgXaOhHSzk=
+k8s.io/klog/v2 v2.130.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE=
+k8s.io/utils v0.0.0-20250604170112-4c0f3b243397 h1:hwvWFiBzdWw1FhfY1FooPn3kzWuJ8tmbZBHi4zVsl1Y=
+k8s.io/utils v0.0.0-20250604170112-4c0f3b243397/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0=
+sigs.k8s.io/controller-runtime v0.22.1 h1:Ah1T7I+0A7ize291nJZdS1CabF/lB4E++WizgV24Eqg=
+sigs.k8s.io/controller-runtime v0.22.1/go.mod h1:FwiwRjkRPbiN+zp2QRp7wlTCzbUXxZ/D4OzuQUDwBHY=
+sigs.k8s.io/json v0.0.0-20241014173422-cfa47c3a1cc8 h1:gBQPwqORJ8d8/YNZWEjoZs7npUVDpVXUUOFfW6CgAqE=
+sigs.k8s.io/json v0.0.0-20241014173422-cfa47c3a1cc8/go.mod h1:mdzfpAEoE6DHQEN0uh9ZbOCuHbLK5wOm7dK4ctXE9Tg=
+sigs.k8s.io/randfill v1.0.0 h1:JfjMILfT8A6RbawdsK2JXGBR5AQVfd+9TbzrlneTyrU=
+sigs.k8s.io/randfill v1.0.0/go.mod h1:XeLlZ/jmk4i1HRopwe7/aU3H5n1zNUcX6TM94b3QxOY=
+sigs.k8s.io/structured-merge-diff/v6 v6.3.0 h1:jTijUJbW353oVOd9oTlifJqOGEkUw2jB/fXCbTiQEco=
+sigs.k8s.io/structured-merge-diff/v6 v6.3.0/go.mod h1:M3W8sfWvn2HhQDIbGWj3S099YozAsymCo/wrT5ohRUE=
+sigs.k8s.io/yaml v1.6.0 h1:G8fkbMSAFqgEFgh4b1wmtzDnioxFCUgTZhlbj5P9QYs=
+sigs.k8s.io/yaml v1.6.0/go.mod h1:796bPqUfzR/0jLAl6XjHl3Ck7MiyVv8dbTdyT3/pMf4=
diff --git a/api/v1/bucket_types.go b/api/v1/bucket_types.go
new file mode 100644
index 000000000..bbedcefb3
--- /dev/null
+++ b/api/v1/bucket_types.go
@@ -0,0 +1,281 @@
+/*
+Copyright 2024 The Flux authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1
+
+import (
+ "time"
+
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+
+ "github.com/fluxcd/pkg/apis/meta"
+)
+
+const (
+ // BucketKind is the string representation of a Bucket.
+ BucketKind = "Bucket"
+)
+
+const (
+ // BucketProviderGeneric for any S3 API compatible storage Bucket.
+ BucketProviderGeneric string = "generic"
+ // BucketProviderAmazon for an AWS S3 object storage Bucket.
+ // Provides support for retrieving credentials from the AWS EC2 service
+ // and workload identity authentication.
+ BucketProviderAmazon string = "aws"
+ // BucketProviderGoogle for a Google Cloud Storage Bucket.
+ // Provides support for authentication using a workload identity.
+ BucketProviderGoogle string = "gcp"
+ // BucketProviderAzure for an Azure Blob Storage Bucket.
+ // Provides support for authentication using a Service Principal,
+ // Managed Identity or Shared Key.
+ BucketProviderAzure string = "azure"
+)
+
+// BucketSpec specifies the required configuration to produce an Artifact for
+// an object storage bucket.
+// +kubebuilder:validation:XValidation:rule="self.provider == 'aws' || self.provider == 'generic' || !has(self.sts)", message="STS configuration is only supported for the 'aws' and 'generic' Bucket providers"
+// +kubebuilder:validation:XValidation:rule="self.provider != 'aws' || !has(self.sts) || self.sts.provider == 'aws'", message="'aws' is the only supported STS provider for the 'aws' Bucket provider"
+// +kubebuilder:validation:XValidation:rule="self.provider != 'generic' || !has(self.sts) || self.sts.provider == 'ldap'", message="'ldap' is the only supported STS provider for the 'generic' Bucket provider"
+// +kubebuilder:validation:XValidation:rule="!has(self.sts) || self.sts.provider != 'aws' || !has(self.sts.secretRef)", message="spec.sts.secretRef is not required for the 'aws' STS provider"
+// +kubebuilder:validation:XValidation:rule="!has(self.sts) || self.sts.provider != 'aws' || !has(self.sts.certSecretRef)", message="spec.sts.certSecretRef is not required for the 'aws' STS provider"
+// +kubebuilder:validation:XValidation:rule="self.provider != 'generic' || !has(self.serviceAccountName)", message="ServiceAccountName is not supported for the 'generic' Bucket provider"
+// +kubebuilder:validation:XValidation:rule="!has(self.secretRef) || !has(self.serviceAccountName)", message="cannot set both .spec.secretRef and .spec.serviceAccountName"
+type BucketSpec struct {
+ // Provider of the object storage bucket.
+ // Defaults to 'generic', which expects an S3 (API) compatible object
+ // storage.
+ // +kubebuilder:validation:Enum=generic;aws;gcp;azure
+ // +kubebuilder:default:=generic
+ // +optional
+ Provider string `json:"provider,omitempty"`
+
+ // BucketName is the name of the object storage bucket.
+ // +required
+ BucketName string `json:"bucketName"`
+
+ // Endpoint is the object storage address the BucketName is located at.
+ // +required
+ Endpoint string `json:"endpoint"`
+
+ // STS specifies the required configuration to use a Security Token
+ // Service for fetching temporary credentials to authenticate in a
+ // Bucket provider.
+ //
+ // This field is only supported for the `aws` and `generic` providers.
+ // +optional
+ STS *BucketSTSSpec `json:"sts,omitempty"`
+
+ // Insecure allows connecting to a non-TLS HTTP Endpoint.
+ // +optional
+ Insecure bool `json:"insecure,omitempty"`
+
+ // Region of the Endpoint where the BucketName is located in.
+ // +optional
+ Region string `json:"region,omitempty"`
+
+ // Prefix to use for server-side filtering of files in the Bucket.
+ // +optional
+ Prefix string `json:"prefix,omitempty"`
+
+ // SecretRef specifies the Secret containing authentication credentials
+ // for the Bucket.
+ // +optional
+ SecretRef *meta.LocalObjectReference `json:"secretRef,omitempty"`
+
+ // ServiceAccountName is the name of the Kubernetes ServiceAccount used to authenticate
+ // the bucket. This field is only supported for the 'gcp' and 'aws' providers.
+ // For more information about workload identity:
+ // https://fluxcd.io/flux/components/source/buckets/#workload-identity
+ // +optional
+ ServiceAccountName string `json:"serviceAccountName,omitempty"`
+
+ // CertSecretRef can be given the name of a Secret containing
+ // either or both of
+ //
+ // - a PEM-encoded client certificate (`tls.crt`) and private
+ // key (`tls.key`);
+ // - a PEM-encoded CA certificate (`ca.crt`)
+ //
+ // and whichever are supplied, will be used for connecting to the
+ // bucket. The client cert and key are useful if you are
+ // authenticating with a certificate; the CA cert is useful if
+ // you are using a self-signed server certificate. The Secret must
+ // be of type `Opaque` or `kubernetes.io/tls`.
+ //
+ // This field is only supported for the `generic` provider.
+ // +optional
+ CertSecretRef *meta.LocalObjectReference `json:"certSecretRef,omitempty"`
+
+ // ProxySecretRef specifies the Secret containing the proxy configuration
+ // to use while communicating with the Bucket server.
+ // +optional
+ ProxySecretRef *meta.LocalObjectReference `json:"proxySecretRef,omitempty"`
+
+ // Interval at which the Bucket Endpoint is checked for updates.
+ // This interval is approximate and may be subject to jitter to ensure
+ // efficient use of resources.
+ // +kubebuilder:validation:Type=string
+ // +kubebuilder:validation:Pattern="^([0-9]+(\\.[0-9]+)?(ms|s|m|h))+$"
+ // +required
+ Interval metav1.Duration `json:"interval"`
+
+ // Timeout for fetch operations, defaults to 60s.
+ // +kubebuilder:default="60s"
+ // +kubebuilder:validation:Type=string
+ // +kubebuilder:validation:Pattern="^([0-9]+(\\.[0-9]+)?(ms|s|m))+$"
+ // +optional
+ Timeout *metav1.Duration `json:"timeout,omitempty"`
+
+ // Ignore overrides the set of excluded patterns in the .sourceignore format
+ // (which is the same as .gitignore). If not provided, a default will be used,
+ // consult the documentation for your version to find out what those are.
+ // +optional
+ Ignore *string `json:"ignore,omitempty"`
+
+ // Suspend tells the controller to suspend the reconciliation of this
+ // Bucket.
+ // +optional
+ Suspend bool `json:"suspend,omitempty"`
+}
+
+// BucketSTSSpec specifies the required configuration to use a Security Token
+// Service for fetching temporary credentials to authenticate in a Bucket
+// provider.
+type BucketSTSSpec struct {
+ // Provider of the Security Token Service.
+ // +kubebuilder:validation:Enum=aws;ldap
+ // +required
+ Provider string `json:"provider"`
+
+ // Endpoint is the HTTP/S endpoint of the Security Token Service from
+ // where temporary credentials will be fetched.
+ // +required
+ // +kubebuilder:validation:Pattern="^(http|https)://.*$"
+ Endpoint string `json:"endpoint"`
+
+ // SecretRef specifies the Secret containing authentication credentials
+ // for the STS endpoint. This Secret must contain the fields `username`
+ // and `password` and is supported only for the `ldap` provider.
+ // +optional
+ SecretRef *meta.LocalObjectReference `json:"secretRef,omitempty"`
+
+ // CertSecretRef can be given the name of a Secret containing
+ // either or both of
+ //
+ // - a PEM-encoded client certificate (`tls.crt`) and private
+ // key (`tls.key`);
+ // - a PEM-encoded CA certificate (`ca.crt`)
+ //
+ // and whichever are supplied, will be used for connecting to the
+ // STS endpoint. The client cert and key are useful if you are
+ // authenticating with a certificate; the CA cert is useful if
+ // you are using a self-signed server certificate. The Secret must
+ // be of type `Opaque` or `kubernetes.io/tls`.
+ //
+ // This field is only supported for the `ldap` provider.
+ // +optional
+ CertSecretRef *meta.LocalObjectReference `json:"certSecretRef,omitempty"`
+}
+
+// BucketStatus records the observed state of a Bucket.
+type BucketStatus struct {
+ // ObservedGeneration is the last observed generation of the Bucket object.
+ // +optional
+ ObservedGeneration int64 `json:"observedGeneration,omitempty"`
+
+ // Conditions holds the conditions for the Bucket.
+ // +optional
+ Conditions []metav1.Condition `json:"conditions,omitempty"`
+
+ // URL is the dynamic fetch link for the latest Artifact.
+ // It is provided on a "best effort" basis, and using the precise
+ // BucketStatus.Artifact data is recommended.
+ // +optional
+ URL string `json:"url,omitempty"`
+
+ // Artifact represents the last successful Bucket reconciliation.
+ // +optional
+ Artifact *meta.Artifact `json:"artifact,omitempty"`
+
+ // ObservedIgnore is the observed exclusion patterns used for constructing
+ // the source artifact.
+ // +optional
+ ObservedIgnore *string `json:"observedIgnore,omitempty"`
+
+ meta.ReconcileRequestStatus `json:",inline"`
+}
+
+const (
+ // BucketOperationSucceededReason signals that the Bucket listing and fetch
+ // operations succeeded.
+ BucketOperationSucceededReason string = "BucketOperationSucceeded"
+
+ // BucketOperationFailedReason signals that the Bucket listing or fetch
+ // operations failed.
+ BucketOperationFailedReason string = "BucketOperationFailed"
+)
+
+// GetConditions returns the status conditions of the object.
+func (in *Bucket) GetConditions() []metav1.Condition {
+ return in.Status.Conditions
+}
+
+// SetConditions sets the status conditions on the object.
+func (in *Bucket) SetConditions(conditions []metav1.Condition) {
+ in.Status.Conditions = conditions
+}
+
+// GetRequeueAfter returns the duration after which the source must be reconciled again.
+func (in *Bucket) GetRequeueAfter() time.Duration {
+ return in.Spec.Interval.Duration
+}
+
+// GetArtifact returns the latest artifact from the source if present in the status sub-resource.
+func (in *Bucket) GetArtifact() *meta.Artifact {
+ return in.Status.Artifact
+}
+
+// +genclient
+// +kubebuilder:storageversion
+// +kubebuilder:object:root=true
+// +kubebuilder:subresource:status
+// +kubebuilder:printcolumn:name="Endpoint",type=string,JSONPath=`.spec.endpoint`
+// +kubebuilder:printcolumn:name="Age",type="date",JSONPath=".metadata.creationTimestamp",description=""
+// +kubebuilder:printcolumn:name="Ready",type="string",JSONPath=".status.conditions[?(@.type==\"Ready\")].status",description=""
+// +kubebuilder:printcolumn:name="Status",type="string",JSONPath=".status.conditions[?(@.type==\"Ready\")].message",description=""
+
+// Bucket is the Schema for the buckets API.
+type Bucket struct {
+ metav1.TypeMeta `json:",inline"`
+ metav1.ObjectMeta `json:"metadata,omitempty"`
+
+ Spec BucketSpec `json:"spec,omitempty"`
+ // +kubebuilder:default={"observedGeneration":-1}
+ Status BucketStatus `json:"status,omitempty"`
+}
+
+// BucketList contains a list of Bucket objects.
+// +kubebuilder:object:root=true
+type BucketList struct {
+ metav1.TypeMeta `json:",inline"`
+ metav1.ListMeta `json:"metadata,omitempty"`
+ Items []Bucket `json:"items"`
+}
+
+func init() {
+ SchemeBuilder.Register(&Bucket{}, &BucketList{})
+}
diff --git a/api/v1/condition_types.go b/api/v1/condition_types.go
new file mode 100644
index 000000000..9641db99c
--- /dev/null
+++ b/api/v1/condition_types.go
@@ -0,0 +1,118 @@
+/*
+Copyright 2023 The Flux authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1
+
+const SourceFinalizer = "finalizers.fluxcd.io"
+
+const (
+ // ArtifactInStorageCondition indicates the availability of the Artifact in
+ // the storage.
+ // If True, the Artifact is stored successfully.
+ // This Condition is only present on the resource if the Artifact is
+ // successfully stored.
+ ArtifactInStorageCondition string = "ArtifactInStorage"
+
+ // ArtifactOutdatedCondition indicates the current Artifact of the Source
+ // is outdated.
+ // This is a "negative polarity" or "abnormal-true" type, and is only
+ // present on the resource if it is True.
+ ArtifactOutdatedCondition string = "ArtifactOutdated"
+
+ // SourceVerifiedCondition indicates the integrity verification of the
+ // Source.
+ // If True, the integrity check succeeded. If False, it failed.
+ // This Condition is only present on the resource if the integrity check
+ // is enabled.
+ SourceVerifiedCondition string = "SourceVerified"
+
+ // FetchFailedCondition indicates a transient or persistent fetch failure
+ // of an upstream Source.
+ // If True, observations on the upstream Source revision may be impossible,
+ // and the Artifact available for the Source may be outdated.
+ // This is a "negative polarity" or "abnormal-true" type, and is only
+ // present on the resource if it is True.
+ FetchFailedCondition string = "FetchFailed"
+
+ // BuildFailedCondition indicates a transient or persistent build failure
+ // of a Source's Artifact.
+ // If True, the Source can be in an ArtifactOutdatedCondition.
+ // This is a "negative polarity" or "abnormal-true" type, and is only
+ // present on the resource if it is True.
+ BuildFailedCondition string = "BuildFailed"
+
+ // StorageOperationFailedCondition indicates a transient or persistent
+ // failure related to storage. If True, the reconciliation failed while
+ // performing some filesystem operation.
+ // This is a "negative polarity" or "abnormal-true" type, and is only
+ // present on the resource if it is True.
+ StorageOperationFailedCondition string = "StorageOperationFailed"
+)
+
+// Reasons are provided as utility, and not part of the declarative API.
+const (
+ // URLInvalidReason signals that a given Source has an invalid URL.
+ URLInvalidReason string = "URLInvalid"
+
+ // AuthenticationFailedReason signals that a Secret does not have the
+ // required fields, or the provided credentials do not match.
+ AuthenticationFailedReason string = "AuthenticationFailed"
+
+ // VerificationError signals that the Source's verification
+ // check failed.
+ VerificationError string = "VerificationError"
+
+ // DirCreationFailedReason signals a failure caused by a directory creation
+ // operation.
+ DirCreationFailedReason string = "DirectoryCreationFailed"
+
+ // StatOperationFailedReason signals a failure caused by a stat operation on
+ // a path.
+ StatOperationFailedReason string = "StatOperationFailed"
+
+ // ReadOperationFailedReason signals a failure caused by a read operation.
+ ReadOperationFailedReason string = "ReadOperationFailed"
+
+ // AcquireLockFailedReason signals a failure in acquiring lock.
+ AcquireLockFailedReason string = "AcquireLockFailed"
+
+ // InvalidPathReason signals a failure caused by an invalid path.
+ InvalidPathReason string = "InvalidPath"
+
+ // ArchiveOperationFailedReason signals a failure in archive operation.
+ ArchiveOperationFailedReason string = "ArchiveOperationFailed"
+
+ // SymlinkUpdateFailedReason signals a failure in updating a symlink.
+ SymlinkUpdateFailedReason string = "SymlinkUpdateFailed"
+
+ // ArtifactUpToDateReason signals that an existing Artifact is up-to-date
+ // with the Source.
+ ArtifactUpToDateReason string = "ArtifactUpToDate"
+
+ // CacheOperationFailedReason signals a failure in cache operation.
+ CacheOperationFailedReason string = "CacheOperationFailed"
+
+ // PatchOperationFailedReason signals a failure in patching a kubernetes API
+ // object.
+ PatchOperationFailedReason string = "PatchOperationFailed"
+
+ // InvalidSTSConfigurationReason signals that the STS configurtion is invalid.
+ InvalidSTSConfigurationReason string = "InvalidSTSConfiguration"
+
+ // InvalidProviderConfigurationReason signals that the provider
+ // configuration is invalid.
+ InvalidProviderConfigurationReason string = "InvalidProviderConfiguration"
+)
diff --git a/pkg/git/gogit/gogit.go b/api/v1/doc.go
similarity index 73%
rename from pkg/git/gogit/gogit.go
rename to api/v1/doc.go
index 2ce0a8649..a06b2174b 100644
--- a/pkg/git/gogit/gogit.go
+++ b/api/v1/doc.go
@@ -1,5 +1,5 @@
/*
-Copyright 2021 The Flux authors
+Copyright 2023 The Flux authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
@@ -14,10 +14,7 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
-package gogit
-
-import "github.com/fluxcd/source-controller/pkg/git"
-
-const (
- Implementation git.Implementation = "go-git"
-)
+// Package v1 contains API Schema definitions for the source v1 API group
+// +kubebuilder:object:generate=true
+// +groupName=source.toolkit.fluxcd.io
+package v1
diff --git a/api/v1/externalartifact_types.go b/api/v1/externalartifact_types.go
new file mode 100644
index 000000000..e338b733b
--- /dev/null
+++ b/api/v1/externalartifact_types.go
@@ -0,0 +1,97 @@
+/*
+Copyright 2025 The Flux authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1
+
+import (
+ "time"
+
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+
+ "github.com/fluxcd/pkg/apis/meta"
+)
+
+// ExternalArtifactKind is the string representation of the ExternalArtifact.
+const ExternalArtifactKind = "ExternalArtifact"
+
+// ExternalArtifactSpec defines the desired state of ExternalArtifact
+type ExternalArtifactSpec struct {
+ // SourceRef points to the Kubernetes custom resource for
+ // which the artifact is generated.
+ // +optional
+ SourceRef *meta.NamespacedObjectKindReference `json:"sourceRef,omitempty"`
+}
+
+// ExternalArtifactStatus defines the observed state of ExternalArtifact
+type ExternalArtifactStatus struct {
+ // Artifact represents the output of an ExternalArtifact reconciliation.
+ // +optional
+ Artifact *meta.Artifact `json:"artifact,omitempty"`
+
+ // Conditions holds the conditions for the ExternalArtifact.
+ // +optional
+ Conditions []metav1.Condition `json:"conditions,omitempty"`
+}
+
+// GetConditions returns the status conditions of the object.
+func (in *ExternalArtifact) GetConditions() []metav1.Condition {
+ return in.Status.Conditions
+}
+
+// SetConditions sets the status conditions on the object.
+func (in *ExternalArtifact) SetConditions(conditions []metav1.Condition) {
+ in.Status.Conditions = conditions
+}
+
+// GetArtifact returns the latest Artifact from the ExternalArtifact if
+// present in the status sub-resource.
+func (in *ExternalArtifact) GetArtifact() *meta.Artifact {
+ return in.Status.Artifact
+}
+
+// GetRequeueAfter returns the duration after which the ExternalArtifact
+// must be reconciled again.
+func (in *ExternalArtifact) GetRequeueAfter() time.Duration {
+ return time.Minute
+}
+
+// +kubebuilder:object:root=true
+// +kubebuilder:subresource:status
+// +kubebuilder:printcolumn:name="Age",type="date",JSONPath=".metadata.creationTimestamp",description=""
+// +kubebuilder:printcolumn:name="Ready",type="string",JSONPath=".status.conditions[?(@.type==\"Ready\")].status",description=""
+// +kubebuilder:printcolumn:name="Status",type="string",JSONPath=".status.conditions[?(@.type==\"Ready\")].message",description=""
+// +kubebuilder:printcolumn:name="Source",type="string",JSONPath=".spec.sourceRef.name",description=""
+
+// ExternalArtifact is the Schema for the external artifacts API
+type ExternalArtifact struct {
+ metav1.TypeMeta `json:",inline"`
+ metav1.ObjectMeta `json:"metadata,omitempty"`
+
+ Spec ExternalArtifactSpec `json:"spec,omitempty"`
+ Status ExternalArtifactStatus `json:"status,omitempty"`
+}
+
+// ExternalArtifactList contains a list of ExternalArtifact
+// +kubebuilder:object:root=true
+type ExternalArtifactList struct {
+ metav1.TypeMeta `json:",inline"`
+ metav1.ListMeta `json:"metadata,omitempty"`
+ Items []ExternalArtifact `json:"items"`
+}
+
+func init() {
+ SchemeBuilder.Register(&ExternalArtifact{}, &ExternalArtifactList{})
+}
diff --git a/api/v1/gitrepository_types.go b/api/v1/gitrepository_types.go
new file mode 100644
index 000000000..f104fd0f1
--- /dev/null
+++ b/api/v1/gitrepository_types.go
@@ -0,0 +1,384 @@
+/*
+Copyright 2023 The Flux authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1
+
+import (
+ "time"
+
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+
+ "github.com/fluxcd/pkg/apis/meta"
+)
+
+const (
+ // GitRepositoryKind is the string representation of a GitRepository.
+ GitRepositoryKind = "GitRepository"
+
+ // GitProviderGeneric provides support for authentication using
+ // credentials specified in secretRef.
+ GitProviderGeneric string = "generic"
+
+ // GitProviderAzure provides support for authentication to azure
+ // repositories using Managed Identity.
+ GitProviderAzure string = "azure"
+
+ // GitProviderGitHub provides support for authentication to git
+ // repositories using GitHub App authentication.
+ GitProviderGitHub string = "github"
+)
+
+const (
+ // IncludeUnavailableCondition indicates one of the includes is not
+ // available. For example, because it does not exist, or does not have an
+ // Artifact.
+ // This is a "negative polarity" or "abnormal-true" type, and is only
+ // present on the resource if it is True.
+ IncludeUnavailableCondition string = "IncludeUnavailable"
+)
+
+// GitVerificationMode specifies the verification mode for a Git repository.
+type GitVerificationMode string
+
+// Valid checks the validity of the Git verification mode.
+func (m GitVerificationMode) Valid() bool {
+ switch m {
+ case ModeGitHEAD, ModeGitTag, ModeGitTagAndHEAD:
+ return true
+ default:
+ return false
+ }
+}
+
+const (
+ // ModeGitHEAD implies that the HEAD of the Git repository (after it has been
+ // checked out to the required commit) should be verified.
+ ModeGitHEAD GitVerificationMode = "HEAD"
+ // ModeGitTag implies that the tag object specified in the checkout configuration
+ // should be verified.
+ ModeGitTag GitVerificationMode = "Tag"
+ // ModeGitTagAndHEAD implies that both the tag object and the commit it points
+ // to should be verified.
+ ModeGitTagAndHEAD GitVerificationMode = "TagAndHEAD"
+)
+
+// GitRepositorySpec specifies the required configuration to produce an
+// Artifact for a Git repository.
+// +kubebuilder:validation:XValidation:rule="!has(self.serviceAccountName) || (has(self.provider) && self.provider == 'azure')",message="serviceAccountName can only be set when provider is 'azure'"
+type GitRepositorySpec struct {
+ // URL specifies the Git repository URL, it can be an HTTP/S or SSH address.
+ // +kubebuilder:validation:Pattern="^(http|https|ssh)://.*$"
+ // +required
+ URL string `json:"url"`
+
+ // SecretRef specifies the Secret containing authentication credentials for
+ // the GitRepository.
+ // For HTTPS repositories the Secret must contain 'username' and 'password'
+ // fields for basic auth or 'bearerToken' field for token auth.
+ // For SSH repositories the Secret must contain 'identity'
+ // and 'known_hosts' fields.
+ // +optional
+ SecretRef *meta.LocalObjectReference `json:"secretRef,omitempty"`
+
+ // Provider used for authentication, can be 'azure', 'github', 'generic'.
+ // When not specified, defaults to 'generic'.
+ // +kubebuilder:validation:Enum=generic;azure;github
+ // +optional
+ Provider string `json:"provider,omitempty"`
+
+ // ServiceAccountName is the name of the Kubernetes ServiceAccount used to
+ // authenticate to the GitRepository. This field is only supported for 'azure' provider.
+ // +optional
+ ServiceAccountName string `json:"serviceAccountName,omitempty"`
+
+ // Interval at which the GitRepository URL is checked for updates.
+ // This interval is approximate and may be subject to jitter to ensure
+ // efficient use of resources.
+ // +kubebuilder:validation:Type=string
+ // +kubebuilder:validation:Pattern="^([0-9]+(\\.[0-9]+)?(ms|s|m|h))+$"
+ // +required
+ Interval metav1.Duration `json:"interval"`
+
+ // Timeout for Git operations like cloning, defaults to 60s.
+ // +kubebuilder:default="60s"
+ // +kubebuilder:validation:Type=string
+ // +kubebuilder:validation:Pattern="^([0-9]+(\\.[0-9]+)?(ms|s|m))+$"
+ // +optional
+ Timeout *metav1.Duration `json:"timeout,omitempty"`
+
+ // Reference specifies the Git reference to resolve and monitor for
+ // changes, defaults to the 'master' branch.
+ // +optional
+ Reference *GitRepositoryRef `json:"ref,omitempty"`
+
+ // Verification specifies the configuration to verify the Git commit
+ // signature(s).
+ // +optional
+ Verification *GitRepositoryVerification `json:"verify,omitempty"`
+
+ // ProxySecretRef specifies the Secret containing the proxy configuration
+ // to use while communicating with the Git server.
+ // +optional
+ ProxySecretRef *meta.LocalObjectReference `json:"proxySecretRef,omitempty"`
+
+ // Ignore overrides the set of excluded patterns in the .sourceignore format
+ // (which is the same as .gitignore). If not provided, a default will be used,
+ // consult the documentation for your version to find out what those are.
+ // +optional
+ Ignore *string `json:"ignore,omitempty"`
+
+ // Suspend tells the controller to suspend the reconciliation of this
+ // GitRepository.
+ // +optional
+ Suspend bool `json:"suspend,omitempty"`
+
+ // RecurseSubmodules enables the initialization of all submodules within
+ // the GitRepository as cloned from the URL, using their default settings.
+ // +optional
+ RecurseSubmodules bool `json:"recurseSubmodules,omitempty"`
+
+ // Include specifies a list of GitRepository resources which Artifacts
+ // should be included in the Artifact produced for this GitRepository.
+ // +optional
+ Include []GitRepositoryInclude `json:"include,omitempty"`
+
+ // SparseCheckout specifies a list of directories to checkout when cloning
+ // the repository. If specified, only these directories are included in the
+ // Artifact produced for this GitRepository.
+ // +optional
+ SparseCheckout []string `json:"sparseCheckout,omitempty"`
+}
+
+// GitRepositoryInclude specifies a local reference to a GitRepository which
+// Artifact (sub-)contents must be included, and where they should be placed.
+type GitRepositoryInclude struct {
+ // GitRepositoryRef specifies the GitRepository which Artifact contents
+ // must be included.
+ // +required
+ GitRepositoryRef meta.LocalObjectReference `json:"repository"`
+
+ // FromPath specifies the path to copy contents from, defaults to the root
+ // of the Artifact.
+ // +optional
+ FromPath string `json:"fromPath,omitempty"`
+
+ // ToPath specifies the path to copy contents to, defaults to the name of
+ // the GitRepositoryRef.
+ // +optional
+ ToPath string `json:"toPath,omitempty"`
+}
+
+// GetFromPath returns the specified FromPath.
+func (in *GitRepositoryInclude) GetFromPath() string {
+ return in.FromPath
+}
+
+// GetToPath returns the specified ToPath, falling back to the name of the
+// GitRepositoryRef.
+func (in *GitRepositoryInclude) GetToPath() string {
+ if in.ToPath == "" {
+ return in.GitRepositoryRef.Name
+ }
+ return in.ToPath
+}
+
+// GitRepositoryRef specifies the Git reference to resolve and checkout.
+type GitRepositoryRef struct {
+ // Branch to check out, defaults to 'master' if no other field is defined.
+ // +optional
+ Branch string `json:"branch,omitempty"`
+
+ // Tag to check out, takes precedence over Branch.
+ // +optional
+ Tag string `json:"tag,omitempty"`
+
+ // SemVer tag expression to check out, takes precedence over Tag.
+ // +optional
+ SemVer string `json:"semver,omitempty"`
+
+ // Name of the reference to check out; takes precedence over Branch, Tag and SemVer.
+ //
+ // It must be a valid Git reference: https://git-scm.com/docs/git-check-ref-format#_description
+ // Examples: "refs/heads/main", "refs/tags/v0.1.0", "refs/pull/420/head", "refs/merge-requests/1/head"
+ // +optional
+ Name string `json:"name,omitempty"`
+
+ // Commit SHA to check out, takes precedence over all reference fields.
+ //
+ // This can be combined with Branch to shallow clone the branch, in which
+ // the commit is expected to exist.
+ // +optional
+ Commit string `json:"commit,omitempty"`
+}
+
+// GitRepositoryVerification specifies the Git commit signature verification
+// strategy.
+type GitRepositoryVerification struct {
+ // Mode specifies which Git object(s) should be verified.
+ //
+ // The variants "head" and "HEAD" both imply the same thing, i.e. verify
+ // the commit that the HEAD of the Git repository points to. The variant
+ // "head" solely exists to ensure backwards compatibility.
+ // +kubebuilder:validation:Enum=head;HEAD;Tag;TagAndHEAD
+ // +optional
+ // +kubebuilder:default:=HEAD
+ Mode GitVerificationMode `json:"mode,omitempty"`
+
+ // SecretRef specifies the Secret containing the public keys of trusted Git
+ // authors.
+ // +required
+ SecretRef meta.LocalObjectReference `json:"secretRef"`
+}
+
+// GitRepositoryStatus records the observed state of a Git repository.
+type GitRepositoryStatus struct {
+ // ObservedGeneration is the last observed generation of the GitRepository
+ // object.
+ // +optional
+ ObservedGeneration int64 `json:"observedGeneration,omitempty"`
+
+ // Conditions holds the conditions for the GitRepository.
+ // +optional
+ Conditions []metav1.Condition `json:"conditions,omitempty"`
+
+ // Artifact represents the last successful GitRepository reconciliation.
+ // +optional
+ Artifact *meta.Artifact `json:"artifact,omitempty"`
+
+ // IncludedArtifacts contains a list of the last successfully included
+ // Artifacts as instructed by GitRepositorySpec.Include.
+ // +optional
+ IncludedArtifacts []*meta.Artifact `json:"includedArtifacts,omitempty"`
+
+ // ObservedIgnore is the observed exclusion patterns used for constructing
+ // the source artifact.
+ // +optional
+ ObservedIgnore *string `json:"observedIgnore,omitempty"`
+
+ // ObservedRecurseSubmodules is the observed resource submodules
+ // configuration used to produce the current Artifact.
+ // +optional
+ ObservedRecurseSubmodules bool `json:"observedRecurseSubmodules,omitempty"`
+
+ // ObservedInclude is the observed list of GitRepository resources used to
+ // produce the current Artifact.
+ // +optional
+ ObservedInclude []GitRepositoryInclude `json:"observedInclude,omitempty"`
+
+ // ObservedSparseCheckout is the observed list of directories used to
+ // produce the current Artifact.
+ // +optional
+ ObservedSparseCheckout []string `json:"observedSparseCheckout,omitempty"`
+
+ // SourceVerificationMode is the last used verification mode indicating
+ // which Git object(s) have been verified.
+ // +optional
+ SourceVerificationMode *GitVerificationMode `json:"sourceVerificationMode,omitempty"`
+
+ meta.ReconcileRequestStatus `json:",inline"`
+}
+
+const (
+ // GitOperationSucceedReason signals that a Git operation (e.g. clone,
+ // checkout, etc.) succeeded.
+ GitOperationSucceedReason string = "GitOperationSucceeded"
+
+ // GitOperationFailedReason signals that a Git operation (e.g. clone,
+ // checkout, etc.) failed.
+ GitOperationFailedReason string = "GitOperationFailed"
+)
+
+// GetConditions returns the status conditions of the object.
+func (in GitRepository) GetConditions() []metav1.Condition {
+ return in.Status.Conditions
+}
+
+// SetConditions sets the status conditions on the object.
+func (in *GitRepository) SetConditions(conditions []metav1.Condition) {
+ in.Status.Conditions = conditions
+}
+
+// GetRequeueAfter returns the duration after which the GitRepository must be
+// reconciled again.
+func (in GitRepository) GetRequeueAfter() time.Duration {
+ return in.Spec.Interval.Duration
+}
+
+// GetArtifact returns the latest Artifact from the GitRepository if present in
+// the status sub-resource.
+func (in *GitRepository) GetArtifact() *meta.Artifact {
+ return in.Status.Artifact
+}
+
+// GetProvider returns the Git authentication provider.
+func (v *GitRepository) GetProvider() string {
+ if v.Spec.Provider == "" {
+ return GitProviderGeneric
+ }
+ return v.Spec.Provider
+}
+
+// GetMode returns the declared GitVerificationMode, or a ModeGitHEAD default.
+func (v *GitRepositoryVerification) GetMode() GitVerificationMode {
+ if v.Mode.Valid() {
+ return v.Mode
+ }
+ return ModeGitHEAD
+}
+
+// VerifyHEAD returns if the configured mode instructs verification of the
+// Git HEAD.
+func (v *GitRepositoryVerification) VerifyHEAD() bool {
+ return v.GetMode() == ModeGitHEAD || v.GetMode() == ModeGitTagAndHEAD
+}
+
+// VerifyTag returns if the configured mode instructs verification of the
+// Git tag.
+func (v *GitRepositoryVerification) VerifyTag() bool {
+ return v.GetMode() == ModeGitTag || v.GetMode() == ModeGitTagAndHEAD
+}
+
+// +genclient
+// +kubebuilder:storageversion
+// +kubebuilder:object:root=true
+// +kubebuilder:resource:shortName=gitrepo
+// +kubebuilder:subresource:status
+// +kubebuilder:printcolumn:name="URL",type=string,JSONPath=`.spec.url`
+// +kubebuilder:printcolumn:name="Age",type="date",JSONPath=".metadata.creationTimestamp",description=""
+// +kubebuilder:printcolumn:name="Ready",type="string",JSONPath=".status.conditions[?(@.type==\"Ready\")].status",description=""
+// +kubebuilder:printcolumn:name="Status",type="string",JSONPath=".status.conditions[?(@.type==\"Ready\")].message",description=""
+
+// GitRepository is the Schema for the gitrepositories API.
+type GitRepository struct {
+ metav1.TypeMeta `json:",inline"`
+ metav1.ObjectMeta `json:"metadata,omitempty"`
+
+ Spec GitRepositorySpec `json:"spec,omitempty"`
+ // +kubebuilder:default={"observedGeneration":-1}
+ Status GitRepositoryStatus `json:"status,omitempty"`
+}
+
+// GitRepositoryList contains a list of GitRepository objects.
+// +kubebuilder:object:root=true
+type GitRepositoryList struct {
+ metav1.TypeMeta `json:",inline"`
+ metav1.ListMeta `json:"metadata,omitempty"`
+ Items []GitRepository `json:"items"`
+}
+
+func init() {
+ SchemeBuilder.Register(&GitRepository{}, &GitRepositoryList{})
+}
diff --git a/api/v1/groupversion_info.go b/api/v1/groupversion_info.go
new file mode 100644
index 000000000..b539a7947
--- /dev/null
+++ b/api/v1/groupversion_info.go
@@ -0,0 +1,33 @@
+/*
+Copyright 2023 The Flux authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1
+
+import (
+ "k8s.io/apimachinery/pkg/runtime/schema"
+ "sigs.k8s.io/controller-runtime/pkg/scheme"
+)
+
+var (
+ // GroupVersion is group version used to register these objects.
+ GroupVersion = schema.GroupVersion{Group: "source.toolkit.fluxcd.io", Version: "v1"}
+
+ // SchemeBuilder is used to add go types to the GroupVersionKind scheme.
+ SchemeBuilder = &scheme.Builder{GroupVersion: GroupVersion}
+
+ // AddToScheme adds the types in this group-version to the given scheme.
+ AddToScheme = SchemeBuilder.AddToScheme
+)
diff --git a/api/v1/helmchart_types.go b/api/v1/helmchart_types.go
new file mode 100644
index 000000000..23cb24146
--- /dev/null
+++ b/api/v1/helmchart_types.go
@@ -0,0 +1,227 @@
+/*
+Copyright 2024 The Flux authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1
+
+import (
+ "time"
+
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+
+ "github.com/fluxcd/pkg/apis/meta"
+)
+
+// HelmChartKind is the string representation of a HelmChart.
+const HelmChartKind = "HelmChart"
+
+// HelmChartSpec specifies the desired state of a Helm chart.
+type HelmChartSpec struct {
+ // Chart is the name or path the Helm chart is available at in the
+ // SourceRef.
+ // +required
+ Chart string `json:"chart"`
+
+ // Version is the chart version semver expression, ignored for charts from
+ // GitRepository and Bucket sources. Defaults to latest when omitted.
+ // +kubebuilder:default:=*
+ // +optional
+ Version string `json:"version,omitempty"`
+
+ // SourceRef is the reference to the Source the chart is available at.
+ // +required
+ SourceRef LocalHelmChartSourceReference `json:"sourceRef"`
+
+ // Interval at which the HelmChart SourceRef is checked for updates.
+ // This interval is approximate and may be subject to jitter to ensure
+ // efficient use of resources.
+ // +kubebuilder:validation:Type=string
+ // +kubebuilder:validation:Pattern="^([0-9]+(\\.[0-9]+)?(ms|s|m|h))+$"
+ // +required
+ Interval metav1.Duration `json:"interval"`
+
+ // ReconcileStrategy determines what enables the creation of a new artifact.
+ // Valid values are ('ChartVersion', 'Revision').
+ // See the documentation of the values for an explanation on their behavior.
+ // Defaults to ChartVersion when omitted.
+ // +kubebuilder:validation:Enum=ChartVersion;Revision
+ // +kubebuilder:default:=ChartVersion
+ // +optional
+ ReconcileStrategy string `json:"reconcileStrategy,omitempty"`
+
+ // ValuesFiles is an alternative list of values files to use as the chart
+ // values (values.yaml is not included by default), expected to be a
+ // relative path in the SourceRef.
+ // Values files are merged in the order of this list with the last file
+ // overriding the first. Ignored when omitted.
+ // +optional
+ ValuesFiles []string `json:"valuesFiles,omitempty"`
+
+ // IgnoreMissingValuesFiles controls whether to silently ignore missing values
+ // files rather than failing.
+ // +optional
+ IgnoreMissingValuesFiles bool `json:"ignoreMissingValuesFiles,omitempty"`
+
+ // Suspend tells the controller to suspend the reconciliation of this
+ // source.
+ // +optional
+ Suspend bool `json:"suspend,omitempty"`
+
+ // Verify contains the secret name containing the trusted public keys
+ // used to verify the signature and specifies which provider to use to check
+ // whether OCI image is authentic.
+ // This field is only supported when using HelmRepository source with spec.type 'oci'.
+ // Chart dependencies, which are not bundled in the umbrella chart artifact, are not verified.
+ // +optional
+ Verify *OCIRepositoryVerification `json:"verify,omitempty"`
+}
+
+const (
+ // ReconcileStrategyChartVersion reconciles when the version of the Helm chart is different.
+ ReconcileStrategyChartVersion string = "ChartVersion"
+
+ // ReconcileStrategyRevision reconciles when the Revision of the source is different.
+ ReconcileStrategyRevision string = "Revision"
+)
+
+// LocalHelmChartSourceReference contains enough information to let you locate
+// the typed referenced object at namespace level.
+type LocalHelmChartSourceReference struct {
+ // APIVersion of the referent.
+ // +optional
+ APIVersion string `json:"apiVersion,omitempty"`
+
+ // Kind of the referent, valid values are ('HelmRepository', 'GitRepository',
+ // 'Bucket').
+ // +kubebuilder:validation:Enum=HelmRepository;GitRepository;Bucket
+ // +required
+ Kind string `json:"kind"`
+
+ // Name of the referent.
+ // +required
+ Name string `json:"name"`
+}
+
+// HelmChartStatus records the observed state of the HelmChart.
+type HelmChartStatus struct {
+ // ObservedGeneration is the last observed generation of the HelmChart
+ // object.
+ // +optional
+ ObservedGeneration int64 `json:"observedGeneration,omitempty"`
+
+ // ObservedSourceArtifactRevision is the last observed Artifact.Revision
+ // of the HelmChartSpec.SourceRef.
+ // +optional
+ ObservedSourceArtifactRevision string `json:"observedSourceArtifactRevision,omitempty"`
+
+ // ObservedChartName is the last observed chart name as specified by the
+ // resolved chart reference.
+ // +optional
+ ObservedChartName string `json:"observedChartName,omitempty"`
+
+ // ObservedValuesFiles are the observed value files of the last successful
+ // reconciliation.
+ // It matches the chart in the last successfully reconciled artifact.
+ // +optional
+ ObservedValuesFiles []string `json:"observedValuesFiles,omitempty"`
+
+ // Conditions holds the conditions for the HelmChart.
+ // +optional
+ Conditions []metav1.Condition `json:"conditions,omitempty"`
+
+ // URL is the dynamic fetch link for the latest Artifact.
+ // It is provided on a "best effort" basis, and using the precise
+ // BucketStatus.Artifact data is recommended.
+ // +optional
+ URL string `json:"url,omitempty"`
+
+ // Artifact represents the output of the last successful reconciliation.
+ // +optional
+ Artifact *meta.Artifact `json:"artifact,omitempty"`
+
+ meta.ReconcileRequestStatus `json:",inline"`
+}
+
+const (
+ // ChartPullSucceededReason signals that the pull of the Helm chart
+ // succeeded.
+ ChartPullSucceededReason string = "ChartPullSucceeded"
+
+ // ChartPackageSucceededReason signals that the package of the Helm
+ // chart succeeded.
+ ChartPackageSucceededReason string = "ChartPackageSucceeded"
+)
+
+// GetConditions returns the status conditions of the object.
+func (in HelmChart) GetConditions() []metav1.Condition {
+ return in.Status.Conditions
+}
+
+// SetConditions sets the status conditions on the object.
+func (in *HelmChart) SetConditions(conditions []metav1.Condition) {
+ in.Status.Conditions = conditions
+}
+
+// GetRequeueAfter returns the duration after which the source must be
+// reconciled again.
+func (in HelmChart) GetRequeueAfter() time.Duration {
+ return in.Spec.Interval.Duration
+}
+
+// GetArtifact returns the latest artifact from the source if present in the
+// status sub-resource.
+func (in *HelmChart) GetArtifact() *meta.Artifact {
+ return in.Status.Artifact
+}
+
+// GetValuesFiles returns a merged list of HelmChartSpec.ValuesFiles.
+func (in *HelmChart) GetValuesFiles() []string {
+ return in.Spec.ValuesFiles
+}
+
+// +genclient
+// +kubebuilder:storageversion
+// +kubebuilder:object:root=true
+// +kubebuilder:resource:shortName=hc
+// +kubebuilder:subresource:status
+// +kubebuilder:printcolumn:name="Chart",type=string,JSONPath=`.spec.chart`
+// +kubebuilder:printcolumn:name="Version",type=string,JSONPath=`.spec.version`
+// +kubebuilder:printcolumn:name="Source Kind",type=string,JSONPath=`.spec.sourceRef.kind`
+// +kubebuilder:printcolumn:name="Source Name",type=string,JSONPath=`.spec.sourceRef.name`
+// +kubebuilder:printcolumn:name="Age",type="date",JSONPath=".metadata.creationTimestamp",description=""
+// +kubebuilder:printcolumn:name="Ready",type="string",JSONPath=".status.conditions[?(@.type==\"Ready\")].status",description=""
+// +kubebuilder:printcolumn:name="Status",type="string",JSONPath=".status.conditions[?(@.type==\"Ready\")].message",description=""
+
+// HelmChart is the Schema for the helmcharts API.
+type HelmChart struct {
+ metav1.TypeMeta `json:",inline"`
+ metav1.ObjectMeta `json:"metadata,omitempty"`
+
+ Spec HelmChartSpec `json:"spec,omitempty"`
+ // +kubebuilder:default={"observedGeneration":-1}
+ Status HelmChartStatus `json:"status,omitempty"`
+}
+
+// HelmChartList contains a list of HelmChart objects.
+// +kubebuilder:object:root=true
+type HelmChartList struct {
+ metav1.TypeMeta `json:",inline"`
+ metav1.ListMeta `json:"metadata,omitempty"`
+ Items []HelmChart `json:"items"`
+}
+
+func init() {
+ SchemeBuilder.Register(&HelmChart{}, &HelmChartList{})
+}
diff --git a/api/v1/helmrepository_types.go b/api/v1/helmrepository_types.go
new file mode 100644
index 000000000..1c19064a5
--- /dev/null
+++ b/api/v1/helmrepository_types.go
@@ -0,0 +1,228 @@
+/*
+Copyright 2024 The Flux authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1
+
+import (
+ "time"
+
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+
+ "github.com/fluxcd/pkg/apis/acl"
+ "github.com/fluxcd/pkg/apis/meta"
+)
+
+const (
+ // HelmRepositoryKind is the string representation of a HelmRepository.
+ HelmRepositoryKind = "HelmRepository"
+ // HelmRepositoryURLIndexKey is the key used for indexing HelmRepository
+ // objects by their HelmRepositorySpec.URL.
+ HelmRepositoryURLIndexKey = ".metadata.helmRepositoryURL"
+ // HelmRepositoryTypeDefault is the default HelmRepository type.
+ // It is used when no type is specified and corresponds to a Helm repository.
+ HelmRepositoryTypeDefault = "default"
+ // HelmRepositoryTypeOCI is the type for an OCI repository.
+ HelmRepositoryTypeOCI = "oci"
+)
+
+// HelmRepositorySpec specifies the required configuration to produce an
+// Artifact for a Helm repository index YAML.
+type HelmRepositorySpec struct {
+ // URL of the Helm repository, a valid URL contains at least a protocol and
+ // host.
+ // +kubebuilder:validation:Pattern="^(http|https|oci)://.*$"
+ // +required
+ URL string `json:"url"`
+
+ // SecretRef specifies the Secret containing authentication credentials
+ // for the HelmRepository.
+ // For HTTP/S basic auth the secret must contain 'username' and 'password'
+ // fields.
+ // Support for TLS auth using the 'certFile' and 'keyFile', and/or 'caFile'
+ // keys is deprecated. Please use `.spec.certSecretRef` instead.
+ // +optional
+ SecretRef *meta.LocalObjectReference `json:"secretRef,omitempty"`
+
+ // CertSecretRef can be given the name of a Secret containing
+ // either or both of
+ //
+ // - a PEM-encoded client certificate (`tls.crt`) and private
+ // key (`tls.key`);
+ // - a PEM-encoded CA certificate (`ca.crt`)
+ //
+ // and whichever are supplied, will be used for connecting to the
+ // registry. The client cert and key are useful if you are
+ // authenticating with a certificate; the CA cert is useful if
+ // you are using a self-signed server certificate. The Secret must
+ // be of type `Opaque` or `kubernetes.io/tls`.
+ //
+ // It takes precedence over the values specified in the Secret referred
+ // to by `.spec.secretRef`.
+ // +optional
+ CertSecretRef *meta.LocalObjectReference `json:"certSecretRef,omitempty"`
+
+ // PassCredentials allows the credentials from the SecretRef to be passed
+ // on to a host that does not match the host as defined in URL.
+ // This may be required if the host of the advertised chart URLs in the
+ // index differ from the defined URL.
+ // Enabling this should be done with caution, as it can potentially result
+ // in credentials getting stolen in a MITM-attack.
+ // +optional
+ PassCredentials bool `json:"passCredentials,omitempty"`
+
+ // Interval at which the HelmRepository URL is checked for updates.
+ // This interval is approximate and may be subject to jitter to ensure
+ // efficient use of resources.
+ // +kubebuilder:validation:Type=string
+ // +kubebuilder:validation:Pattern="^([0-9]+(\\.[0-9]+)?(ms|s|m|h))+$"
+ // +optional
+ Interval metav1.Duration `json:"interval,omitempty"`
+
+ // Insecure allows connecting to a non-TLS HTTP container registry.
+ // This field is only taken into account if the .spec.type field is set to 'oci'.
+ // +optional
+ Insecure bool `json:"insecure,omitempty"`
+
+ // Timeout is used for the index fetch operation for an HTTPS helm repository,
+ // and for remote OCI Repository operations like pulling for an OCI helm
+ // chart by the associated HelmChart.
+ // Its default value is 60s.
+ // +kubebuilder:validation:Type=string
+ // +kubebuilder:validation:Pattern="^([0-9]+(\\.[0-9]+)?(ms|s|m))+$"
+ // +optional
+ Timeout *metav1.Duration `json:"timeout,omitempty"`
+
+ // Suspend tells the controller to suspend the reconciliation of this
+ // HelmRepository.
+ // +optional
+ Suspend bool `json:"suspend,omitempty"`
+
+ // AccessFrom specifies an Access Control List for allowing cross-namespace
+ // references to this object.
+ // NOTE: Not implemented, provisional as of https://github.com/fluxcd/flux2/pull/2092
+ // +optional
+ AccessFrom *acl.AccessFrom `json:"accessFrom,omitempty"`
+
+ // Type of the HelmRepository.
+ // When this field is set to "oci", the URL field value must be prefixed with "oci://".
+ // +kubebuilder:validation:Enum=default;oci
+ // +optional
+ Type string `json:"type,omitempty"`
+
+ // Provider used for authentication, can be 'aws', 'azure', 'gcp' or 'generic'.
+ // This field is optional, and only taken into account if the .spec.type field is set to 'oci'.
+ // When not specified, defaults to 'generic'.
+ // +kubebuilder:validation:Enum=generic;aws;azure;gcp
+ // +kubebuilder:default:=generic
+ // +optional
+ Provider string `json:"provider,omitempty"`
+}
+
+// HelmRepositoryStatus records the observed state of the HelmRepository.
+type HelmRepositoryStatus struct {
+ // ObservedGeneration is the last observed generation of the HelmRepository
+ // object.
+ // +optional
+ ObservedGeneration int64 `json:"observedGeneration,omitempty"`
+
+ // Conditions holds the conditions for the HelmRepository.
+ // +optional
+ Conditions []metav1.Condition `json:"conditions,omitempty"`
+
+ // URL is the dynamic fetch link for the latest Artifact.
+ // It is provided on a "best effort" basis, and using the precise
+ // HelmRepositoryStatus.Artifact data is recommended.
+ // +optional
+ URL string `json:"url,omitempty"`
+
+ // Artifact represents the last successful HelmRepository reconciliation.
+ // +optional
+ Artifact *meta.Artifact `json:"artifact,omitempty"`
+
+ meta.ReconcileRequestStatus `json:",inline"`
+}
+
+const (
+ // IndexationFailedReason signals that the HelmRepository index fetch
+ // failed.
+ IndexationFailedReason string = "IndexationFailed"
+)
+
+// GetConditions returns the status conditions of the object.
+func (in HelmRepository) GetConditions() []metav1.Condition {
+ return in.Status.Conditions
+}
+
+// SetConditions sets the status conditions on the object.
+func (in *HelmRepository) SetConditions(conditions []metav1.Condition) {
+ in.Status.Conditions = conditions
+}
+
+// GetRequeueAfter returns the duration after which the source must be
+// reconciled again.
+func (in HelmRepository) GetRequeueAfter() time.Duration {
+ if in.Spec.Interval.Duration != 0 {
+ return in.Spec.Interval.Duration
+ }
+ return time.Minute
+}
+
+// GetTimeout returns the timeout duration used for various operations related
+// to this HelmRepository.
+func (in HelmRepository) GetTimeout() time.Duration {
+ if in.Spec.Timeout != nil {
+ return in.Spec.Timeout.Duration
+ }
+ return time.Minute
+}
+
+// GetArtifact returns the latest artifact from the source if present in the
+// status sub-resource.
+func (in *HelmRepository) GetArtifact() *meta.Artifact {
+ return in.Status.Artifact
+}
+
+// +genclient
+// +kubebuilder:storageversion
+// +kubebuilder:object:root=true
+// +kubebuilder:resource:shortName=helmrepo
+// +kubebuilder:subresource:status
+// +kubebuilder:printcolumn:name="URL",type=string,JSONPath=`.spec.url`
+// +kubebuilder:printcolumn:name="Age",type="date",JSONPath=".metadata.creationTimestamp",description=""
+// +kubebuilder:printcolumn:name="Ready",type="string",JSONPath=".status.conditions[?(@.type==\"Ready\")].status",description=""
+// +kubebuilder:printcolumn:name="Status",type="string",JSONPath=".status.conditions[?(@.type==\"Ready\")].message",description=""
+
+// HelmRepository is the Schema for the helmrepositories API.
+type HelmRepository struct {
+ metav1.TypeMeta `json:",inline"`
+ metav1.ObjectMeta `json:"metadata,omitempty"`
+
+ Spec HelmRepositorySpec `json:"spec,omitempty"`
+ // +kubebuilder:default={"observedGeneration":-1}
+ Status HelmRepositoryStatus `json:"status,omitempty"`
+}
+
+// HelmRepositoryList contains a list of HelmRepository objects.
+// +kubebuilder:object:root=true
+type HelmRepositoryList struct {
+ metav1.TypeMeta `json:",inline"`
+ metav1.ListMeta `json:"metadata,omitempty"`
+ Items []HelmRepository `json:"items"`
+}
+
+func init() {
+ SchemeBuilder.Register(&HelmRepository{}, &HelmRepositoryList{})
+}
diff --git a/api/v1/ocirepository_types.go b/api/v1/ocirepository_types.go
new file mode 100644
index 000000000..8c4d3f0fc
--- /dev/null
+++ b/api/v1/ocirepository_types.go
@@ -0,0 +1,296 @@
+/*
+Copyright 2025 The Flux authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1
+
+import (
+ "time"
+
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+
+ "github.com/fluxcd/pkg/apis/meta"
+)
+
+const (
+ // OCIRepositoryKind is the string representation of an OCIRepository.
+ OCIRepositoryKind = "OCIRepository"
+
+ // OCIRepositoryPrefix is the prefix used for OCIRepository URLs.
+ OCIRepositoryPrefix = "oci://"
+
+ // GenericOCIProvider provides support for authentication using static credentials
+ // for any OCI compatible API such as Docker Registry, GitHub Container Registry,
+ // Docker Hub, Quay, etc.
+ GenericOCIProvider string = "generic"
+
+ // AmazonOCIProvider provides support for OCI authentication using AWS IRSA.
+ AmazonOCIProvider string = "aws"
+
+ // GoogleOCIProvider provides support for OCI authentication using GCP workload identity.
+ GoogleOCIProvider string = "gcp"
+
+	// AzureOCIProvider provides support for OCI authentication using an Azure Service Principal,
+ // Managed Identity or Shared Key.
+ AzureOCIProvider string = "azure"
+
+ // OCILayerExtract defines the operation type for extracting the content from an OCI artifact layer.
+ OCILayerExtract = "extract"
+
+ // OCILayerCopy defines the operation type for copying the content from an OCI artifact layer.
+ OCILayerCopy = "copy"
+)
+
+// OCIRepositorySpec defines the desired state of OCIRepository
+type OCIRepositorySpec struct {
+ // URL is a reference to an OCI artifact repository hosted
+ // on a remote container registry.
+ // +kubebuilder:validation:Pattern="^oci://.*$"
+ // +required
+ URL string `json:"url"`
+
+ // The OCI reference to pull and monitor for changes,
+ // defaults to the latest tag.
+ // +optional
+ Reference *OCIRepositoryRef `json:"ref,omitempty"`
+
+ // LayerSelector specifies which layer should be extracted from the OCI artifact.
+ // When not specified, the first layer found in the artifact is selected.
+ // +optional
+ LayerSelector *OCILayerSelector `json:"layerSelector,omitempty"`
+
+ // The provider used for authentication, can be 'aws', 'azure', 'gcp' or 'generic'.
+ // When not specified, defaults to 'generic'.
+ // +kubebuilder:validation:Enum=generic;aws;azure;gcp
+ // +kubebuilder:default:=generic
+ // +optional
+ Provider string `json:"provider,omitempty"`
+
+ // SecretRef contains the secret name containing the registry login
+ // credentials to resolve image metadata.
+ // The secret must be of type kubernetes.io/dockerconfigjson.
+ // +optional
+ SecretRef *meta.LocalObjectReference `json:"secretRef,omitempty"`
+
+ // Verify contains the secret name containing the trusted public keys
+ // used to verify the signature and specifies which provider to use to check
+ // whether OCI image is authentic.
+ // +optional
+ Verify *OCIRepositoryVerification `json:"verify,omitempty"`
+
+ // ServiceAccountName is the name of the Kubernetes ServiceAccount used to authenticate
+ // the image pull if the service account has attached pull secrets. For more information:
+ // https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/#add-imagepullsecrets-to-a-service-account
+ // +optional
+ ServiceAccountName string `json:"serviceAccountName,omitempty"`
+
+ // CertSecretRef can be given the name of a Secret containing
+ // either or both of
+ //
+ // - a PEM-encoded client certificate (`tls.crt`) and private
+ // key (`tls.key`);
+ // - a PEM-encoded CA certificate (`ca.crt`)
+ //
+ // and whichever are supplied, will be used for connecting to the
+ // registry. The client cert and key are useful if you are
+ // authenticating with a certificate; the CA cert is useful if
+ // you are using a self-signed server certificate. The Secret must
+ // be of type `Opaque` or `kubernetes.io/tls`.
+ // +optional
+ CertSecretRef *meta.LocalObjectReference `json:"certSecretRef,omitempty"`
+
+ // ProxySecretRef specifies the Secret containing the proxy configuration
+ // to use while communicating with the container registry.
+ // +optional
+ ProxySecretRef *meta.LocalObjectReference `json:"proxySecretRef,omitempty"`
+
+ // Interval at which the OCIRepository URL is checked for updates.
+ // This interval is approximate and may be subject to jitter to ensure
+ // efficient use of resources.
+ // +kubebuilder:validation:Type=string
+ // +kubebuilder:validation:Pattern="^([0-9]+(\\.[0-9]+)?(ms|s|m|h))+$"
+ // +required
+ Interval metav1.Duration `json:"interval"`
+
+ // The timeout for remote OCI Repository operations like pulling, defaults to 60s.
+ // +kubebuilder:default="60s"
+ // +kubebuilder:validation:Type=string
+ // +kubebuilder:validation:Pattern="^([0-9]+(\\.[0-9]+)?(ms|s|m))+$"
+ // +optional
+ Timeout *metav1.Duration `json:"timeout,omitempty"`
+
+ // Ignore overrides the set of excluded patterns in the .sourceignore format
+ // (which is the same as .gitignore). If not provided, a default will be used,
+ // consult the documentation for your version to find out what those are.
+ // +optional
+ Ignore *string `json:"ignore,omitempty"`
+
+ // Insecure allows connecting to a non-TLS HTTP container registry.
+ // +optional
+ Insecure bool `json:"insecure,omitempty"`
+
+ // This flag tells the controller to suspend the reconciliation of this source.
+ // +optional
+ Suspend bool `json:"suspend,omitempty"`
+}
+
+// OCIRepositoryRef defines the image reference for the OCIRepository's URL
+type OCIRepositoryRef struct {
+ // Digest is the image digest to pull, takes precedence over SemVer.
+	// The value should be in the format 'sha256:<HASH>'.
+ // +optional
+ Digest string `json:"digest,omitempty"`
+
+ // SemVer is the range of tags to pull selecting the latest within
+ // the range, takes precedence over Tag.
+ // +optional
+ SemVer string `json:"semver,omitempty"`
+
+ // SemverFilter is a regex pattern to filter the tags within the SemVer range.
+ // +optional
+ SemverFilter string `json:"semverFilter,omitempty"`
+
+ // Tag is the image tag to pull, defaults to latest.
+ // +optional
+ Tag string `json:"tag,omitempty"`
+}
+
+// OCILayerSelector specifies which layer should be extracted from an OCI Artifact
+type OCILayerSelector struct {
+ // MediaType specifies the OCI media type of the layer
+ // which should be extracted from the OCI Artifact. The
+ // first layer matching this type is selected.
+ // +optional
+ MediaType string `json:"mediaType,omitempty"`
+
+ // Operation specifies how the selected layer should be processed.
+ // By default, the layer compressed content is extracted to storage.
+ // When the operation is set to 'copy', the layer compressed content
+ // is persisted to storage as it is.
+ // +kubebuilder:validation:Enum=extract;copy
+ // +optional
+ Operation string `json:"operation,omitempty"`
+}
+
+// OCIRepositoryStatus defines the observed state of OCIRepository
+type OCIRepositoryStatus struct {
+ // ObservedGeneration is the last observed generation.
+ // +optional
+ ObservedGeneration int64 `json:"observedGeneration,omitempty"`
+
+ // Conditions holds the conditions for the OCIRepository.
+ // +optional
+ Conditions []metav1.Condition `json:"conditions,omitempty"`
+
+ // URL is the download link for the artifact output of the last OCI Repository sync.
+ // +optional
+ URL string `json:"url,omitempty"`
+
+ // Artifact represents the output of the last successful OCI Repository sync.
+ // +optional
+ Artifact *meta.Artifact `json:"artifact,omitempty"`
+
+ // ObservedIgnore is the observed exclusion patterns used for constructing
+ // the source artifact.
+ // +optional
+ ObservedIgnore *string `json:"observedIgnore,omitempty"`
+
+ // ObservedLayerSelector is the observed layer selector used for constructing
+ // the source artifact.
+ // +optional
+ ObservedLayerSelector *OCILayerSelector `json:"observedLayerSelector,omitempty"`
+
+ meta.ReconcileRequestStatus `json:",inline"`
+}
+
+const (
+ // OCIPullFailedReason signals that a pull operation failed.
+ OCIPullFailedReason string = "OCIArtifactPullFailed"
+
+ // OCILayerOperationFailedReason signals that an OCI layer operation failed.
+ OCILayerOperationFailedReason string = "OCIArtifactLayerOperationFailed"
+)
+
+// GetConditions returns the status conditions of the object.
+func (in OCIRepository) GetConditions() []metav1.Condition {
+ return in.Status.Conditions
+}
+
+// SetConditions sets the status conditions on the object.
+func (in *OCIRepository) SetConditions(conditions []metav1.Condition) {
+ in.Status.Conditions = conditions
+}
+
+// GetRequeueAfter returns the duration after which the OCIRepository must be
+// reconciled again.
+func (in OCIRepository) GetRequeueAfter() time.Duration {
+ return in.Spec.Interval.Duration
+}
+
+// GetArtifact returns the latest Artifact from the OCIRepository if present in
+// the status sub-resource.
+func (in *OCIRepository) GetArtifact() *meta.Artifact {
+ return in.Status.Artifact
+}
+
+// GetLayerMediaType returns the media type layer selector if found in spec.
+func (in *OCIRepository) GetLayerMediaType() string {
+ if in.Spec.LayerSelector == nil {
+ return ""
+ }
+
+ return in.Spec.LayerSelector.MediaType
+}
+
+// GetLayerOperation returns the layer selector operation (defaults to extract).
+func (in *OCIRepository) GetLayerOperation() string {
+ if in.Spec.LayerSelector == nil || in.Spec.LayerSelector.Operation == "" {
+ return OCILayerExtract
+ }
+
+ return in.Spec.LayerSelector.Operation
+}
+
+// +genclient
+// +kubebuilder:storageversion
+// +kubebuilder:object:root=true
+// +kubebuilder:resource:shortName=ocirepo
+// +kubebuilder:subresource:status
+// +kubebuilder:printcolumn:name="URL",type=string,JSONPath=`.spec.url`
+// +kubebuilder:printcolumn:name="Ready",type="string",JSONPath=".status.conditions[?(@.type==\"Ready\")].status",description=""
+// +kubebuilder:printcolumn:name="Status",type="string",JSONPath=".status.conditions[?(@.type==\"Ready\")].message",description=""
+// +kubebuilder:printcolumn:name="Age",type="date",JSONPath=".metadata.creationTimestamp",description=""
+
+// OCIRepository is the Schema for the ocirepositories API
+type OCIRepository struct {
+ metav1.TypeMeta `json:",inline"`
+ metav1.ObjectMeta `json:"metadata,omitempty"`
+
+ Spec OCIRepositorySpec `json:"spec,omitempty"`
+ // +kubebuilder:default={"observedGeneration":-1}
+ Status OCIRepositoryStatus `json:"status,omitempty"`
+}
+
+// OCIRepositoryList contains a list of OCIRepository
+// +kubebuilder:object:root=true
+type OCIRepositoryList struct {
+ metav1.TypeMeta `json:",inline"`
+ metav1.ListMeta `json:"metadata,omitempty"`
+ Items []OCIRepository `json:"items"`
+}
+
+func init() {
+ SchemeBuilder.Register(&OCIRepository{}, &OCIRepositoryList{})
+}
diff --git a/api/v1/ociverification_types.go b/api/v1/ociverification_types.go
new file mode 100644
index 000000000..de74be343
--- /dev/null
+++ b/api/v1/ociverification_types.go
@@ -0,0 +1,56 @@
+/*
+Copyright 2024 The Flux authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1
+
+import (
+ "github.com/fluxcd/pkg/apis/meta"
+)
+
+// OCIRepositoryVerification verifies the authenticity of an OCI Artifact
+type OCIRepositoryVerification struct {
+ // Provider specifies the technology used to sign the OCI Artifact.
+ // +kubebuilder:validation:Enum=cosign;notation
+ // +kubebuilder:default:=cosign
+ Provider string `json:"provider"`
+
+ // SecretRef specifies the Kubernetes Secret containing the
+ // trusted public keys.
+ // +optional
+ SecretRef *meta.LocalObjectReference `json:"secretRef,omitempty"`
+
+ // MatchOIDCIdentity specifies the identity matching criteria to use
+ // while verifying an OCI artifact which was signed using Cosign keyless
+ // signing. The artifact's identity is deemed to be verified if any of the
+ // specified matchers match against the identity.
+ // +optional
+ MatchOIDCIdentity []OIDCIdentityMatch `json:"matchOIDCIdentity,omitempty"`
+}
+
+// OIDCIdentityMatch specifies options for verifying the certificate identity,
+// i.e. the issuer and the subject of the certificate.
+type OIDCIdentityMatch struct {
+ // Issuer specifies the regex pattern to match against to verify
+ // the OIDC issuer in the Fulcio certificate. The pattern must be a
+ // valid Go regular expression.
+ // +required
+ Issuer string `json:"issuer"`
+ // Subject specifies the regex pattern to match against to verify
+ // the identity subject in the Fulcio certificate. The pattern must
+ // be a valid Go regular expression.
+ // +required
+ Subject string `json:"subject"`
+}
diff --git a/api/v1/source.go b/api/v1/source.go
new file mode 100644
index 000000000..d879f6034
--- /dev/null
+++ b/api/v1/source.go
@@ -0,0 +1,47 @@
+/*
+Copyright 2023 The Flux authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1
+
+import (
+ "time"
+
+ "k8s.io/apimachinery/pkg/runtime"
+
+ "github.com/fluxcd/pkg/apis/meta"
+)
+
+const (
+ // SourceIndexKey is the key used for indexing objects based on their
+ // referenced Source.
+ SourceIndexKey string = ".metadata.source"
+)
+
+// Source interface must be supported by all API types.
+// Source is the interface that provides generic access to the Artifact and
+// interval. It must be supported by all kinds of the source.toolkit.fluxcd.io
+// API group.
+//
+// +k8s:deepcopy-gen=false
+type Source interface {
+ runtime.Object
+ // GetRequeueAfter returns the duration after which the source must be
+ // reconciled again.
+ GetRequeueAfter() time.Duration
+ // GetArtifact returns the latest artifact from the source if present in
+ // the status sub-resource.
+ GetArtifact() *meta.Artifact
+}
diff --git a/pkg/git/libgit2/libgit2.go b/api/v1/sts_types.go
similarity index 56%
rename from pkg/git/libgit2/libgit2.go
rename to api/v1/sts_types.go
index e705e6b0a..4b1d05881 100644
--- a/pkg/git/libgit2/libgit2.go
+++ b/api/v1/sts_types.go
@@ -1,5 +1,5 @@
/*
-Copyright 2021 The Flux authors
+Copyright 2024 The Flux authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
@@ -14,10 +14,13 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
-package libgit2
-
-import "github.com/fluxcd/source-controller/pkg/git"
+package v1
const (
- Implementation git.Implementation = "libgit2"
+ // STSProviderAmazon represents the AWS provider for Security Token Service.
+ // Provides support for fetching temporary credentials from an AWS STS endpoint.
+ STSProviderAmazon string = "aws"
+ // STSProviderLDAP represents the LDAP provider for Security Token Service.
+ // Provides support for fetching temporary credentials from an LDAP endpoint.
+ STSProviderLDAP string = "ldap"
)
diff --git a/api/v1/zz_generated.deepcopy.go b/api/v1/zz_generated.deepcopy.go
new file mode 100644
index 000000000..14f1ba3c2
--- /dev/null
+++ b/api/v1/zz_generated.deepcopy.go
@@ -0,0 +1,998 @@
+//go:build !ignore_autogenerated
+
+/*
+Copyright 2025 The Flux authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by controller-gen. DO NOT EDIT.
+
+package v1
+
+import (
+ "github.com/fluxcd/pkg/apis/acl"
+ "github.com/fluxcd/pkg/apis/meta"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apimachinery/pkg/runtime"
+)
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *Bucket) DeepCopyInto(out *Bucket) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+ in.Spec.DeepCopyInto(&out.Spec)
+ in.Status.DeepCopyInto(&out.Status)
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Bucket.
+func (in *Bucket) DeepCopy() *Bucket {
+ if in == nil {
+ return nil
+ }
+ out := new(Bucket)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *Bucket) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *BucketList) DeepCopyInto(out *BucketList) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ListMeta.DeepCopyInto(&out.ListMeta)
+ if in.Items != nil {
+ in, out := &in.Items, &out.Items
+ *out = make([]Bucket, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BucketList.
+func (in *BucketList) DeepCopy() *BucketList {
+ if in == nil {
+ return nil
+ }
+ out := new(BucketList)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *BucketList) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *BucketSTSSpec) DeepCopyInto(out *BucketSTSSpec) {
+ *out = *in
+ if in.SecretRef != nil {
+ in, out := &in.SecretRef, &out.SecretRef
+ *out = new(meta.LocalObjectReference)
+ **out = **in
+ }
+ if in.CertSecretRef != nil {
+ in, out := &in.CertSecretRef, &out.CertSecretRef
+ *out = new(meta.LocalObjectReference)
+ **out = **in
+ }
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BucketSTSSpec.
+func (in *BucketSTSSpec) DeepCopy() *BucketSTSSpec {
+ if in == nil {
+ return nil
+ }
+ out := new(BucketSTSSpec)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *BucketSpec) DeepCopyInto(out *BucketSpec) {
+ *out = *in
+ if in.STS != nil {
+ in, out := &in.STS, &out.STS
+ *out = new(BucketSTSSpec)
+ (*in).DeepCopyInto(*out)
+ }
+ if in.SecretRef != nil {
+ in, out := &in.SecretRef, &out.SecretRef
+ *out = new(meta.LocalObjectReference)
+ **out = **in
+ }
+ if in.CertSecretRef != nil {
+ in, out := &in.CertSecretRef, &out.CertSecretRef
+ *out = new(meta.LocalObjectReference)
+ **out = **in
+ }
+ if in.ProxySecretRef != nil {
+ in, out := &in.ProxySecretRef, &out.ProxySecretRef
+ *out = new(meta.LocalObjectReference)
+ **out = **in
+ }
+ out.Interval = in.Interval
+ if in.Timeout != nil {
+ in, out := &in.Timeout, &out.Timeout
+ *out = new(metav1.Duration)
+ **out = **in
+ }
+ if in.Ignore != nil {
+ in, out := &in.Ignore, &out.Ignore
+ *out = new(string)
+ **out = **in
+ }
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BucketSpec.
+func (in *BucketSpec) DeepCopy() *BucketSpec {
+ if in == nil {
+ return nil
+ }
+ out := new(BucketSpec)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *BucketStatus) DeepCopyInto(out *BucketStatus) {
+ *out = *in
+ if in.Conditions != nil {
+ in, out := &in.Conditions, &out.Conditions
+ *out = make([]metav1.Condition, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ if in.Artifact != nil {
+ in, out := &in.Artifact, &out.Artifact
+ *out = new(meta.Artifact)
+ (*in).DeepCopyInto(*out)
+ }
+ if in.ObservedIgnore != nil {
+ in, out := &in.ObservedIgnore, &out.ObservedIgnore
+ *out = new(string)
+ **out = **in
+ }
+ out.ReconcileRequestStatus = in.ReconcileRequestStatus
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BucketStatus.
+func (in *BucketStatus) DeepCopy() *BucketStatus {
+ if in == nil {
+ return nil
+ }
+ out := new(BucketStatus)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ExternalArtifact) DeepCopyInto(out *ExternalArtifact) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+ in.Spec.DeepCopyInto(&out.Spec)
+ in.Status.DeepCopyInto(&out.Status)
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExternalArtifact.
+func (in *ExternalArtifact) DeepCopy() *ExternalArtifact {
+ if in == nil {
+ return nil
+ }
+ out := new(ExternalArtifact)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *ExternalArtifact) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ExternalArtifactList) DeepCopyInto(out *ExternalArtifactList) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ListMeta.DeepCopyInto(&out.ListMeta)
+ if in.Items != nil {
+ in, out := &in.Items, &out.Items
+ *out = make([]ExternalArtifact, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExternalArtifactList.
+func (in *ExternalArtifactList) DeepCopy() *ExternalArtifactList {
+ if in == nil {
+ return nil
+ }
+ out := new(ExternalArtifactList)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *ExternalArtifactList) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ExternalArtifactSpec) DeepCopyInto(out *ExternalArtifactSpec) {
+ *out = *in
+ if in.SourceRef != nil {
+ in, out := &in.SourceRef, &out.SourceRef
+ *out = new(meta.NamespacedObjectKindReference)
+ **out = **in
+ }
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExternalArtifactSpec.
+func (in *ExternalArtifactSpec) DeepCopy() *ExternalArtifactSpec {
+ if in == nil {
+ return nil
+ }
+ out := new(ExternalArtifactSpec)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ExternalArtifactStatus) DeepCopyInto(out *ExternalArtifactStatus) {
+ *out = *in
+ if in.Artifact != nil {
+ in, out := &in.Artifact, &out.Artifact
+ *out = new(meta.Artifact)
+ (*in).DeepCopyInto(*out)
+ }
+ if in.Conditions != nil {
+ in, out := &in.Conditions, &out.Conditions
+ *out = make([]metav1.Condition, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExternalArtifactStatus.
+func (in *ExternalArtifactStatus) DeepCopy() *ExternalArtifactStatus {
+ if in == nil {
+ return nil
+ }
+ out := new(ExternalArtifactStatus)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *GitRepository) DeepCopyInto(out *GitRepository) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+ in.Spec.DeepCopyInto(&out.Spec)
+ in.Status.DeepCopyInto(&out.Status)
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GitRepository.
+func (in *GitRepository) DeepCopy() *GitRepository {
+ if in == nil {
+ return nil
+ }
+ out := new(GitRepository)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *GitRepository) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *GitRepositoryInclude) DeepCopyInto(out *GitRepositoryInclude) {
+ *out = *in
+ out.GitRepositoryRef = in.GitRepositoryRef
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GitRepositoryInclude.
+func (in *GitRepositoryInclude) DeepCopy() *GitRepositoryInclude {
+ if in == nil {
+ return nil
+ }
+ out := new(GitRepositoryInclude)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *GitRepositoryList) DeepCopyInto(out *GitRepositoryList) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ListMeta.DeepCopyInto(&out.ListMeta)
+ if in.Items != nil {
+ in, out := &in.Items, &out.Items
+ *out = make([]GitRepository, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GitRepositoryList.
+func (in *GitRepositoryList) DeepCopy() *GitRepositoryList {
+ if in == nil {
+ return nil
+ }
+ out := new(GitRepositoryList)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *GitRepositoryList) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *GitRepositoryRef) DeepCopyInto(out *GitRepositoryRef) {
+ *out = *in
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GitRepositoryRef.
+func (in *GitRepositoryRef) DeepCopy() *GitRepositoryRef {
+ if in == nil {
+ return nil
+ }
+ out := new(GitRepositoryRef)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *GitRepositorySpec) DeepCopyInto(out *GitRepositorySpec) {
+ *out = *in
+ if in.SecretRef != nil {
+ in, out := &in.SecretRef, &out.SecretRef
+ *out = new(meta.LocalObjectReference)
+ **out = **in
+ }
+ out.Interval = in.Interval
+ if in.Timeout != nil {
+ in, out := &in.Timeout, &out.Timeout
+ *out = new(metav1.Duration)
+ **out = **in
+ }
+ if in.Reference != nil {
+ in, out := &in.Reference, &out.Reference
+ *out = new(GitRepositoryRef)
+ **out = **in
+ }
+ if in.Verification != nil {
+ in, out := &in.Verification, &out.Verification
+ *out = new(GitRepositoryVerification)
+ **out = **in
+ }
+ if in.ProxySecretRef != nil {
+ in, out := &in.ProxySecretRef, &out.ProxySecretRef
+ *out = new(meta.LocalObjectReference)
+ **out = **in
+ }
+ if in.Ignore != nil {
+ in, out := &in.Ignore, &out.Ignore
+ *out = new(string)
+ **out = **in
+ }
+ if in.Include != nil {
+ in, out := &in.Include, &out.Include
+ *out = make([]GitRepositoryInclude, len(*in))
+ copy(*out, *in)
+ }
+ if in.SparseCheckout != nil {
+ in, out := &in.SparseCheckout, &out.SparseCheckout
+ *out = make([]string, len(*in))
+ copy(*out, *in)
+ }
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GitRepositorySpec.
+func (in *GitRepositorySpec) DeepCopy() *GitRepositorySpec {
+ if in == nil {
+ return nil
+ }
+ out := new(GitRepositorySpec)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *GitRepositoryStatus) DeepCopyInto(out *GitRepositoryStatus) {
+ *out = *in
+ if in.Conditions != nil {
+ in, out := &in.Conditions, &out.Conditions
+ *out = make([]metav1.Condition, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ if in.Artifact != nil {
+ in, out := &in.Artifact, &out.Artifact
+ *out = new(meta.Artifact)
+ (*in).DeepCopyInto(*out)
+ }
+ if in.IncludedArtifacts != nil {
+ in, out := &in.IncludedArtifacts, &out.IncludedArtifacts
+ *out = make([]*meta.Artifact, len(*in))
+ for i := range *in {
+ if (*in)[i] != nil {
+ in, out := &(*in)[i], &(*out)[i]
+ *out = new(meta.Artifact)
+ (*in).DeepCopyInto(*out)
+ }
+ }
+ }
+ if in.ObservedIgnore != nil {
+ in, out := &in.ObservedIgnore, &out.ObservedIgnore
+ *out = new(string)
+ **out = **in
+ }
+ if in.ObservedInclude != nil {
+ in, out := &in.ObservedInclude, &out.ObservedInclude
+ *out = make([]GitRepositoryInclude, len(*in))
+ copy(*out, *in)
+ }
+ if in.ObservedSparseCheckout != nil {
+ in, out := &in.ObservedSparseCheckout, &out.ObservedSparseCheckout
+ *out = make([]string, len(*in))
+ copy(*out, *in)
+ }
+ if in.SourceVerificationMode != nil {
+ in, out := &in.SourceVerificationMode, &out.SourceVerificationMode
+ *out = new(GitVerificationMode)
+ **out = **in
+ }
+ out.ReconcileRequestStatus = in.ReconcileRequestStatus
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GitRepositoryStatus.
+func (in *GitRepositoryStatus) DeepCopy() *GitRepositoryStatus {
+ if in == nil {
+ return nil
+ }
+ out := new(GitRepositoryStatus)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *GitRepositoryVerification) DeepCopyInto(out *GitRepositoryVerification) {
+ *out = *in
+ out.SecretRef = in.SecretRef
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GitRepositoryVerification.
+func (in *GitRepositoryVerification) DeepCopy() *GitRepositoryVerification {
+ if in == nil {
+ return nil
+ }
+ out := new(GitRepositoryVerification)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *HelmChart) DeepCopyInto(out *HelmChart) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+ in.Spec.DeepCopyInto(&out.Spec)
+ in.Status.DeepCopyInto(&out.Status)
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HelmChart.
+func (in *HelmChart) DeepCopy() *HelmChart {
+ if in == nil {
+ return nil
+ }
+ out := new(HelmChart)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *HelmChart) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *HelmChartList) DeepCopyInto(out *HelmChartList) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ListMeta.DeepCopyInto(&out.ListMeta)
+ if in.Items != nil {
+ in, out := &in.Items, &out.Items
+ *out = make([]HelmChart, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HelmChartList.
+func (in *HelmChartList) DeepCopy() *HelmChartList {
+ if in == nil {
+ return nil
+ }
+ out := new(HelmChartList)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *HelmChartList) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *HelmChartSpec) DeepCopyInto(out *HelmChartSpec) {
+ *out = *in
+ out.SourceRef = in.SourceRef
+ out.Interval = in.Interval
+ if in.ValuesFiles != nil {
+ in, out := &in.ValuesFiles, &out.ValuesFiles
+ *out = make([]string, len(*in))
+ copy(*out, *in)
+ }
+ if in.Verify != nil {
+ in, out := &in.Verify, &out.Verify
+ *out = new(OCIRepositoryVerification)
+ (*in).DeepCopyInto(*out)
+ }
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HelmChartSpec.
+func (in *HelmChartSpec) DeepCopy() *HelmChartSpec {
+ if in == nil {
+ return nil
+ }
+ out := new(HelmChartSpec)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *HelmChartStatus) DeepCopyInto(out *HelmChartStatus) {
+ *out = *in
+ if in.ObservedValuesFiles != nil {
+ in, out := &in.ObservedValuesFiles, &out.ObservedValuesFiles
+ *out = make([]string, len(*in))
+ copy(*out, *in)
+ }
+ if in.Conditions != nil {
+ in, out := &in.Conditions, &out.Conditions
+ *out = make([]metav1.Condition, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ if in.Artifact != nil {
+ in, out := &in.Artifact, &out.Artifact
+ *out = new(meta.Artifact)
+ (*in).DeepCopyInto(*out)
+ }
+ out.ReconcileRequestStatus = in.ReconcileRequestStatus
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HelmChartStatus.
+func (in *HelmChartStatus) DeepCopy() *HelmChartStatus {
+ if in == nil {
+ return nil
+ }
+ out := new(HelmChartStatus)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *HelmRepository) DeepCopyInto(out *HelmRepository) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+ in.Spec.DeepCopyInto(&out.Spec)
+ in.Status.DeepCopyInto(&out.Status)
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HelmRepository.
+func (in *HelmRepository) DeepCopy() *HelmRepository {
+ if in == nil {
+ return nil
+ }
+ out := new(HelmRepository)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *HelmRepository) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *HelmRepositoryList) DeepCopyInto(out *HelmRepositoryList) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ListMeta.DeepCopyInto(&out.ListMeta)
+ if in.Items != nil {
+ in, out := &in.Items, &out.Items
+ *out = make([]HelmRepository, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HelmRepositoryList.
+func (in *HelmRepositoryList) DeepCopy() *HelmRepositoryList {
+ if in == nil {
+ return nil
+ }
+ out := new(HelmRepositoryList)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *HelmRepositoryList) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *HelmRepositorySpec) DeepCopyInto(out *HelmRepositorySpec) {
+ *out = *in
+ if in.SecretRef != nil {
+ in, out := &in.SecretRef, &out.SecretRef
+ *out = new(meta.LocalObjectReference)
+ **out = **in
+ }
+ if in.CertSecretRef != nil {
+ in, out := &in.CertSecretRef, &out.CertSecretRef
+ *out = new(meta.LocalObjectReference)
+ **out = **in
+ }
+ out.Interval = in.Interval
+ if in.Timeout != nil {
+ in, out := &in.Timeout, &out.Timeout
+ *out = new(metav1.Duration)
+ **out = **in
+ }
+ if in.AccessFrom != nil {
+ in, out := &in.AccessFrom, &out.AccessFrom
+ *out = new(acl.AccessFrom)
+ (*in).DeepCopyInto(*out)
+ }
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HelmRepositorySpec.
+func (in *HelmRepositorySpec) DeepCopy() *HelmRepositorySpec {
+ if in == nil {
+ return nil
+ }
+ out := new(HelmRepositorySpec)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *HelmRepositoryStatus) DeepCopyInto(out *HelmRepositoryStatus) {
+ *out = *in
+ if in.Conditions != nil {
+ in, out := &in.Conditions, &out.Conditions
+ *out = make([]metav1.Condition, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ if in.Artifact != nil {
+ in, out := &in.Artifact, &out.Artifact
+ *out = new(meta.Artifact)
+ (*in).DeepCopyInto(*out)
+ }
+ out.ReconcileRequestStatus = in.ReconcileRequestStatus
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HelmRepositoryStatus.
+func (in *HelmRepositoryStatus) DeepCopy() *HelmRepositoryStatus {
+ if in == nil {
+ return nil
+ }
+ out := new(HelmRepositoryStatus)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *LocalHelmChartSourceReference) DeepCopyInto(out *LocalHelmChartSourceReference) {
+ *out = *in
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LocalHelmChartSourceReference.
+func (in *LocalHelmChartSourceReference) DeepCopy() *LocalHelmChartSourceReference {
+ if in == nil {
+ return nil
+ }
+ out := new(LocalHelmChartSourceReference)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *OCILayerSelector) DeepCopyInto(out *OCILayerSelector) {
+ *out = *in
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OCILayerSelector.
+func (in *OCILayerSelector) DeepCopy() *OCILayerSelector {
+ if in == nil {
+ return nil
+ }
+ out := new(OCILayerSelector)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *OCIRepository) DeepCopyInto(out *OCIRepository) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+ in.Spec.DeepCopyInto(&out.Spec)
+ in.Status.DeepCopyInto(&out.Status)
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OCIRepository.
+func (in *OCIRepository) DeepCopy() *OCIRepository {
+ if in == nil {
+ return nil
+ }
+ out := new(OCIRepository)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *OCIRepository) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *OCIRepositoryList) DeepCopyInto(out *OCIRepositoryList) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ListMeta.DeepCopyInto(&out.ListMeta)
+ if in.Items != nil {
+ in, out := &in.Items, &out.Items
+ *out = make([]OCIRepository, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OCIRepositoryList.
+func (in *OCIRepositoryList) DeepCopy() *OCIRepositoryList {
+ if in == nil {
+ return nil
+ }
+ out := new(OCIRepositoryList)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *OCIRepositoryList) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *OCIRepositoryRef) DeepCopyInto(out *OCIRepositoryRef) {
+ *out = *in
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OCIRepositoryRef.
+func (in *OCIRepositoryRef) DeepCopy() *OCIRepositoryRef {
+ if in == nil {
+ return nil
+ }
+ out := new(OCIRepositoryRef)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *OCIRepositorySpec) DeepCopyInto(out *OCIRepositorySpec) {
+ *out = *in
+ if in.Reference != nil {
+ in, out := &in.Reference, &out.Reference
+ *out = new(OCIRepositoryRef)
+ **out = **in
+ }
+ if in.LayerSelector != nil {
+ in, out := &in.LayerSelector, &out.LayerSelector
+ *out = new(OCILayerSelector)
+ **out = **in
+ }
+ if in.SecretRef != nil {
+ in, out := &in.SecretRef, &out.SecretRef
+ *out = new(meta.LocalObjectReference)
+ **out = **in
+ }
+ if in.Verify != nil {
+ in, out := &in.Verify, &out.Verify
+ *out = new(OCIRepositoryVerification)
+ (*in).DeepCopyInto(*out)
+ }
+ if in.CertSecretRef != nil {
+ in, out := &in.CertSecretRef, &out.CertSecretRef
+ *out = new(meta.LocalObjectReference)
+ **out = **in
+ }
+ if in.ProxySecretRef != nil {
+ in, out := &in.ProxySecretRef, &out.ProxySecretRef
+ *out = new(meta.LocalObjectReference)
+ **out = **in
+ }
+ out.Interval = in.Interval
+ if in.Timeout != nil {
+ in, out := &in.Timeout, &out.Timeout
+ *out = new(metav1.Duration)
+ **out = **in
+ }
+ if in.Ignore != nil {
+ in, out := &in.Ignore, &out.Ignore
+ *out = new(string)
+ **out = **in
+ }
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OCIRepositorySpec.
+func (in *OCIRepositorySpec) DeepCopy() *OCIRepositorySpec {
+ if in == nil {
+ return nil
+ }
+ out := new(OCIRepositorySpec)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *OCIRepositoryStatus) DeepCopyInto(out *OCIRepositoryStatus) {
+ *out = *in
+ if in.Conditions != nil {
+ in, out := &in.Conditions, &out.Conditions
+ *out = make([]metav1.Condition, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ if in.Artifact != nil {
+ in, out := &in.Artifact, &out.Artifact
+ *out = new(meta.Artifact)
+ (*in).DeepCopyInto(*out)
+ }
+ if in.ObservedIgnore != nil {
+ in, out := &in.ObservedIgnore, &out.ObservedIgnore
+ *out = new(string)
+ **out = **in
+ }
+ if in.ObservedLayerSelector != nil {
+ in, out := &in.ObservedLayerSelector, &out.ObservedLayerSelector
+ *out = new(OCILayerSelector)
+ **out = **in
+ }
+ out.ReconcileRequestStatus = in.ReconcileRequestStatus
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OCIRepositoryStatus.
+func (in *OCIRepositoryStatus) DeepCopy() *OCIRepositoryStatus {
+ if in == nil {
+ return nil
+ }
+ out := new(OCIRepositoryStatus)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *OCIRepositoryVerification) DeepCopyInto(out *OCIRepositoryVerification) {
+ *out = *in
+ if in.SecretRef != nil {
+ in, out := &in.SecretRef, &out.SecretRef
+ *out = new(meta.LocalObjectReference)
+ **out = **in
+ }
+ if in.MatchOIDCIdentity != nil {
+ in, out := &in.MatchOIDCIdentity, &out.MatchOIDCIdentity
+ *out = make([]OIDCIdentityMatch, len(*in))
+ copy(*out, *in)
+ }
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OCIRepositoryVerification.
+func (in *OCIRepositoryVerification) DeepCopy() *OCIRepositoryVerification {
+ if in == nil {
+ return nil
+ }
+ out := new(OCIRepositoryVerification)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *OIDCIdentityMatch) DeepCopyInto(out *OIDCIdentityMatch) {
+ *out = *in
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OIDCIdentityMatch.
+func (in *OIDCIdentityMatch) DeepCopy() *OIDCIdentityMatch {
+ if in == nil {
+ return nil
+ }
+ out := new(OIDCIdentityMatch)
+ in.DeepCopyInto(out)
+ return out
+}
diff --git a/api/v1beta1/bucket_types.go b/api/v1beta1/bucket_types.go
index 0d5f3de81..e64321c9d 100644
--- a/api/v1beta1/bucket_types.go
+++ b/api/v1beta1/bucket_types.go
@@ -193,13 +193,8 @@ func (in *Bucket) GetInterval() metav1.Duration {
}
// +genclient
-// +genclient:Namespaced
// +kubebuilder:object:root=true
-// +kubebuilder:subresource:status
-// +kubebuilder:printcolumn:name="Endpoint",type=string,JSONPath=`.spec.endpoint`
-// +kubebuilder:printcolumn:name="Ready",type="string",JSONPath=".status.conditions[?(@.type==\"Ready\")].status",description=""
-// +kubebuilder:printcolumn:name="Status",type="string",JSONPath=".status.conditions[?(@.type==\"Ready\")].message",description=""
-// +kubebuilder:printcolumn:name="Age",type="date",JSONPath=".metadata.creationTimestamp",description=""
+// +kubebuilder:skipversion
// Bucket is the Schema for the buckets API
type Bucket struct {
diff --git a/api/v1beta1/doc.go b/api/v1beta1/doc.go
index 7a768a45d..f604a2624 100644
--- a/api/v1beta1/doc.go
+++ b/api/v1beta1/doc.go
@@ -15,6 +15,9 @@ limitations under the License.
*/
// Package v1beta1 contains API Schema definitions for the source v1beta1 API group
+//
+// Deprecated: v1beta1 is no longer supported, use v1 instead.
+//
// +kubebuilder:object:generate=true
// +groupName=source.toolkit.fluxcd.io
package v1beta1
diff --git a/api/v1beta1/gitrepository_types.go b/api/v1beta1/gitrepository_types.go
index c84055e03..05cce7c60 100644
--- a/api/v1beta1/gitrepository_types.go
+++ b/api/v1beta1/gitrepository_types.go
@@ -265,14 +265,9 @@ func (in *GitRepository) GetInterval() metav1.Duration {
}
// +genclient
-// +genclient:Namespaced
// +kubebuilder:object:root=true
// +kubebuilder:resource:shortName=gitrepo
-// +kubebuilder:subresource:status
-// +kubebuilder:printcolumn:name="URL",type=string,JSONPath=`.spec.url`
-// +kubebuilder:printcolumn:name="Ready",type="string",JSONPath=".status.conditions[?(@.type==\"Ready\")].status",description=""
-// +kubebuilder:printcolumn:name="Status",type="string",JSONPath=".status.conditions[?(@.type==\"Ready\")].message",description=""
-// +kubebuilder:printcolumn:name="Age",type="date",JSONPath=".metadata.creationTimestamp",description=""
+// +kubebuilder:skipversion
// GitRepository is the Schema for the gitrepositories API
type GitRepository struct {
diff --git a/api/v1beta1/helmchart_types.go b/api/v1beta1/helmchart_types.go
index 8d4c0a02d..22e5dda58 100644
--- a/api/v1beta1/helmchart_types.go
+++ b/api/v1beta1/helmchart_types.go
@@ -231,17 +231,9 @@ func (in *HelmChart) GetValuesFiles() []string {
}
// +genclient
-// +genclient:Namespaced
// +kubebuilder:object:root=true
// +kubebuilder:resource:shortName=hc
-// +kubebuilder:subresource:status
-// +kubebuilder:printcolumn:name="Chart",type=string,JSONPath=`.spec.chart`
-// +kubebuilder:printcolumn:name="Version",type=string,JSONPath=`.spec.version`
-// +kubebuilder:printcolumn:name="Source Kind",type=string,JSONPath=`.spec.sourceRef.kind`
-// +kubebuilder:printcolumn:name="Source Name",type=string,JSONPath=`.spec.sourceRef.name`
-// +kubebuilder:printcolumn:name="Ready",type="string",JSONPath=".status.conditions[?(@.type==\"Ready\")].status",description=""
-// +kubebuilder:printcolumn:name="Status",type="string",JSONPath=".status.conditions[?(@.type==\"Ready\")].message",description=""
-// +kubebuilder:printcolumn:name="Age",type="date",JSONPath=".metadata.creationTimestamp",description=""
+// +kubebuilder:skipversion
// HelmChart is the Schema for the helmcharts API
type HelmChart struct {
diff --git a/api/v1beta1/helmrepository_types.go b/api/v1beta1/helmrepository_types.go
index 62b0e9a6d..4530b82a9 100644
--- a/api/v1beta1/helmrepository_types.go
+++ b/api/v1beta1/helmrepository_types.go
@@ -43,7 +43,7 @@ type HelmRepositorySpec struct {
// For HTTP/S basic auth the secret must contain username and
// password fields.
// For TLS the secret must contain a certFile and keyFile, and/or
- // caCert fields.
+ // caFile fields.
// +optional
SecretRef *meta.LocalObjectReference `json:"secretRef,omitempty"`
@@ -181,14 +181,9 @@ func (in *HelmRepository) GetInterval() metav1.Duration {
}
// +genclient
-// +genclient:Namespaced
// +kubebuilder:object:root=true
// +kubebuilder:resource:shortName=helmrepo
-// +kubebuilder:subresource:status
-// +kubebuilder:printcolumn:name="URL",type=string,JSONPath=`.spec.url`
-// +kubebuilder:printcolumn:name="Ready",type="string",JSONPath=".status.conditions[?(@.type==\"Ready\")].status",description=""
-// +kubebuilder:printcolumn:name="Status",type="string",JSONPath=".status.conditions[?(@.type==\"Ready\")].message",description=""
-// +kubebuilder:printcolumn:name="Age",type="date",JSONPath=".metadata.creationTimestamp",description=""
+// +kubebuilder:skipversion
// HelmRepository is the Schema for the helmrepositories API
type HelmRepository struct {
diff --git a/api/v1beta1/zz_generated.deepcopy.go b/api/v1beta1/zz_generated.deepcopy.go
index 3fd54793d..10be7301e 100644
--- a/api/v1beta1/zz_generated.deepcopy.go
+++ b/api/v1beta1/zz_generated.deepcopy.go
@@ -1,8 +1,7 @@
//go:build !ignore_autogenerated
-// +build !ignore_autogenerated
/*
-Copyright 2022 The Flux authors
+Copyright 2025 The Flux authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
diff --git a/api/v1beta2/artifact_types.go b/api/v1beta2/artifact_types.go
index 0832b6ce5..cc88d2a0c 100644
--- a/api/v1beta2/artifact_types.go
+++ b/api/v1beta2/artifact_types.go
@@ -18,12 +18,16 @@ package v1beta2
import (
"path"
+ "regexp"
"strings"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
// Artifact represents the output of a Source reconciliation.
+//
+// Deprecated: use Artifact from api/v1 instead. This type will be removed in
+// a future release.
type Artifact struct {
// Path is the relative file path of the Artifact. It can be used to locate
// the file in the root of the Artifact storage on the local file system of
@@ -43,8 +47,14 @@ type Artifact struct {
Revision string `json:"revision"`
// Checksum is the SHA256 checksum of the Artifact file.
+ // Deprecated: use Artifact.Digest instead.
// +optional
- Checksum string `json:"checksum"`
+ Checksum string `json:"checksum,omitempty"`
+
+	// Digest is the digest of the file in the form of '<algorithm>:<checksum>'.
+ // +optional
+ // +kubebuilder:validation:Pattern="^[a-z0-9]+(?:[.+_-][a-z0-9]+)*:[a-zA-Z0-9=_-]+$"
+ Digest string `json:"digest,omitempty"`
// LastUpdateTime is the timestamp corresponding to the last update of the
// Artifact.
@@ -66,7 +76,7 @@ func (in *Artifact) HasRevision(revision string) bool {
if in == nil {
return false
}
- return in.Revision == revision
+ return TransformLegacyRevision(in.Revision) == TransformLegacyRevision(revision)
}
// HasChecksum returns if the given checksum matches the current Checksum of
@@ -90,3 +100,60 @@ func ArtifactDir(kind, namespace, name string) string {
func ArtifactPath(kind, namespace, name, filename string) string {
return path.Join(ArtifactDir(kind, namespace, name), filename)
}
+
+// TransformLegacyRevision transforms a "legacy" revision string into a "new"
+// revision string. It accepts the following formats:
+//
+// - main/5394cb7f48332b2de7c17dd8b8384bbc84b7e738
+// - feature/branch/5394cb7f48332b2de7c17dd8b8384bbc84b7e738
+// - HEAD/5394cb7f48332b2de7c17dd8b8384bbc84b7e738
+// - tag/55609ff9d959589ed917ce32e6bc0f0a36809565f308602c15c3668965979edc
+// - d52bde83c5b2bd0fa7910264e0afc3ac9cfe9b6636ca29c05c09742f01d5a4bd
+//
+// Which are transformed into the following formats respectively:
+//
+// - main@sha1:5394cb7f48332b2de7c17dd8b8384bbc84b7e738
+// - feature/branch@sha1:5394cb7f48332b2de7c17dd8b8384bbc84b7e738
+// - sha1:5394cb7f48332b2de7c17dd8b8384bbc84b7e738
+// - tag@sha256:55609ff9d959589ed917ce32e6bc0f0a36809565f308602c15c3668965979edc
+// - sha256:d52bde83c5b2bd0fa7910264e0afc3ac9cfe9b6636ca29c05c09742f01d5a4bd
+//
+// Deprecated: this function exists for backwards compatibility with existing
+// resources, and to provide a transition period. Will be removed in a future
+// release.
+func TransformLegacyRevision(rev string) string {
+ if rev != "" && strings.LastIndex(rev, ":") == -1 {
+ if i := strings.LastIndex(rev, "/"); i >= 0 {
+ sha := rev[i+1:]
+ if algo := determineSHAType(sha); algo != "" {
+ if name := rev[:i]; name != "HEAD" {
+ return name + "@" + algo + ":" + sha
+ }
+ return algo + ":" + sha
+ }
+ }
+ if algo := determineSHAType(rev); algo != "" {
+ return algo + ":" + rev
+ }
+ }
+ return rev
+}
+
+// isAlphaNumHex returns true if the given string only contains 0-9 and a-f
+// characters.
+var isAlphaNumHex = regexp.MustCompile(`^[0-9a-f]+$`).MatchString
+
+// determineSHAType returns the SHA algorithm used to compute the provided hex.
+// The determination is heuristic and based on the length of the hex string. If
+// the size is not recognized, an empty string is returned.
+func determineSHAType(hex string) string {
+ if isAlphaNumHex(hex) {
+ switch len(hex) {
+ case 40:
+ return "sha1"
+ case 64:
+ return "sha256"
+ }
+ }
+ return ""
+}
diff --git a/api/v1beta2/artifact_types_test.go b/api/v1beta2/artifact_types_test.go
new file mode 100644
index 000000000..ccf578de3
--- /dev/null
+++ b/api/v1beta2/artifact_types_test.go
@@ -0,0 +1,78 @@
+/*
+Copyright 2023 The Flux authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1beta2
+
+import "testing"
+
+func TestTransformLegacyRevision(t *testing.T) {
+ tests := []struct {
+ rev string
+ want string
+ }{
+ {
+ rev: "HEAD/5394cb7f48332b2de7c17dd8b8384bbc84b7e738",
+ want: "sha1:5394cb7f48332b2de7c17dd8b8384bbc84b7e738",
+ },
+ {
+ rev: "main/5394cb7f48332b2de7c17dd8b8384bbc84b7e738",
+ want: "main@sha1:5394cb7f48332b2de7c17dd8b8384bbc84b7e738",
+ },
+ {
+ rev: "main@sha1:5394cb7f48332b2de7c17dd8b8384bbc84b7e738",
+ want: "main@sha1:5394cb7f48332b2de7c17dd8b8384bbc84b7e738",
+ },
+ {
+ rev: "feature/branch/5394cb7f48332b2de7c17dd8b8384bbc84b7e738",
+ want: "feature/branch@sha1:5394cb7f48332b2de7c17dd8b8384bbc84b7e738",
+ },
+ {
+ rev: "feature/branch@sha1:5394cb7f48332b2de7c17dd8b8384bbc84b7e738",
+ want: "feature/branch@sha1:5394cb7f48332b2de7c17dd8b8384bbc84b7e738",
+ },
+ {
+ rev: "5ac85ca617f3774baff4ae0a420b810b2546dbc9af9f346b1d55c5ed9873c55c",
+ want: "sha256:5ac85ca617f3774baff4ae0a420b810b2546dbc9af9f346b1d55c5ed9873c55c",
+ },
+ {
+ rev: "v1.0.0",
+ want: "v1.0.0",
+ },
+ {
+ rev: "v1.0.0-rc1",
+ want: "v1.0.0-rc1",
+ },
+ {
+ rev: "v1.0.0-rc1+metadata",
+ want: "v1.0.0-rc1+metadata",
+ },
+ {
+ rev: "arbitrary/revision",
+ want: "arbitrary/revision",
+ },
+ {
+ rev: "5394cb7f48332b2de7c17dd8b8384bbc84b7xxxx",
+ want: "5394cb7f48332b2de7c17dd8b8384bbc84b7xxxx",
+ },
+ }
+ for _, tt := range tests {
+ t.Run(tt.rev, func(t *testing.T) {
+ if got := TransformLegacyRevision(tt.rev); got != tt.want {
+ t.Errorf("TransformLegacyRevision() = %v, want %v", got, tt.want)
+ }
+ })
+ }
+}
diff --git a/api/v1beta2/bucket_types.go b/api/v1beta2/bucket_types.go
index 2ea66e465..6495abdd0 100644
--- a/api/v1beta2/bucket_types.go
+++ b/api/v1beta2/bucket_types.go
@@ -23,6 +23,8 @@ import (
"github.com/fluxcd/pkg/apis/acl"
"github.com/fluxcd/pkg/apis/meta"
+
+ apiv1 "github.com/fluxcd/source-controller/api/v1"
)
const (
@@ -31,22 +33,48 @@ const (
)
const (
+ // BucketProviderGeneric for any S3 API compatible storage Bucket.
+ BucketProviderGeneric string = apiv1.BucketProviderGeneric
+ // BucketProviderAmazon for an AWS S3 object storage Bucket.
+ // Provides support for retrieving credentials from the AWS EC2 service.
+ BucketProviderAmazon string = apiv1.BucketProviderAmazon
+ // BucketProviderGoogle for a Google Cloud Storage Bucket.
+ // Provides support for authentication using a workload identity.
+ BucketProviderGoogle string = apiv1.BucketProviderGoogle
+ // BucketProviderAzure for an Azure Blob Storage Bucket.
+ // Provides support for authentication using a Service Principal,
+ // Managed Identity or Shared Key.
+ BucketProviderAzure string = apiv1.BucketProviderAzure
+
// GenericBucketProvider for any S3 API compatible storage Bucket.
- GenericBucketProvider string = "generic"
+ //
+ // Deprecated: use BucketProviderGeneric.
+ GenericBucketProvider string = apiv1.BucketProviderGeneric
// AmazonBucketProvider for an AWS S3 object storage Bucket.
// Provides support for retrieving credentials from the AWS EC2 service.
- AmazonBucketProvider string = "aws"
+ //
+ // Deprecated: use BucketProviderAmazon.
+ AmazonBucketProvider string = apiv1.BucketProviderAmazon
// GoogleBucketProvider for a Google Cloud Storage Bucket.
// Provides support for authentication using a workload identity.
- GoogleBucketProvider string = "gcp"
+ //
+ // Deprecated: use BucketProviderGoogle.
+ GoogleBucketProvider string = apiv1.BucketProviderGoogle
// AzureBucketProvider for an Azure Blob Storage Bucket.
// Provides support for authentication using a Service Principal,
// Managed Identity or Shared Key.
- AzureBucketProvider string = "azure"
+ //
+ // Deprecated: use BucketProviderAzure.
+ AzureBucketProvider string = apiv1.BucketProviderAzure
)
// BucketSpec specifies the required configuration to produce an Artifact for
// an object storage bucket.
+// +kubebuilder:validation:XValidation:rule="self.provider == 'aws' || self.provider == 'generic' || !has(self.sts)", message="STS configuration is only supported for the 'aws' and 'generic' Bucket providers"
+// +kubebuilder:validation:XValidation:rule="self.provider != 'aws' || !has(self.sts) || self.sts.provider == 'aws'", message="'aws' is the only supported STS provider for the 'aws' Bucket provider"
+// +kubebuilder:validation:XValidation:rule="self.provider != 'generic' || !has(self.sts) || self.sts.provider == 'ldap'", message="'ldap' is the only supported STS provider for the 'generic' Bucket provider"
+// +kubebuilder:validation:XValidation:rule="!has(self.sts) || self.sts.provider != 'aws' || !has(self.sts.secretRef)", message="spec.sts.secretRef is not required for the 'aws' STS provider"
+// +kubebuilder:validation:XValidation:rule="!has(self.sts) || self.sts.provider != 'aws' || !has(self.sts.certSecretRef)", message="spec.sts.certSecretRef is not required for the 'aws' STS provider"
type BucketSpec struct {
// Provider of the object storage bucket.
// Defaults to 'generic', which expects an S3 (API) compatible object
@@ -64,6 +92,14 @@ type BucketSpec struct {
// +required
Endpoint string `json:"endpoint"`
+ // STS specifies the required configuration to use a Security Token
+ // Service for fetching temporary credentials to authenticate in a
+ // Bucket provider.
+ //
+ // This field is only supported for the `aws` and `generic` providers.
+ // +optional
+ STS *BucketSTSSpec `json:"sts,omitempty"`
+
// Insecure allows connecting to a non-TLS HTTP Endpoint.
// +optional
Insecure bool `json:"insecure,omitempty"`
@@ -72,17 +108,49 @@ type BucketSpec struct {
// +optional
Region string `json:"region,omitempty"`
+ // Prefix to use for server-side filtering of files in the Bucket.
+ // +optional
+ Prefix string `json:"prefix,omitempty"`
+
// SecretRef specifies the Secret containing authentication credentials
// for the Bucket.
// +optional
SecretRef *meta.LocalObjectReference `json:"secretRef,omitempty"`
- // Interval at which to check the Endpoint for updates.
+ // CertSecretRef can be given the name of a Secret containing
+ // either or both of
+ //
+ // - a PEM-encoded client certificate (`tls.crt`) and private
+ // key (`tls.key`);
+ // - a PEM-encoded CA certificate (`ca.crt`)
+ //
+ // and whichever are supplied, will be used for connecting to the
+ // bucket. The client cert and key are useful if you are
+ // authenticating with a certificate; the CA cert is useful if
+ // you are using a self-signed server certificate. The Secret must
+ // be of type `Opaque` or `kubernetes.io/tls`.
+ //
+ // This field is only supported for the `generic` provider.
+ // +optional
+ CertSecretRef *meta.LocalObjectReference `json:"certSecretRef,omitempty"`
+
+ // ProxySecretRef specifies the Secret containing the proxy configuration
+ // to use while communicating with the Bucket server.
+ // +optional
+ ProxySecretRef *meta.LocalObjectReference `json:"proxySecretRef,omitempty"`
+
+ // Interval at which the Bucket Endpoint is checked for updates.
+ // This interval is approximate and may be subject to jitter to ensure
+ // efficient use of resources.
+ // +kubebuilder:validation:Type=string
+ // +kubebuilder:validation:Pattern="^([0-9]+(\\.[0-9]+)?(ms|s|m|h))+$"
// +required
Interval metav1.Duration `json:"interval"`
// Timeout for fetch operations, defaults to 60s.
// +kubebuilder:default="60s"
+ // +kubebuilder:validation:Type=string
+ // +kubebuilder:validation:Pattern="^([0-9]+(\\.[0-9]+)?(ms|s|m))+$"
// +optional
Timeout *metav1.Duration `json:"timeout,omitempty"`
@@ -104,6 +172,45 @@ type BucketSpec struct {
AccessFrom *acl.AccessFrom `json:"accessFrom,omitempty"`
}
+// BucketSTSSpec specifies the required configuration to use a Security Token
+// Service for fetching temporary credentials to authenticate in a Bucket
+// provider.
+type BucketSTSSpec struct {
+ // Provider of the Security Token Service.
+ // +kubebuilder:validation:Enum=aws;ldap
+ // +required
+ Provider string `json:"provider"`
+
+ // Endpoint is the HTTP/S endpoint of the Security Token Service from
+ // where temporary credentials will be fetched.
+ // +required
+ // +kubebuilder:validation:Pattern="^(http|https)://.*$"
+ Endpoint string `json:"endpoint"`
+
+ // SecretRef specifies the Secret containing authentication credentials
+ // for the STS endpoint. This Secret must contain the fields `username`
+ // and `password` and is supported only for the `ldap` provider.
+ // +optional
+ SecretRef *meta.LocalObjectReference `json:"secretRef,omitempty"`
+
+ // CertSecretRef can be given the name of a Secret containing
+ // either or both of
+ //
+ // - a PEM-encoded client certificate (`tls.crt`) and private
+ // key (`tls.key`);
+ // - a PEM-encoded CA certificate (`ca.crt`)
+ //
+ // and whichever are supplied, will be used for connecting to the
+ // STS endpoint. The client cert and key are useful if you are
+ // authenticating with a certificate; the CA cert is useful if
+ // you are using a self-signed server certificate. The Secret must
+ // be of type `Opaque` or `kubernetes.io/tls`.
+ //
+ // This field is only supported for the `ldap` provider.
+ // +optional
+ CertSecretRef *meta.LocalObjectReference `json:"certSecretRef,omitempty"`
+}
+
// BucketStatus records the observed state of a Bucket.
type BucketStatus struct {
// ObservedGeneration is the last observed generation of the Bucket object.
@@ -122,7 +229,12 @@ type BucketStatus struct {
// Artifact represents the last successful Bucket reconciliation.
// +optional
- Artifact *Artifact `json:"artifact,omitempty"`
+ Artifact *meta.Artifact `json:"artifact,omitempty"`
+
+ // ObservedIgnore is the observed exclusion patterns used for constructing
+ // the source artifact.
+ // +optional
+ ObservedIgnore *string `json:"observedIgnore,omitempty"`
meta.ReconcileRequestStatus `json:",inline"`
}
@@ -153,15 +265,14 @@ func (in Bucket) GetRequeueAfter() time.Duration {
}
// GetArtifact returns the latest artifact from the source if present in the status sub-resource.
-func (in *Bucket) GetArtifact() *Artifact {
+func (in *Bucket) GetArtifact() *meta.Artifact {
return in.Status.Artifact
}
// +genclient
-// +genclient:Namespaced
-// +kubebuilder:storageversion
// +kubebuilder:object:root=true
// +kubebuilder:subresource:status
+// +kubebuilder:deprecatedversion:warning="v1beta2 Bucket is deprecated, upgrade to v1"
// +kubebuilder:printcolumn:name="Endpoint",type=string,JSONPath=`.spec.endpoint`
// +kubebuilder:printcolumn:name="Age",type="date",JSONPath=".metadata.creationTimestamp",description=""
// +kubebuilder:printcolumn:name="Ready",type="string",JSONPath=".status.conditions[?(@.type==\"Ready\")].status",description=""
diff --git a/api/v1beta2/condition_types.go b/api/v1beta2/condition_types.go
index 711469eb8..2b93a1795 100644
--- a/api/v1beta2/condition_types.go
+++ b/api/v1beta2/condition_types.go
@@ -71,6 +71,10 @@ const (
// required fields, or the provided credentials do not match.
AuthenticationFailedReason string = "AuthenticationFailed"
+ // VerificationError signals that the Source's verification
+ // check failed.
+ VerificationError string = "VerificationError"
+
// DirCreationFailedReason signals a failure caused by a directory creation
// operation.
DirCreationFailedReason string = "DirectoryCreationFailed"
diff --git a/api/v1beta2/gitrepository_types.go b/api/v1beta2/gitrepository_types.go
index de736c861..89beeb9a7 100644
--- a/api/v1beta2/gitrepository_types.go
+++ b/api/v1beta2/gitrepository_types.go
@@ -55,18 +55,22 @@ type GitRepositorySpec struct {
// SecretRef specifies the Secret containing authentication credentials for
// the GitRepository.
// For HTTPS repositories the Secret must contain 'username' and 'password'
- // fields.
+ // fields for basic auth or 'bearerToken' field for token auth.
// For SSH repositories the Secret must contain 'identity'
// and 'known_hosts' fields.
// +optional
SecretRef *meta.LocalObjectReference `json:"secretRef,omitempty"`
// Interval at which to check the GitRepository for updates.
+ // +kubebuilder:validation:Type=string
+ // +kubebuilder:validation:Pattern="^([0-9]+(\\.[0-9]+)?(ms|s|m|h))+$"
// +required
Interval metav1.Duration `json:"interval"`
// Timeout for Git operations like cloning, defaults to 60s.
// +kubebuilder:default="60s"
+ // +kubebuilder:validation:Type=string
+ // +kubebuilder:validation:Pattern="^([0-9]+(\\.[0-9]+)?(ms|s|m))+$"
// +optional
Timeout *metav1.Duration `json:"timeout,omitempty"`
@@ -93,6 +97,8 @@ type GitRepositorySpec struct {
// GitImplementation specifies which Git client library implementation to
// use. Defaults to 'go-git', valid values are ('go-git', 'libgit2').
+ // Deprecated: gitImplementation is deprecated now that 'go-git' is the
+ // only supported implementation.
// +kubebuilder:validation:Enum=go-git;libgit2
// +kubebuilder:default:=go-git
// +optional
@@ -100,7 +106,6 @@ type GitRepositorySpec struct {
// RecurseSubmodules enables the initialization of all submodules within
// the GitRepository as cloned from the URL, using their default settings.
- // This option is available only when using the 'go-git' GitImplementation.
// +optional
RecurseSubmodules bool `json:"recurseSubmodules,omitempty"`
@@ -150,9 +155,6 @@ func (in *GitRepositoryInclude) GetToPath() string {
// GitRepositoryRef specifies the Git reference to resolve and checkout.
type GitRepositoryRef struct {
// Branch to check out, defaults to 'master' if no other field is defined.
- //
- // When GitRepositorySpec.GitImplementation is set to 'go-git', a shallow
- // clone of the specified branch is performed.
// +optional
Branch string `json:"branch,omitempty"`
@@ -164,11 +166,17 @@ type GitRepositoryRef struct {
// +optional
SemVer string `json:"semver,omitempty"`
+ // Name of the reference to check out; takes precedence over Branch, Tag and SemVer.
+ //
+ // It must be a valid Git reference: https://git-scm.com/docs/git-check-ref-format#_description
+ // Examples: "refs/heads/main", "refs/tags/v0.1.0", "refs/pull/420/head", "refs/merge-requests/1/head"
+ // +optional
+ Name string `json:"name,omitempty"`
+
// Commit SHA to check out, takes precedence over all reference fields.
//
- // When GitRepositorySpec.GitImplementation is set to 'go-git', this can be
- // combined with Branch to shallow clone the branch, in which the commit is
- // expected to exist.
+ // This can be combined with Branch to shallow clone the branch, in which
+ // the commit is expected to exist.
// +optional
Commit string `json:"commit,omitempty"`
}
@@ -182,7 +190,7 @@ type GitRepositoryVerification struct {
// SecretRef specifies the Secret containing the public keys of trusted Git
// authors.
- SecretRef meta.LocalObjectReference `json:"secretRef,omitempty"`
+ SecretRef meta.LocalObjectReference `json:"secretRef"`
}
// GitRepositoryStatus records the observed state of a Git repository.
@@ -204,12 +212,12 @@ type GitRepositoryStatus struct {
// Artifact represents the last successful GitRepository reconciliation.
// +optional
- Artifact *Artifact `json:"artifact,omitempty"`
+ Artifact *meta.Artifact `json:"artifact,omitempty"`
// IncludedArtifacts contains a list of the last successfully included
// Artifacts as instructed by GitRepositorySpec.Include.
// +optional
- IncludedArtifacts []*Artifact `json:"includedArtifacts,omitempty"`
+ IncludedArtifacts []*meta.Artifact `json:"includedArtifacts,omitempty"`
// ContentConfigChecksum is a checksum of all the configurations related to
// the content of the source artifact:
@@ -220,9 +228,27 @@ type GitRepositoryStatus struct {
// be used to determine if the content of the included repository has
// changed.
// It has the format of `:`, for example: `sha256:`.
+ //
+ // Deprecated: Replaced with explicit fields for observed artifact content
+ // config in the status.
// +optional
ContentConfigChecksum string `json:"contentConfigChecksum,omitempty"`
+ // ObservedIgnore is the observed exclusion patterns used for constructing
+ // the source artifact.
+ // +optional
+ ObservedIgnore *string `json:"observedIgnore,omitempty"`
+
+ // ObservedRecurseSubmodules is the observed resource submodules
+ // configuration used to produce the current Artifact.
+ // +optional
+ ObservedRecurseSubmodules bool `json:"observedRecurseSubmodules,omitempty"`
+
+ // ObservedInclude is the observed list of GitRepository resources used
+ // to produce the current Artifact.
+ // +optional
+ ObservedInclude []GitRepositoryInclude `json:"observedInclude,omitempty"`
+
meta.ReconcileRequestStatus `json:",inline"`
}
@@ -254,16 +280,15 @@ func (in GitRepository) GetRequeueAfter() time.Duration {
// GetArtifact returns the latest Artifact from the GitRepository if present in
// the status sub-resource.
-func (in *GitRepository) GetArtifact() *Artifact {
+func (in *GitRepository) GetArtifact() *meta.Artifact {
return in.Status.Artifact
}
// +genclient
-// +genclient:Namespaced
-// +kubebuilder:storageversion
// +kubebuilder:object:root=true
// +kubebuilder:resource:shortName=gitrepo
// +kubebuilder:subresource:status
+// +kubebuilder:deprecatedversion:warning="v1beta2 GitRepository is deprecated, upgrade to v1"
// +kubebuilder:printcolumn:name="URL",type=string,JSONPath=`.spec.url`
// +kubebuilder:printcolumn:name="Age",type="date",JSONPath=".metadata.creationTimestamp",description=""
// +kubebuilder:printcolumn:name="Ready",type="string",JSONPath=".status.conditions[?(@.type==\"Ready\")].status",description=""
diff --git a/api/v1beta2/helmchart_types.go b/api/v1beta2/helmchart_types.go
index 2ce5a942f..ac24b1c13 100644
--- a/api/v1beta2/helmchart_types.go
+++ b/api/v1beta2/helmchart_types.go
@@ -23,6 +23,8 @@ import (
"github.com/fluxcd/pkg/apis/acl"
"github.com/fluxcd/pkg/apis/meta"
+
+ apiv1 "github.com/fluxcd/source-controller/api/v1"
)
// HelmChartKind is the string representation of a HelmChart.
@@ -45,7 +47,11 @@ type HelmChartSpec struct {
// +required
SourceRef LocalHelmChartSourceReference `json:"sourceRef"`
- // Interval is the interval at which to check the Source for updates.
+ // Interval at which the HelmChart SourceRef is checked for updates.
+ // This interval is approximate and may be subject to jitter to ensure
+ // efficient use of resources.
+ // +kubebuilder:validation:Type=string
+ // +kubebuilder:validation:Pattern="^([0-9]+(\\.[0-9]+)?(ms|s|m|h))+$"
// +required
Interval metav1.Duration `json:"interval"`
@@ -74,6 +80,11 @@ type HelmChartSpec struct {
// +deprecated
ValuesFile string `json:"valuesFile,omitempty"`
+ // IgnoreMissingValuesFiles controls whether to silently ignore missing values
+ // files rather than failing.
+ // +optional
+ IgnoreMissingValuesFiles bool `json:"ignoreMissingValuesFiles,omitempty"`
+
// Suspend tells the controller to suspend the reconciliation of this
// source.
// +optional
@@ -84,6 +95,14 @@ type HelmChartSpec struct {
// NOTE: Not implemented, provisional as of https://github.com/fluxcd/flux2/pull/2092
// +optional
AccessFrom *acl.AccessFrom `json:"accessFrom,omitempty"`
+
+ // Verify contains the secret name containing the trusted public keys
+ // used to verify the signature and specifies which provider to use to check
+ // whether OCI image is authentic.
+ // This field is only supported when using HelmRepository source with spec.type 'oci'.
+ // Chart dependencies, which are not bundled in the umbrella chart artifact, are not verified.
+ // +optional
+ Verify *apiv1.OCIRepositoryVerification `json:"verify,omitempty"`
}
const (
@@ -129,6 +148,12 @@ type HelmChartStatus struct {
// +optional
ObservedChartName string `json:"observedChartName,omitempty"`
+ // ObservedValuesFiles are the observed value files of the last successful
+ // reconciliation.
+ // It matches the chart in the last successfully reconciled artifact.
+ // +optional
+ ObservedValuesFiles []string `json:"observedValuesFiles,omitempty"`
+
// Conditions holds the conditions for the HelmChart.
// +optional
Conditions []metav1.Condition `json:"conditions,omitempty"`
@@ -141,7 +166,7 @@ type HelmChartStatus struct {
// Artifact represents the output of the last successful reconciliation.
// +optional
- Artifact *Artifact `json:"artifact,omitempty"`
+ Artifact *meta.Artifact `json:"artifact,omitempty"`
meta.ReconcileRequestStatus `json:",inline"`
}
@@ -174,7 +199,7 @@ func (in HelmChart) GetRequeueAfter() time.Duration {
// GetArtifact returns the latest artifact from the source if present in the
// status sub-resource.
-func (in *HelmChart) GetArtifact() *Artifact {
+func (in *HelmChart) GetArtifact() *meta.Artifact {
return in.Status.Artifact
}
@@ -190,11 +215,10 @@ func (in *HelmChart) GetValuesFiles() []string {
}
// +genclient
-// +genclient:Namespaced
-// +kubebuilder:storageversion
// +kubebuilder:object:root=true
// +kubebuilder:resource:shortName=hc
// +kubebuilder:subresource:status
+// +kubebuilder:deprecatedversion:warning="v1beta2 HelmChart is deprecated, upgrade to v1"
// +kubebuilder:printcolumn:name="Chart",type=string,JSONPath=`.spec.chart`
// +kubebuilder:printcolumn:name="Version",type=string,JSONPath=`.spec.version`
// +kubebuilder:printcolumn:name="Source Kind",type=string,JSONPath=`.spec.sourceRef.kind`
diff --git a/api/v1beta2/helmrepository_types.go b/api/v1beta2/helmrepository_types.go
index 87c0b16b8..56cbd928c 100644
--- a/api/v1beta2/helmrepository_types.go
+++ b/api/v1beta2/helmrepository_types.go
@@ -43,6 +43,7 @@ const (
type HelmRepositorySpec struct {
// URL of the Helm repository, a valid URL contains at least a protocol and
// host.
+ // +kubebuilder:validation:Pattern="^(http|https|oci)://.*$"
// +required
URL string `json:"url"`
@@ -50,11 +51,29 @@ type HelmRepositorySpec struct {
// for the HelmRepository.
// For HTTP/S basic auth the secret must contain 'username' and 'password'
// fields.
- // For TLS the secret must contain a 'certFile' and 'keyFile', and/or
- // 'caCert' fields.
+ // Support for TLS auth using the 'certFile' and 'keyFile', and/or 'caFile'
+ // keys is deprecated. Please use `.spec.certSecretRef` instead.
// +optional
SecretRef *meta.LocalObjectReference `json:"secretRef,omitempty"`
+ // CertSecretRef can be given the name of a Secret containing
+ // either or both of
+ //
+ // - a PEM-encoded client certificate (`tls.crt`) and private
+ // key (`tls.key`);
+ // - a PEM-encoded CA certificate (`ca.crt`)
+ //
+ // and whichever are supplied, will be used for connecting to the
+ // registry. The client cert and key are useful if you are
+ // authenticating with a certificate; the CA cert is useful if
+ // you are using a self-signed server certificate. The Secret must
+ // be of type `Opaque` or `kubernetes.io/tls`.
+ //
+ // It takes precedence over the values specified in the Secret referred
+ // to by `.spec.secretRef`.
+ // +optional
+ CertSecretRef *meta.LocalObjectReference `json:"certSecretRef,omitempty"`
+
// PassCredentials allows the credentials from the SecretRef to be passed
// on to a host that does not match the host as defined in URL.
// This may be required if the host of the advertised chart URLs in the
@@ -64,12 +83,25 @@ type HelmRepositorySpec struct {
// +optional
PassCredentials bool `json:"passCredentials,omitempty"`
- // Interval at which to check the URL for updates.
- // +required
- Interval metav1.Duration `json:"interval"`
+ // Interval at which the HelmRepository URL is checked for updates.
+ // This interval is approximate and may be subject to jitter to ensure
+ // efficient use of resources.
+ // +kubebuilder:validation:Type=string
+ // +kubebuilder:validation:Pattern="^([0-9]+(\\.[0-9]+)?(ms|s|m|h))+$"
+ // +optional
+ Interval metav1.Duration `json:"interval,omitempty"`
- // Timeout of the index fetch operation, defaults to 60s.
- // +kubebuilder:default:="60s"
+ // Insecure allows connecting to a non-TLS HTTP container registry.
+ // This field is only taken into account if the .spec.type field is set to 'oci'.
+ // +optional
+ Insecure bool `json:"insecure,omitempty"`
+
+ // Timeout is used for the index fetch operation for an HTTPS helm repository,
+ // and for remote OCI Repository operations like pulling for an OCI helm
+ // chart by the associated HelmChart.
+ // Its default value is 60s.
+ // +kubebuilder:validation:Type=string
+ // +kubebuilder:validation:Pattern="^([0-9]+(\\.[0-9]+)?(ms|s|m))+$"
// +optional
Timeout *metav1.Duration `json:"timeout,omitempty"`
@@ -89,6 +121,14 @@ type HelmRepositorySpec struct {
// +kubebuilder:validation:Enum=default;oci
// +optional
Type string `json:"type,omitempty"`
+
+ // Provider used for authentication, can be 'aws', 'azure', 'gcp' or 'generic'.
+ // This field is optional, and only taken into account if the .spec.type field is set to 'oci'.
+ // When not specified, defaults to 'generic'.
+ // +kubebuilder:validation:Enum=generic;aws;azure;gcp
+ // +kubebuilder:default:=generic
+ // +optional
+ Provider string `json:"provider,omitempty"`
}
// HelmRepositoryStatus records the observed state of the HelmRepository.
@@ -110,7 +150,7 @@ type HelmRepositoryStatus struct {
// Artifact represents the last successful HelmRepository reconciliation.
// +optional
- Artifact *Artifact `json:"artifact,omitempty"`
+ Artifact *meta.Artifact `json:"artifact,omitempty"`
meta.ReconcileRequestStatus `json:",inline"`
}
@@ -134,21 +174,32 @@ func (in *HelmRepository) SetConditions(conditions []metav1.Condition) {
// GetRequeueAfter returns the duration after which the source must be
// reconciled again.
func (in HelmRepository) GetRequeueAfter() time.Duration {
- return in.Spec.Interval.Duration
+ if in.Spec.Interval.Duration != 0 {
+ return in.Spec.Interval.Duration
+ }
+ return time.Minute
+}
+
+// GetTimeout returns the timeout duration used for various operations related
+// to this HelmRepository.
+func (in HelmRepository) GetTimeout() time.Duration {
+ if in.Spec.Timeout != nil {
+ return in.Spec.Timeout.Duration
+ }
+ return time.Minute
}
// GetArtifact returns the latest artifact from the source if present in the
// status sub-resource.
-func (in *HelmRepository) GetArtifact() *Artifact {
+func (in *HelmRepository) GetArtifact() *meta.Artifact {
return in.Status.Artifact
}
// +genclient
-// +genclient:Namespaced
-// +kubebuilder:storageversion
// +kubebuilder:object:root=true
// +kubebuilder:resource:shortName=helmrepo
// +kubebuilder:subresource:status
+// +kubebuilder:deprecatedversion:warning="v1beta2 HelmRepository is deprecated, upgrade to v1"
// +kubebuilder:printcolumn:name="URL",type=string,JSONPath=`.spec.url`
// +kubebuilder:printcolumn:name="Age",type="date",JSONPath=".metadata.creationTimestamp",description=""
// +kubebuilder:printcolumn:name="Ready",type="string",JSONPath=".status.conditions[?(@.type==\"Ready\")].status",description=""
diff --git a/api/v1beta2/ocirepository_types.go b/api/v1beta2/ocirepository_types.go
index 83ff7f3ff..760f0d8f1 100644
--- a/api/v1beta2/ocirepository_types.go
+++ b/api/v1beta2/ocirepository_types.go
@@ -22,6 +22,8 @@ import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"github.com/fluxcd/pkg/apis/meta"
+
+ apiv1 "github.com/fluxcd/source-controller/api/v1"
)
const (
@@ -45,6 +47,12 @@ const (
// AzureOCIProvider provides support for OCI authentication using a Azure Service Principal,
// Managed Identity or Shared Key.
AzureOCIProvider string = "azure"
+
+ // OCILayerExtract defines the operation type for extracting the content from an OCI artifact layer.
+ OCILayerExtract = "extract"
+
+ // OCILayerCopy defines the operation type for copying the content from an OCI artifact layer.
+ OCILayerCopy = "copy"
)
// OCIRepositorySpec defines the desired state of OCIRepository
@@ -60,6 +68,11 @@ type OCIRepositorySpec struct {
// +optional
Reference *OCIRepositoryRef `json:"ref,omitempty"`
+ // LayerSelector specifies which layer should be extracted from the OCI artifact.
+ // When not specified, the first layer found in the artifact is selected.
+ // +optional
+ LayerSelector *OCILayerSelector `json:"layerSelector,omitempty"`
+
// The provider used for authentication, can be 'aws', 'azure', 'gcp' or 'generic'.
// When not specified, defaults to 'generic'.
// +kubebuilder:validation:Enum=generic;aws;azure;gcp
@@ -73,32 +86,53 @@ type OCIRepositorySpec struct {
// +optional
SecretRef *meta.LocalObjectReference `json:"secretRef,omitempty"`
+ // Verify contains the secret name containing the trusted public keys
+ // used to verify the signature and specifies which provider to use to check
+ // whether OCI image is authentic.
+ // +optional
+ Verify *apiv1.OCIRepositoryVerification `json:"verify,omitempty"`
+
// ServiceAccountName is the name of the Kubernetes ServiceAccount used to authenticate
// the image pull if the service account has attached pull secrets. For more information:
// https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/#add-imagepullsecrets-to-a-service-account
// +optional
ServiceAccountName string `json:"serviceAccountName,omitempty"`
- // CertSecretRef can be given the name of a secret containing
+ // CertSecretRef can be given the name of a Secret containing
// either or both of
//
- // - a PEM-encoded client certificate (`certFile`) and private
- // key (`keyFile`);
- // - a PEM-encoded CA certificate (`caFile`)
+ // - a PEM-encoded client certificate (`tls.crt`) and private
+ // key (`tls.key`);
+ // - a PEM-encoded CA certificate (`ca.crt`)
//
- // and whichever are supplied, will be used for connecting to the
- // registry. The client cert and key are useful if you are
- // authenticating with a certificate; the CA cert is useful if
- // you are using a self-signed server certificate.
+ // and whichever are supplied, will be used for connecting to the
+ // registry. The client cert and key are useful if you are
+ // authenticating with a certificate; the CA cert is useful if
+ // you are using a self-signed server certificate. The Secret must
+ // be of type `Opaque` or `kubernetes.io/tls`.
+ //
+ // Note: Support for the `caFile`, `certFile` and `keyFile` keys have
+ // been deprecated.
// +optional
CertSecretRef *meta.LocalObjectReference `json:"certSecretRef,omitempty"`
- // The interval at which to check for image updates.
+ // ProxySecretRef specifies the Secret containing the proxy configuration
+ // to use while communicating with the container registry.
+ // +optional
+ ProxySecretRef *meta.LocalObjectReference `json:"proxySecretRef,omitempty"`
+
+ // Interval at which the OCIRepository URL is checked for updates.
+ // This interval is approximate and may be subject to jitter to ensure
+ // efficient use of resources.
+ // +kubebuilder:validation:Type=string
+ // +kubebuilder:validation:Pattern="^([0-9]+(\\.[0-9]+)?(ms|s|m|h))+$"
// +required
Interval metav1.Duration `json:"interval"`
// The timeout for remote OCI Repository operations like pulling, defaults to 60s.
// +kubebuilder:default="60s"
+ // +kubebuilder:validation:Type=string
+ // +kubebuilder:validation:Pattern="^([0-9]+(\\.[0-9]+)?(ms|s|m))+$"
// +optional
Timeout *metav1.Duration `json:"timeout,omitempty"`
@@ -108,6 +142,10 @@ type OCIRepositorySpec struct {
// +optional
Ignore *string `json:"ignore,omitempty"`
+ // Insecure allows connecting to a non-TLS HTTP container registry.
+ // +optional
+ Insecure bool `json:"insecure,omitempty"`
+
// This flag tells the controller to suspend the reconciliation of this source.
// +optional
Suspend bool `json:"suspend,omitempty"`
@@ -125,20 +163,30 @@ type OCIRepositoryRef struct {
// +optional
SemVer string `json:"semver,omitempty"`
+ // SemverFilter is a regex pattern to filter the tags within the SemVer range.
+ // +optional
+ SemverFilter string `json:"semverFilter,omitempty"`
+
// Tag is the image tag to pull, defaults to latest.
// +optional
Tag string `json:"tag,omitempty"`
}
-// OCIRepositoryVerification verifies the authenticity of an OCI Artifact
-type OCIRepositoryVerification struct {
- // Provider specifies the technology used to sign the OCI Artifact.
- // +kubebuilder:validation:Enum=cosign
- Provider string `json:"provider"`
+// OCILayerSelector specifies which layer should be extracted from an OCI Artifact
+type OCILayerSelector struct {
+ // MediaType specifies the OCI media type of the layer
+ // which should be extracted from the OCI Artifact. The
+ // first layer matching this type is selected.
+ // +optional
+ MediaType string `json:"mediaType,omitempty"`
- // SecretRef specifies the Kubernetes Secret containing the
- // trusted public keys.
- SecretRef meta.LocalObjectReference `json:"secretRef"`
+ // Operation specifies how the selected layer should be processed.
+ // By default, the layer compressed content is extracted to storage.
+ // When the operation is set to 'copy', the layer compressed content
+ // is persisted to storage as it is.
+ // +kubebuilder:validation:Enum=extract;copy
+ // +optional
+ Operation string `json:"operation,omitempty"`
}
// OCIRepositoryStatus defines the observed state of OCIRepository
@@ -157,7 +205,31 @@ type OCIRepositoryStatus struct {
// Artifact represents the output of the last successful OCI Repository sync.
// +optional
- Artifact *Artifact `json:"artifact,omitempty"`
+ Artifact *meta.Artifact `json:"artifact,omitempty"`
+
+ // ContentConfigChecksum is a checksum of all the configurations related to
+ // the content of the source artifact:
+ // - .spec.ignore
+ // - .spec.layerSelector
+ // observed in .status.observedGeneration version of the object. This can
+ // be used to determine if the content configuration has changed and the
+ // artifact needs to be rebuilt.
+ // It has the format of `:`, for example: `sha256:`.
+ //
+ // Deprecated: Replaced with explicit fields for observed artifact content
+ // config in the status.
+ // +optional
+ ContentConfigChecksum string `json:"contentConfigChecksum,omitempty"`
+
+ // ObservedIgnore is the observed exclusion patterns used for constructing
+ // the source artifact.
+ // +optional
+ ObservedIgnore *string `json:"observedIgnore,omitempty"`
+
+ // ObservedLayerSelector is the observed layer selector used for constructing
+ // the source artifact.
+ // +optional
+ ObservedLayerSelector *OCILayerSelector `json:"observedLayerSelector,omitempty"`
meta.ReconcileRequestStatus `json:",inline"`
}
@@ -188,16 +260,33 @@ func (in OCIRepository) GetRequeueAfter() time.Duration {
// GetArtifact returns the latest Artifact from the OCIRepository if present in
// the status sub-resource.
-func (in *OCIRepository) GetArtifact() *Artifact {
+func (in *OCIRepository) GetArtifact() *meta.Artifact {
return in.Status.Artifact
}
+// GetLayerMediaType returns the media type layer selector if found in spec.
+func (in *OCIRepository) GetLayerMediaType() string {
+ if in.Spec.LayerSelector == nil {
+ return ""
+ }
+
+ return in.Spec.LayerSelector.MediaType
+}
+
+// GetLayerOperation returns the layer selector operation (defaults to extract).
+func (in *OCIRepository) GetLayerOperation() string {
+ if in.Spec.LayerSelector == nil || in.Spec.LayerSelector.Operation == "" {
+ return OCILayerExtract
+ }
+
+ return in.Spec.LayerSelector.Operation
+}
+
// +genclient
-// +genclient:Namespaced
-// +kubebuilder:storageversion
// +kubebuilder:object:root=true
// +kubebuilder:resource:shortName=ocirepo
// +kubebuilder:subresource:status
+// +kubebuilder:deprecatedversion:warning="v1beta2 OCIRepository is deprecated, upgrade to v1"
// +kubebuilder:printcolumn:name="URL",type=string,JSONPath=`.spec.url`
// +kubebuilder:printcolumn:name="Ready",type="string",JSONPath=".status.conditions[?(@.type==\"Ready\")].status",description=""
// +kubebuilder:printcolumn:name="Status",type="string",JSONPath=".status.conditions[?(@.type==\"Ready\")].message",description=""
diff --git a/api/v1beta2/source.go b/api/v1beta2/source.go
index 76e2cc21e..4111c0998 100644
--- a/api/v1beta2/source.go
+++ b/api/v1beta2/source.go
@@ -33,6 +33,9 @@ const (
// interval. It must be supported by all kinds of the source.toolkit.fluxcd.io
// API group.
//
+// Deprecated: use the Source interface from api/v1 instead. This type will be
+// removed in a future release.
+//
// +k8s:deepcopy-gen=false
type Source interface {
runtime.Object
diff --git a/pkg/git/libgit2/managed/const.go b/api/v1beta2/sts_types.go
similarity index 55%
rename from pkg/git/libgit2/managed/const.go
rename to api/v1beta2/sts_types.go
index f41035da7..c07c05123 100644
--- a/pkg/git/libgit2/managed/const.go
+++ b/api/v1beta2/sts_types.go
@@ -1,5 +1,5 @@
/*
-Copyright 2022 The Flux authors
+Copyright 2024 The Flux authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
@@ -14,14 +14,13 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
-package managed
+package v1beta2
const (
- // URLMaxLength represents the max length for the entire URL
- // when cloning Git repositories via HTTP(S).
- URLMaxLength = 2048
-
- // PathMaxLength represents the max length for the path element
- // when cloning Git repositories via SSH.
- PathMaxLength = 4096
+ // STSProviderAmazon represents the AWS provider for Security Token Service.
+ // Provides support for fetching temporary credentials from an AWS STS endpoint.
+ STSProviderAmazon string = "aws"
+ // STSProviderLDAP represents the LDAP provider for Security Token Service.
+ // Provides support for fetching temporary credentials from an LDAP endpoint.
+ STSProviderLDAP string = "ldap"
)
diff --git a/api/v1beta2/zz_generated.deepcopy.go b/api/v1beta2/zz_generated.deepcopy.go
index fc186d4df..0b874dd7e 100644
--- a/api/v1beta2/zz_generated.deepcopy.go
+++ b/api/v1beta2/zz_generated.deepcopy.go
@@ -1,8 +1,7 @@
//go:build !ignore_autogenerated
-// +build !ignore_autogenerated
/*
-Copyright 2022 The Flux authors
+Copyright 2025 The Flux authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
@@ -24,6 +23,7 @@ package v1beta2
import (
"github.com/fluxcd/pkg/apis/acl"
"github.com/fluxcd/pkg/apis/meta"
+ apiv1 "github.com/fluxcd/source-controller/api/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
)
@@ -115,14 +115,54 @@ func (in *BucketList) DeepCopyObject() runtime.Object {
return nil
}
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *BucketSTSSpec) DeepCopyInto(out *BucketSTSSpec) {
+ *out = *in
+ if in.SecretRef != nil {
+ in, out := &in.SecretRef, &out.SecretRef
+ *out = new(meta.LocalObjectReference)
+ **out = **in
+ }
+ if in.CertSecretRef != nil {
+ in, out := &in.CertSecretRef, &out.CertSecretRef
+ *out = new(meta.LocalObjectReference)
+ **out = **in
+ }
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BucketSTSSpec.
+func (in *BucketSTSSpec) DeepCopy() *BucketSTSSpec {
+ if in == nil {
+ return nil
+ }
+ out := new(BucketSTSSpec)
+ in.DeepCopyInto(out)
+ return out
+}
+
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *BucketSpec) DeepCopyInto(out *BucketSpec) {
*out = *in
+ if in.STS != nil {
+ in, out := &in.STS, &out.STS
+ *out = new(BucketSTSSpec)
+ (*in).DeepCopyInto(*out)
+ }
if in.SecretRef != nil {
in, out := &in.SecretRef, &out.SecretRef
*out = new(meta.LocalObjectReference)
**out = **in
}
+ if in.CertSecretRef != nil {
+ in, out := &in.CertSecretRef, &out.CertSecretRef
+ *out = new(meta.LocalObjectReference)
+ **out = **in
+ }
+ if in.ProxySecretRef != nil {
+ in, out := &in.ProxySecretRef, &out.ProxySecretRef
+ *out = new(meta.LocalObjectReference)
+ **out = **in
+ }
out.Interval = in.Interval
if in.Timeout != nil {
in, out := &in.Timeout, &out.Timeout
@@ -163,9 +203,14 @@ func (in *BucketStatus) DeepCopyInto(out *BucketStatus) {
}
if in.Artifact != nil {
in, out := &in.Artifact, &out.Artifact
- *out = new(Artifact)
+ *out = new(meta.Artifact)
(*in).DeepCopyInto(*out)
}
+ if in.ObservedIgnore != nil {
+ in, out := &in.ObservedIgnore, &out.ObservedIgnore
+ *out = new(string)
+ **out = **in
+ }
out.ReconcileRequestStatus = in.ReconcileRequestStatus
}
@@ -332,20 +377,30 @@ func (in *GitRepositoryStatus) DeepCopyInto(out *GitRepositoryStatus) {
}
if in.Artifact != nil {
in, out := &in.Artifact, &out.Artifact
- *out = new(Artifact)
+ *out = new(meta.Artifact)
(*in).DeepCopyInto(*out)
}
if in.IncludedArtifacts != nil {
in, out := &in.IncludedArtifacts, &out.IncludedArtifacts
- *out = make([]*Artifact, len(*in))
+ *out = make([]*meta.Artifact, len(*in))
for i := range *in {
if (*in)[i] != nil {
in, out := &(*in)[i], &(*out)[i]
- *out = new(Artifact)
+ *out = new(meta.Artifact)
(*in).DeepCopyInto(*out)
}
}
}
+ if in.ObservedIgnore != nil {
+ in, out := &in.ObservedIgnore, &out.ObservedIgnore
+ *out = new(string)
+ **out = **in
+ }
+ if in.ObservedInclude != nil {
+ in, out := &in.ObservedInclude, &out.ObservedInclude
+ *out = make([]GitRepositoryInclude, len(*in))
+ copy(*out, *in)
+ }
out.ReconcileRequestStatus = in.ReconcileRequestStatus
}
@@ -449,6 +504,11 @@ func (in *HelmChartSpec) DeepCopyInto(out *HelmChartSpec) {
*out = new(acl.AccessFrom)
(*in).DeepCopyInto(*out)
}
+ if in.Verify != nil {
+ in, out := &in.Verify, &out.Verify
+ *out = new(apiv1.OCIRepositoryVerification)
+ (*in).DeepCopyInto(*out)
+ }
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HelmChartSpec.
@@ -464,6 +524,11 @@ func (in *HelmChartSpec) DeepCopy() *HelmChartSpec {
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *HelmChartStatus) DeepCopyInto(out *HelmChartStatus) {
*out = *in
+ if in.ObservedValuesFiles != nil {
+ in, out := &in.ObservedValuesFiles, &out.ObservedValuesFiles
+ *out = make([]string, len(*in))
+ copy(*out, *in)
+ }
if in.Conditions != nil {
in, out := &in.Conditions, &out.Conditions
*out = make([]v1.Condition, len(*in))
@@ -473,7 +538,7 @@ func (in *HelmChartStatus) DeepCopyInto(out *HelmChartStatus) {
}
if in.Artifact != nil {
in, out := &in.Artifact, &out.Artifact
- *out = new(Artifact)
+ *out = new(meta.Artifact)
(*in).DeepCopyInto(*out)
}
out.ReconcileRequestStatus = in.ReconcileRequestStatus
@@ -556,6 +621,11 @@ func (in *HelmRepositorySpec) DeepCopyInto(out *HelmRepositorySpec) {
*out = new(meta.LocalObjectReference)
**out = **in
}
+ if in.CertSecretRef != nil {
+ in, out := &in.CertSecretRef, &out.CertSecretRef
+ *out = new(meta.LocalObjectReference)
+ **out = **in
+ }
out.Interval = in.Interval
if in.Timeout != nil {
in, out := &in.Timeout, &out.Timeout
@@ -591,7 +661,7 @@ func (in *HelmRepositoryStatus) DeepCopyInto(out *HelmRepositoryStatus) {
}
if in.Artifact != nil {
in, out := &in.Artifact, &out.Artifact
- *out = new(Artifact)
+ *out = new(meta.Artifact)
(*in).DeepCopyInto(*out)
}
out.ReconcileRequestStatus = in.ReconcileRequestStatus
@@ -622,6 +692,21 @@ func (in *LocalHelmChartSourceReference) DeepCopy() *LocalHelmChartSourceReferen
return out
}
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *OCILayerSelector) DeepCopyInto(out *OCILayerSelector) {
+ *out = *in
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OCILayerSelector.
+func (in *OCILayerSelector) DeepCopy() *OCILayerSelector {
+ if in == nil {
+ return nil
+ }
+ out := new(OCILayerSelector)
+ in.DeepCopyInto(out)
+ return out
+}
+
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *OCIRepository) DeepCopyInto(out *OCIRepository) {
*out = *in
@@ -704,16 +789,31 @@ func (in *OCIRepositorySpec) DeepCopyInto(out *OCIRepositorySpec) {
*out = new(OCIRepositoryRef)
**out = **in
}
+ if in.LayerSelector != nil {
+ in, out := &in.LayerSelector, &out.LayerSelector
+ *out = new(OCILayerSelector)
+ **out = **in
+ }
if in.SecretRef != nil {
in, out := &in.SecretRef, &out.SecretRef
*out = new(meta.LocalObjectReference)
**out = **in
}
+ if in.Verify != nil {
+ in, out := &in.Verify, &out.Verify
+ *out = new(apiv1.OCIRepositoryVerification)
+ (*in).DeepCopyInto(*out)
+ }
if in.CertSecretRef != nil {
in, out := &in.CertSecretRef, &out.CertSecretRef
*out = new(meta.LocalObjectReference)
**out = **in
}
+ if in.ProxySecretRef != nil {
+ in, out := &in.ProxySecretRef, &out.ProxySecretRef
+ *out = new(meta.LocalObjectReference)
+ **out = **in
+ }
out.Interval = in.Interval
if in.Timeout != nil {
in, out := &in.Timeout, &out.Timeout
@@ -749,9 +849,19 @@ func (in *OCIRepositoryStatus) DeepCopyInto(out *OCIRepositoryStatus) {
}
if in.Artifact != nil {
in, out := &in.Artifact, &out.Artifact
- *out = new(Artifact)
+ *out = new(meta.Artifact)
(*in).DeepCopyInto(*out)
}
+ if in.ObservedIgnore != nil {
+ in, out := &in.ObservedIgnore, &out.ObservedIgnore
+ *out = new(string)
+ **out = **in
+ }
+ if in.ObservedLayerSelector != nil {
+ in, out := &in.ObservedLayerSelector, &out.ObservedLayerSelector
+ *out = new(OCILayerSelector)
+ **out = **in
+ }
out.ReconcileRequestStatus = in.ReconcileRequestStatus
}
@@ -764,19 +874,3 @@ func (in *OCIRepositoryStatus) DeepCopy() *OCIRepositoryStatus {
in.DeepCopyInto(out)
return out
}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *OCIRepositoryVerification) DeepCopyInto(out *OCIRepositoryVerification) {
- *out = *in
- out.SecretRef = in.SecretRef
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OCIRepositoryVerification.
-func (in *OCIRepositoryVerification) DeepCopy() *OCIRepositoryVerification {
- if in == nil {
- return nil
- }
- out := new(OCIRepositoryVerification)
- in.DeepCopyInto(out)
- return out
-}
diff --git a/config/crd/bases/source.toolkit.fluxcd.io_buckets.yaml b/config/crd/bases/source.toolkit.fluxcd.io_buckets.yaml
index d8fc0f533..f578c8da0 100644
--- a/config/crd/bases/source.toolkit.fluxcd.io_buckets.yaml
+++ b/config/crd/bases/source.toolkit.fluxcd.io_buckets.yaml
@@ -1,11 +1,9 @@
-
---
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
annotations:
- controller-gen.kubebuilder.io/version: v0.7.0
- creationTimestamp: null
+ controller-gen.kubebuilder.io/version: v0.19.0
name: buckets.source.toolkit.fluxcd.io
spec:
group: source.toolkit.fluxcd.io
@@ -20,94 +18,122 @@ spec:
- jsonPath: .spec.endpoint
name: Endpoint
type: string
+ - jsonPath: .metadata.creationTimestamp
+ name: Age
+ type: date
- jsonPath: .status.conditions[?(@.type=="Ready")].status
name: Ready
type: string
- jsonPath: .status.conditions[?(@.type=="Ready")].message
name: Status
type: string
- - jsonPath: .metadata.creationTimestamp
- name: Age
- type: date
- name: v1beta1
+ name: v1
schema:
openAPIV3Schema:
- description: Bucket is the Schema for the buckets API
+ description: Bucket is the Schema for the buckets API.
properties:
apiVersion:
- description: 'APIVersion defines the versioned schema of this representation
- of an object. Servers should convert recognized schemas to the latest
- internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
+ description: |-
+ APIVersion defines the versioned schema of this representation of an object.
+ Servers should convert recognized schemas to the latest internal value, and
+ may reject unrecognized values.
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources
type: string
kind:
- description: 'Kind is a string value representing the REST resource this
- object represents. Servers may infer this from the endpoint the client
- submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
+ description: |-
+ Kind is a string value representing the REST resource this object represents.
+ Servers may infer this from the endpoint the client submits requests to.
+ Cannot be updated.
+ In CamelCase.
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
type: string
metadata:
type: object
spec:
- description: BucketSpec defines the desired state of an S3 compatible
- bucket
+ description: |-
+ BucketSpec specifies the required configuration to produce an Artifact for
+ an object storage bucket.
properties:
- accessFrom:
- description: AccessFrom defines an Access Control List for allowing
- cross-namespace references to this object.
+ bucketName:
+ description: BucketName is the name of the object storage bucket.
+ type: string
+ certSecretRef:
+ description: |-
+ CertSecretRef can be given the name of a Secret containing
+ either or both of
+
+ - a PEM-encoded client certificate (`tls.crt`) and private
+ key (`tls.key`);
+ - a PEM-encoded CA certificate (`ca.crt`)
+
+ and whichever are supplied, will be used for connecting to the
+ bucket. The client cert and key are useful if you are
+ authenticating with a certificate; the CA cert is useful if
+ you are using a self-signed server certificate. The Secret must
+ be of type `Opaque` or `kubernetes.io/tls`.
+
+ This field is only supported for the `generic` provider.
properties:
- namespaceSelectors:
- description: NamespaceSelectors is the list of namespace selectors
- to which this ACL applies. Items in this list are evaluated
- using a logical OR operation.
- items:
- description: NamespaceSelector selects the namespaces to which
- this ACL applies. An empty map of MatchLabels matches all
- namespaces in a cluster.
- properties:
- matchLabels:
- additionalProperties:
- type: string
- description: MatchLabels is a map of {key,value} pairs.
- A single {key,value} in the matchLabels map is equivalent
- to an element of matchExpressions, whose key field is
- "key", the operator is "In", and the values array contains
- only "value". The requirements are ANDed.
- type: object
- type: object
- type: array
+ name:
+ description: Name of the referent.
+ type: string
required:
- - namespaceSelectors
+ - name
type: object
- bucketName:
- description: The bucket name.
- type: string
endpoint:
- description: The bucket endpoint address.
+ description: Endpoint is the object storage address the BucketName
+ is located at.
type: string
ignore:
- description: Ignore overrides the set of excluded patterns in the
- .sourceignore format (which is the same as .gitignore). If not provided,
- a default will be used, consult the documentation for your version
- to find out what those are.
+ description: |-
+ Ignore overrides the set of excluded patterns in the .sourceignore format
+ (which is the same as .gitignore). If not provided, a default will be used,
+ consult the documentation for your version to find out what those are.
type: string
insecure:
- description: Insecure allows connecting to a non-TLS S3 HTTP endpoint.
+ description: Insecure allows connecting to a non-TLS HTTP Endpoint.
type: boolean
interval:
- description: The interval at which to check for bucket updates.
+ description: |-
+ Interval at which the Bucket Endpoint is checked for updates.
+ This interval is approximate and may be subject to jitter to ensure
+ efficient use of resources.
+ pattern: ^([0-9]+(\.[0-9]+)?(ms|s|m|h))+$
+ type: string
+ prefix:
+ description: Prefix to use for server-side filtering of files in the
+ Bucket.
type: string
provider:
default: generic
- description: The S3 compatible storage provider name, default ('generic').
+ description: |-
+ Provider of the object storage bucket.
+ Defaults to 'generic', which expects an S3 (API) compatible object
+ storage.
enum:
- generic
- aws
- gcp
+ - azure
type: string
+ proxySecretRef:
+ description: |-
+ ProxySecretRef specifies the Secret containing the proxy configuration
+ to use while communicating with the Bucket server.
+ properties:
+ name:
+ description: Name of the referent.
+ type: string
+ required:
+ - name
+ type: object
region:
- description: The bucket region.
+ description: Region of the Endpoint where the BucketName is located
+ in.
type: string
secretRef:
- description: The name of the secret containing authentication credentials
+ description: |-
+ SecretRef specifies the Secret containing authentication credentials
for the Bucket.
properties:
name:
@@ -116,92 +142,191 @@ spec:
required:
- name
type: object
+ serviceAccountName:
+ description: |-
+ ServiceAccountName is the name of the Kubernetes ServiceAccount used to authenticate
+ the bucket. This field is only supported for the 'gcp' and 'aws' providers.
+ For more information about workload identity:
+ https://fluxcd.io/flux/components/source/buckets/#workload-identity
+ type: string
+ sts:
+ description: |-
+ STS specifies the required configuration to use a Security Token
+ Service for fetching temporary credentials to authenticate in a
+ Bucket provider.
+
+ This field is only supported for the `aws` and `generic` providers.
+ properties:
+ certSecretRef:
+ description: |-
+ CertSecretRef can be given the name of a Secret containing
+ either or both of
+
+ - a PEM-encoded client certificate (`tls.crt`) and private
+ key (`tls.key`);
+ - a PEM-encoded CA certificate (`ca.crt`)
+
+ and whichever are supplied, will be used for connecting to the
+ STS endpoint. The client cert and key are useful if you are
+ authenticating with a certificate; the CA cert is useful if
+ you are using a self-signed server certificate. The Secret must
+ be of type `Opaque` or `kubernetes.io/tls`.
+
+ This field is only supported for the `ldap` provider.
+ properties:
+ name:
+ description: Name of the referent.
+ type: string
+ required:
+ - name
+ type: object
+ endpoint:
+ description: |-
+ Endpoint is the HTTP/S endpoint of the Security Token Service from
+ where temporary credentials will be fetched.
+ pattern: ^(http|https)://.*$
+ type: string
+ provider:
+ description: Provider of the Security Token Service.
+ enum:
+ - aws
+ - ldap
+ type: string
+ secretRef:
+ description: |-
+ SecretRef specifies the Secret containing authentication credentials
+ for the STS endpoint. This Secret must contain the fields `username`
+ and `password` and is supported only for the `ldap` provider.
+ properties:
+ name:
+ description: Name of the referent.
+ type: string
+ required:
+ - name
+ type: object
+ required:
+ - endpoint
+ - provider
+ type: object
suspend:
- description: This flag tells the controller to suspend the reconciliation
- of this source.
+ description: |-
+ Suspend tells the controller to suspend the reconciliation of this
+ Bucket.
type: boolean
timeout:
default: 60s
- description: The timeout for download operations, defaults to 60s.
+ description: Timeout for fetch operations, defaults to 60s.
+ pattern: ^([0-9]+(\.[0-9]+)?(ms|s|m))+$
type: string
required:
- bucketName
- endpoint
- interval
type: object
+ x-kubernetes-validations:
+ - message: STS configuration is only supported for the 'aws' and 'generic'
+ Bucket providers
+ rule: self.provider == 'aws' || self.provider == 'generic' || !has(self.sts)
+ - message: '''aws'' is the only supported STS provider for the ''aws''
+ Bucket provider'
+ rule: self.provider != 'aws' || !has(self.sts) || self.sts.provider
+ == 'aws'
+ - message: '''ldap'' is the only supported STS provider for the ''generic''
+ Bucket provider'
+ rule: self.provider != 'generic' || !has(self.sts) || self.sts.provider
+ == 'ldap'
+ - message: spec.sts.secretRef is not required for the 'aws' STS provider
+ rule: '!has(self.sts) || self.sts.provider != ''aws'' || !has(self.sts.secretRef)'
+ - message: spec.sts.certSecretRef is not required for the 'aws' STS provider
+ rule: '!has(self.sts) || self.sts.provider != ''aws'' || !has(self.sts.certSecretRef)'
+ - message: ServiceAccountName is not supported for the 'generic' Bucket
+ provider
+ rule: self.provider != 'generic' || !has(self.serviceAccountName)
+ - message: cannot set both .spec.secretRef and .spec.serviceAccountName
+ rule: '!has(self.secretRef) || !has(self.serviceAccountName)'
status:
default:
observedGeneration: -1
- description: BucketStatus defines the observed state of a bucket
+ description: BucketStatus records the observed state of a Bucket.
properties:
artifact:
- description: Artifact represents the output of the last successful
- Bucket sync.
+ description: Artifact represents the last successful Bucket reconciliation.
properties:
- checksum:
- description: Checksum is the SHA256 checksum of the artifact.
+ digest:
+ description: Digest is the digest of the file in the form of ':'.
+ pattern: ^[a-z0-9]+(?:[.+_-][a-z0-9]+)*:[a-zA-Z0-9=_-]+$
type: string
lastUpdateTime:
- description: LastUpdateTime is the timestamp corresponding to
- the last update of this artifact.
+ description: |-
+ LastUpdateTime is the timestamp corresponding to the last update of the
+ Artifact.
format: date-time
type: string
+ metadata:
+ additionalProperties:
+ type: string
+ description: Metadata holds upstream information such as OCI annotations.
+ type: object
path:
- description: Path is the relative file path of this artifact.
+ description: |-
+ Path is the relative file path of the Artifact. It can be used to locate
+ the file in the root of the Artifact storage on the local file system of
+ the controller managing the Source.
type: string
revision:
- description: Revision is a human readable identifier traceable
- in the origin source system. It can be a Git commit SHA, Git
- tag, a Helm index timestamp, a Helm chart version, etc.
+ description: |-
+ Revision is a human-readable identifier traceable in the origin source
+ system. It can be a Git commit SHA, Git tag, a Helm chart version, etc.
type: string
+ size:
+ description: Size is the number of bytes in the file.
+ format: int64
+ type: integer
url:
- description: URL is the HTTP address of this artifact.
+ description: |-
+ URL is the HTTP address of the Artifact as exposed by the controller
+ managing the Source. It can be used to retrieve the Artifact for
+ consumption, e.g. by another controller applying the Artifact contents.
type: string
required:
+ - digest
+ - lastUpdateTime
- path
+ - revision
- url
type: object
conditions:
description: Conditions holds the conditions for the Bucket.
items:
- description: "Condition contains details for one aspect of the current
- state of this API Resource. --- This struct is intended for direct
- use as an array at the field path .status.conditions. For example,
- type FooStatus struct{ // Represents the observations of a
- foo's current state. // Known .status.conditions.type are:
- \"Available\", \"Progressing\", and \"Degraded\" // +patchMergeKey=type
- \ // +patchStrategy=merge // +listType=map // +listMapKey=type
- \ Conditions []metav1.Condition `json:\"conditions,omitempty\"
- patchStrategy:\"merge\" patchMergeKey:\"type\" protobuf:\"bytes,1,rep,name=conditions\"`
- \n // other fields }"
+ description: Condition contains details for one aspect of the current
+ state of this API Resource.
properties:
lastTransitionTime:
- description: lastTransitionTime is the last time the condition
- transitioned from one status to another. This should be when
- the underlying condition changed. If that is not known, then
- using the time when the API field changed is acceptable.
+ description: |-
+ lastTransitionTime is the last time the condition transitioned from one status to another.
+ This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable.
format: date-time
type: string
message:
- description: message is a human readable message indicating
- details about the transition. This may be an empty string.
+ description: |-
+ message is a human readable message indicating details about the transition.
+ This may be an empty string.
maxLength: 32768
type: string
observedGeneration:
- description: observedGeneration represents the .metadata.generation
- that the condition was set based upon. For instance, if .metadata.generation
- is currently 12, but the .status.conditions[x].observedGeneration
- is 9, the condition is out of date with respect to the current
- state of the instance.
+ description: |-
+ observedGeneration represents the .metadata.generation that the condition was set based upon.
+ For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date
+ with respect to the current state of the instance.
format: int64
minimum: 0
type: integer
reason:
- description: reason contains a programmatic identifier indicating
- the reason for the condition's last transition. Producers
- of specific condition types may define expected values and
- meanings for this field, and whether the values are considered
- a guaranteed API. The value should be a CamelCase string.
+ description: |-
+ reason contains a programmatic identifier indicating the reason for the condition's last transition.
+ Producers of specific condition types may define expected values and meanings for this field,
+ and whether the values are considered a guaranteed API.
+ The value should be a CamelCase string.
This field may not be empty.
maxLength: 1024
minLength: 1
@@ -216,10 +341,6 @@ spec:
type: string
type:
description: type of condition in CamelCase or in foo.example.com/CamelCase.
- --- Many .condition.type values are consistent across resources
- like Available, but because arbitrary conditions can be useful
- (see .node.status.conditions), the ability to deconflict is
- important. The regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt)
maxLength: 316
pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$
type: string
@@ -232,22 +353,31 @@ spec:
type: object
type: array
lastHandledReconcileAt:
- description: LastHandledReconcileAt holds the value of the most recent
- reconcile request value, so a change of the annotation value can
- be detected.
+ description: |-
+ LastHandledReconcileAt holds the value of the most recent
+ reconcile request value, so a change of the annotation value
+ can be detected.
type: string
observedGeneration:
- description: ObservedGeneration is the last observed generation.
+ description: ObservedGeneration is the last observed generation of
+ the Bucket object.
format: int64
type: integer
+ observedIgnore:
+ description: |-
+ ObservedIgnore is the observed exclusion patterns used for constructing
+ the source artifact.
+ type: string
url:
- description: URL is the download link for the artifact output of the
- last Bucket sync.
+ description: |-
+ URL is the dynamic fetch link for the latest Artifact.
+ It is provided on a "best effort" basis, and using the precise
+ BucketStatus.Artifact data is recommended.
type: string
type: object
type: object
served: true
- storage: false
+ storage: true
subresources:
status: {}
- additionalPrinterColumns:
@@ -263,49 +393,57 @@ spec:
- jsonPath: .status.conditions[?(@.type=="Ready")].message
name: Status
type: string
+ deprecated: true
+ deprecationWarning: v1beta2 Bucket is deprecated, upgrade to v1
name: v1beta2
schema:
openAPIV3Schema:
description: Bucket is the Schema for the buckets API.
properties:
apiVersion:
- description: 'APIVersion defines the versioned schema of this representation
- of an object. Servers should convert recognized schemas to the latest
- internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
+ description: |-
+ APIVersion defines the versioned schema of this representation of an object.
+ Servers should convert recognized schemas to the latest internal value, and
+ may reject unrecognized values.
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources
type: string
kind:
- description: 'Kind is a string value representing the REST resource this
- object represents. Servers may infer this from the endpoint the client
- submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
+ description: |-
+ Kind is a string value representing the REST resource this object represents.
+ Servers may infer this from the endpoint the client submits requests to.
+ Cannot be updated.
+ In CamelCase.
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
type: string
metadata:
type: object
spec:
- description: BucketSpec specifies the required configuration to produce
- an Artifact for an object storage bucket.
+ description: |-
+ BucketSpec specifies the required configuration to produce an Artifact for
+ an object storage bucket.
properties:
accessFrom:
- description: 'AccessFrom specifies an Access Control List for allowing
- cross-namespace references to this object. NOTE: Not implemented,
- provisional as of https://github.com/fluxcd/flux2/pull/2092'
+ description: |-
+ AccessFrom specifies an Access Control List for allowing cross-namespace
+ references to this object.
+ NOTE: Not implemented, provisional as of https://github.com/fluxcd/flux2/pull/2092
properties:
namespaceSelectors:
- description: NamespaceSelectors is the list of namespace selectors
- to which this ACL applies. Items in this list are evaluated
- using a logical OR operation.
+ description: |-
+ NamespaceSelectors is the list of namespace selectors to which this ACL applies.
+ Items in this list are evaluated using a logical OR operation.
items:
- description: NamespaceSelector selects the namespaces to which
- this ACL applies. An empty map of MatchLabels matches all
- namespaces in a cluster.
+ description: |-
+ NamespaceSelector selects the namespaces to which this ACL applies.
+ An empty map of MatchLabels matches all namespaces in a cluster.
properties:
matchLabels:
additionalProperties:
type: string
- description: MatchLabels is a map of {key,value} pairs.
- A single {key,value} in the matchLabels map is equivalent
- to an element of matchExpressions, whose key field is
- "key", the operator is "In", and the values array contains
- only "value". The requirements are ANDed.
+ description: |-
+ MatchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels
+ map is equivalent to an element of matchExpressions, whose key field is "key", the
+ operator is "In", and the values array contains only "value". The requirements are ANDed.
type: object
type: object
type: array
@@ -315,39 +453,84 @@ spec:
bucketName:
description: BucketName is the name of the object storage bucket.
type: string
+ certSecretRef:
+ description: |-
+ CertSecretRef can be given the name of a Secret containing
+ either or both of
+
+ - a PEM-encoded client certificate (`tls.crt`) and private
+ key (`tls.key`);
+ - a PEM-encoded CA certificate (`ca.crt`)
+
+ and whichever are supplied, will be used for connecting to the
+ bucket. The client cert and key are useful if you are
+ authenticating with a certificate; the CA cert is useful if
+ you are using a self-signed server certificate. The Secret must
+ be of type `Opaque` or `kubernetes.io/tls`.
+
+ This field is only supported for the `generic` provider.
+ properties:
+ name:
+ description: Name of the referent.
+ type: string
+ required:
+ - name
+ type: object
endpoint:
description: Endpoint is the object storage address the BucketName
is located at.
type: string
ignore:
- description: Ignore overrides the set of excluded patterns in the
- .sourceignore format (which is the same as .gitignore). If not provided,
- a default will be used, consult the documentation for your version
- to find out what those are.
+ description: |-
+ Ignore overrides the set of excluded patterns in the .sourceignore format
+ (which is the same as .gitignore). If not provided, a default will be used,
+ consult the documentation for your version to find out what those are.
type: string
insecure:
description: Insecure allows connecting to a non-TLS HTTP Endpoint.
type: boolean
interval:
- description: Interval at which to check the Endpoint for updates.
+ description: |-
+ Interval at which the Bucket Endpoint is checked for updates.
+ This interval is approximate and may be subject to jitter to ensure
+ efficient use of resources.
+ pattern: ^([0-9]+(\.[0-9]+)?(ms|s|m|h))+$
+ type: string
+ prefix:
+ description: Prefix to use for server-side filtering of files in the
+ Bucket.
type: string
provider:
default: generic
- description: Provider of the object storage bucket. Defaults to 'generic',
- which expects an S3 (API) compatible object storage.
+ description: |-
+ Provider of the object storage bucket.
+ Defaults to 'generic', which expects an S3 (API) compatible object
+ storage.
enum:
- generic
- aws
- gcp
- azure
type: string
+ proxySecretRef:
+ description: |-
+ ProxySecretRef specifies the Secret containing the proxy configuration
+ to use while communicating with the Bucket server.
+ properties:
+ name:
+ description: Name of the referent.
+ type: string
+ required:
+ - name
+ type: object
region:
description: Region of the Endpoint where the BucketName is located
in.
type: string
secretRef:
- description: SecretRef specifies the Secret containing authentication
- credentials for the Bucket.
+ description: |-
+ SecretRef specifies the Secret containing authentication credentials
+ for the Bucket.
properties:
name:
description: Name of the referent.
@@ -355,19 +538,96 @@ spec:
required:
- name
type: object
+ sts:
+ description: |-
+ STS specifies the required configuration to use a Security Token
+ Service for fetching temporary credentials to authenticate in a
+ Bucket provider.
+
+ This field is only supported for the `aws` and `generic` providers.
+ properties:
+ certSecretRef:
+ description: |-
+ CertSecretRef can be given the name of a Secret containing
+ either or both of
+
+ - a PEM-encoded client certificate (`tls.crt`) and private
+ key (`tls.key`);
+ - a PEM-encoded CA certificate (`ca.crt`)
+
+ and whichever are supplied, will be used for connecting to the
+ STS endpoint. The client cert and key are useful if you are
+ authenticating with a certificate; the CA cert is useful if
+ you are using a self-signed server certificate. The Secret must
+ be of type `Opaque` or `kubernetes.io/tls`.
+
+ This field is only supported for the `ldap` provider.
+ properties:
+ name:
+ description: Name of the referent.
+ type: string
+ required:
+ - name
+ type: object
+ endpoint:
+ description: |-
+ Endpoint is the HTTP/S endpoint of the Security Token Service from
+ where temporary credentials will be fetched.
+ pattern: ^(http|https)://.*$
+ type: string
+ provider:
+ description: Provider of the Security Token Service.
+ enum:
+ - aws
+ - ldap
+ type: string
+ secretRef:
+ description: |-
+ SecretRef specifies the Secret containing authentication credentials
+ for the STS endpoint. This Secret must contain the fields `username`
+ and `password` and is supported only for the `ldap` provider.
+ properties:
+ name:
+ description: Name of the referent.
+ type: string
+ required:
+ - name
+ type: object
+ required:
+ - endpoint
+ - provider
+ type: object
suspend:
- description: Suspend tells the controller to suspend the reconciliation
- of this Bucket.
+ description: |-
+ Suspend tells the controller to suspend the reconciliation of this
+ Bucket.
type: boolean
timeout:
default: 60s
description: Timeout for fetch operations, defaults to 60s.
+ pattern: ^([0-9]+(\.[0-9]+)?(ms|s|m))+$
type: string
required:
- bucketName
- endpoint
- interval
type: object
+ x-kubernetes-validations:
+ - message: STS configuration is only supported for the 'aws' and 'generic'
+ Bucket providers
+ rule: self.provider == 'aws' || self.provider == 'generic' || !has(self.sts)
+ - message: '''aws'' is the only supported STS provider for the ''aws''
+ Bucket provider'
+ rule: self.provider != 'aws' || !has(self.sts) || self.sts.provider
+ == 'aws'
+ - message: '''ldap'' is the only supported STS provider for the ''generic''
+ Bucket provider'
+ rule: self.provider != 'generic' || !has(self.sts) || self.sts.provider
+ == 'ldap'
+ - message: spec.sts.secretRef is not required for the 'aws' STS provider
+ rule: '!has(self.sts) || self.sts.provider != ''aws'' || !has(self.sts.secretRef)'
+ - message: spec.sts.certSecretRef is not required for the 'aws' STS provider
+ rule: '!has(self.sts) || self.sts.provider != ''aws'' || !has(self.sts.certSecretRef)'
status:
default:
observedGeneration: -1
@@ -376,12 +636,14 @@ spec:
artifact:
description: Artifact represents the last successful Bucket reconciliation.
properties:
- checksum:
- description: Checksum is the SHA256 checksum of the Artifact file.
+ digest:
+ description: Digest is the digest of the file in the form of ':'.
+ pattern: ^[a-z0-9]+(?:[.+_-][a-z0-9]+)*:[a-zA-Z0-9=_-]+$
type: string
lastUpdateTime:
- description: LastUpdateTime is the timestamp corresponding to
- the last update of the Artifact.
+ description: |-
+ LastUpdateTime is the timestamp corresponding to the last update of the
+ Artifact.
format: date-time
type: string
metadata:
@@ -390,70 +652,65 @@ spec:
description: Metadata holds upstream information such as OCI annotations.
type: object
path:
- description: Path is the relative file path of the Artifact. It
- can be used to locate the file in the root of the Artifact storage
- on the local file system of the controller managing the Source.
+ description: |-
+ Path is the relative file path of the Artifact. It can be used to locate
+ the file in the root of the Artifact storage on the local file system of
+ the controller managing the Source.
type: string
revision:
- description: Revision is a human-readable identifier traceable
- in the origin source system. It can be a Git commit SHA, Git
- tag, a Helm chart version, etc.
+ description: |-
+ Revision is a human-readable identifier traceable in the origin source
+ system. It can be a Git commit SHA, Git tag, a Helm chart version, etc.
type: string
size:
description: Size is the number of bytes in the file.
format: int64
type: integer
url:
- description: URL is the HTTP address of the Artifact as exposed
- by the controller managing the Source. It can be used to retrieve
- the Artifact for consumption, e.g. by another controller applying
- the Artifact contents.
+ description: |-
+ URL is the HTTP address of the Artifact as exposed by the controller
+ managing the Source. It can be used to retrieve the Artifact for
+ consumption, e.g. by another controller applying the Artifact contents.
type: string
required:
+ - digest
+ - lastUpdateTime
- path
+ - revision
- url
type: object
conditions:
description: Conditions holds the conditions for the Bucket.
items:
- description: "Condition contains details for one aspect of the current
- state of this API Resource. --- This struct is intended for direct
- use as an array at the field path .status.conditions. For example,
- type FooStatus struct{ // Represents the observations of a
- foo's current state. // Known .status.conditions.type are:
- \"Available\", \"Progressing\", and \"Degraded\" // +patchMergeKey=type
- \ // +patchStrategy=merge // +listType=map // +listMapKey=type
- \ Conditions []metav1.Condition `json:\"conditions,omitempty\"
- patchStrategy:\"merge\" patchMergeKey:\"type\" protobuf:\"bytes,1,rep,name=conditions\"`
- \n // other fields }"
+ description: Condition contains details for one aspect of the current
+ state of this API Resource.
properties:
lastTransitionTime:
- description: lastTransitionTime is the last time the condition
- transitioned from one status to another. This should be when
- the underlying condition changed. If that is not known, then
- using the time when the API field changed is acceptable.
+ description: |-
+ lastTransitionTime is the last time the condition transitioned from one status to another.
+ This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable.
format: date-time
type: string
message:
- description: message is a human readable message indicating
- details about the transition. This may be an empty string.
+ description: |-
+ message is a human readable message indicating details about the transition.
+ This may be an empty string.
maxLength: 32768
type: string
observedGeneration:
- description: observedGeneration represents the .metadata.generation
- that the condition was set based upon. For instance, if .metadata.generation
- is currently 12, but the .status.conditions[x].observedGeneration
- is 9, the condition is out of date with respect to the current
- state of the instance.
+ description: |-
+ observedGeneration represents the .metadata.generation that the condition was set based upon.
+ For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date
+ with respect to the current state of the instance.
format: int64
minimum: 0
type: integer
reason:
- description: reason contains a programmatic identifier indicating
- the reason for the condition's last transition. Producers
- of specific condition types may define expected values and
- meanings for this field, and whether the values are considered
- a guaranteed API. The value should be a CamelCase string.
+ description: |-
+ reason contains a programmatic identifier indicating the reason for the condition's last transition.
+ Producers of specific condition types may define expected values and meanings for this field,
+ and whether the values are considered a guaranteed API.
+ The value should be a CamelCase string.
This field may not be empty.
maxLength: 1024
minLength: 1
@@ -468,10 +725,6 @@ spec:
type: string
type:
description: type of condition in CamelCase or in foo.example.com/CamelCase.
- --- Many .condition.type values are consistent across resources
- like Available, but because arbitrary conditions can be useful
- (see .node.status.conditions), the ability to deconflict is
- important. The regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt)
maxLength: 316
pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$
type: string
@@ -484,29 +737,30 @@ spec:
type: object
type: array
lastHandledReconcileAt:
- description: LastHandledReconcileAt holds the value of the most recent
- reconcile request value, so a change of the annotation value can
- be detected.
+ description: |-
+ LastHandledReconcileAt holds the value of the most recent
+ reconcile request value, so a change of the annotation value
+ can be detected.
type: string
observedGeneration:
description: ObservedGeneration is the last observed generation of
the Bucket object.
format: int64
type: integer
+ observedIgnore:
+ description: |-
+ ObservedIgnore is the observed exclusion patterns used for constructing
+ the source artifact.
+ type: string
url:
- description: URL is the dynamic fetch link for the latest Artifact.
- It is provided on a "best effort" basis, and using the precise BucketStatus.Artifact
- data is recommended.
+ description: |-
+ URL is the dynamic fetch link for the latest Artifact.
+ It is provided on a "best effort" basis, and using the precise
+ BucketStatus.Artifact data is recommended.
type: string
type: object
type: object
served: true
- storage: true
+ storage: false
subresources:
status: {}
-status:
- acceptedNames:
- kind: ""
- plural: ""
- conditions: []
- storedVersions: []
diff --git a/config/crd/bases/source.toolkit.fluxcd.io_externalartifacts.yaml b/config/crd/bases/source.toolkit.fluxcd.io_externalartifacts.yaml
new file mode 100644
index 000000000..23cdf63c3
--- /dev/null
+++ b/config/crd/bases/source.toolkit.fluxcd.io_externalartifacts.yaml
@@ -0,0 +1,191 @@
+---
+apiVersion: apiextensions.k8s.io/v1
+kind: CustomResourceDefinition
+metadata:
+ annotations:
+ controller-gen.kubebuilder.io/version: v0.19.0
+ name: externalartifacts.source.toolkit.fluxcd.io
+spec:
+ group: source.toolkit.fluxcd.io
+ names:
+ kind: ExternalArtifact
+ listKind: ExternalArtifactList
+ plural: externalartifacts
+ singular: externalartifact
+ scope: Namespaced
+ versions:
+ - additionalPrinterColumns:
+ - jsonPath: .metadata.creationTimestamp
+ name: Age
+ type: date
+ - jsonPath: .status.conditions[?(@.type=="Ready")].status
+ name: Ready
+ type: string
+ - jsonPath: .status.conditions[?(@.type=="Ready")].message
+ name: Status
+ type: string
+ - jsonPath: .spec.sourceRef.name
+ name: Source
+ type: string
+ name: v1
+ schema:
+ openAPIV3Schema:
+ description: ExternalArtifact is the Schema for the external artifacts API
+ properties:
+ apiVersion:
+ description: |-
+ APIVersion defines the versioned schema of this representation of an object.
+ Servers should convert recognized schemas to the latest internal value, and
+ may reject unrecognized values.
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources
+ type: string
+ kind:
+ description: |-
+ Kind is a string value representing the REST resource this object represents.
+ Servers may infer this from the endpoint the client submits requests to.
+ Cannot be updated.
+ In CamelCase.
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
+ type: string
+ metadata:
+ type: object
+ spec:
+ description: ExternalArtifactSpec defines the desired state of ExternalArtifact
+ properties:
+ sourceRef:
+ description: |-
+ SourceRef points to the Kubernetes custom resource for
+ which the artifact is generated.
+ properties:
+ apiVersion:
+ description: API version of the referent, if not specified the
+ Kubernetes preferred version will be used.
+ type: string
+ kind:
+ description: Kind of the referent.
+ type: string
+ name:
+ description: Name of the referent.
+ type: string
+ namespace:
+ description: Namespace of the referent, when not specified it
+ acts as LocalObjectReference.
+ type: string
+ required:
+ - kind
+ - name
+ type: object
+ type: object
+ status:
+ description: ExternalArtifactStatus defines the observed state of ExternalArtifact
+ properties:
+ artifact:
+ description: Artifact represents the output of an ExternalArtifact
+ reconciliation.
+ properties:
+ digest:
+ description: Digest is the digest of the file in the form of ':'.
+ pattern: ^[a-z0-9]+(?:[.+_-][a-z0-9]+)*:[a-zA-Z0-9=_-]+$
+ type: string
+ lastUpdateTime:
+ description: |-
+ LastUpdateTime is the timestamp corresponding to the last update of the
+ Artifact.
+ format: date-time
+ type: string
+ metadata:
+ additionalProperties:
+ type: string
+ description: Metadata holds upstream information such as OCI annotations.
+ type: object
+ path:
+ description: |-
+ Path is the relative file path of the Artifact. It can be used to locate
+ the file in the root of the Artifact storage on the local file system of
+ the controller managing the Source.
+ type: string
+ revision:
+ description: |-
+ Revision is a human-readable identifier traceable in the origin source
+ system. It can be a Git commit SHA, Git tag, a Helm chart version, etc.
+ type: string
+ size:
+ description: Size is the number of bytes in the file.
+ format: int64
+ type: integer
+ url:
+ description: |-
+ URL is the HTTP address of the Artifact as exposed by the controller
+ managing the Source. It can be used to retrieve the Artifact for
+ consumption, e.g. by another controller applying the Artifact contents.
+ type: string
+ required:
+ - digest
+ - lastUpdateTime
+ - path
+ - revision
+ - url
+ type: object
+ conditions:
+ description: Conditions holds the conditions for the ExternalArtifact.
+ items:
+ description: Condition contains details for one aspect of the current
+ state of this API Resource.
+ properties:
+ lastTransitionTime:
+ description: |-
+ lastTransitionTime is the last time the condition transitioned from one status to another.
+ This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable.
+ format: date-time
+ type: string
+ message:
+ description: |-
+ message is a human readable message indicating details about the transition.
+ This may be an empty string.
+ maxLength: 32768
+ type: string
+ observedGeneration:
+ description: |-
+ observedGeneration represents the .metadata.generation that the condition was set based upon.
+ For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date
+ with respect to the current state of the instance.
+ format: int64
+ minimum: 0
+ type: integer
+ reason:
+ description: |-
+ reason contains a programmatic identifier indicating the reason for the condition's last transition.
+ Producers of specific condition types may define expected values and meanings for this field,
+ and whether the values are considered a guaranteed API.
+ The value should be a CamelCase string.
+ This field may not be empty.
+ maxLength: 1024
+ minLength: 1
+ pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$
+ type: string
+ status:
+ description: status of the condition, one of True, False, Unknown.
+ enum:
+ - "True"
+ - "False"
+ - Unknown
+ type: string
+ type:
+ description: type of condition in CamelCase or in foo.example.com/CamelCase.
+ maxLength: 316
+ pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$
+ type: string
+ required:
+ - lastTransitionTime
+ - message
+ - reason
+ - status
+ - type
+ type: object
+ type: array
+ type: object
+ type: object
+ served: true
+ storage: true
+ subresources:
+ status: {}
diff --git a/config/crd/bases/source.toolkit.fluxcd.io_gitrepositories.yaml b/config/crd/bases/source.toolkit.fluxcd.io_gitrepositories.yaml
index b260fb694..10663e473 100644
--- a/config/crd/bases/source.toolkit.fluxcd.io_gitrepositories.yaml
+++ b/config/crd/bases/source.toolkit.fluxcd.io_gitrepositories.yaml
@@ -1,11 +1,9 @@
-
---
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
annotations:
- controller-gen.kubebuilder.io/version: v0.7.0
- creationTimestamp: null
+ controller-gen.kubebuilder.io/version: v0.19.0
name: gitrepositories.source.toolkit.fluxcd.io
spec:
group: source.toolkit.fluxcd.io
@@ -22,88 +20,66 @@ spec:
- jsonPath: .spec.url
name: URL
type: string
+ - jsonPath: .metadata.creationTimestamp
+ name: Age
+ type: date
- jsonPath: .status.conditions[?(@.type=="Ready")].status
name: Ready
type: string
- jsonPath: .status.conditions[?(@.type=="Ready")].message
name: Status
type: string
- - jsonPath: .metadata.creationTimestamp
- name: Age
- type: date
- name: v1beta1
+ name: v1
schema:
openAPIV3Schema:
- description: GitRepository is the Schema for the gitrepositories API
+ description: GitRepository is the Schema for the gitrepositories API.
properties:
apiVersion:
- description: 'APIVersion defines the versioned schema of this representation
- of an object. Servers should convert recognized schemas to the latest
- internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
+ description: |-
+ APIVersion defines the versioned schema of this representation of an object.
+ Servers should convert recognized schemas to the latest internal value, and
+ may reject unrecognized values.
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources
type: string
kind:
- description: 'Kind is a string value representing the REST resource this
- object represents. Servers may infer this from the endpoint the client
- submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
+ description: |-
+ Kind is a string value representing the REST resource this object represents.
+ Servers may infer this from the endpoint the client submits requests to.
+ Cannot be updated.
+ In CamelCase.
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
type: string
metadata:
type: object
spec:
- description: GitRepositorySpec defines the desired state of a Git repository.
+ description: |-
+ GitRepositorySpec specifies the required configuration to produce an
+ Artifact for a Git repository.
properties:
- accessFrom:
- description: AccessFrom defines an Access Control List for allowing
- cross-namespace references to this object.
- properties:
- namespaceSelectors:
- description: NamespaceSelectors is the list of namespace selectors
- to which this ACL applies. Items in this list are evaluated
- using a logical OR operation.
- items:
- description: NamespaceSelector selects the namespaces to which
- this ACL applies. An empty map of MatchLabels matches all
- namespaces in a cluster.
- properties:
- matchLabels:
- additionalProperties:
- type: string
- description: MatchLabels is a map of {key,value} pairs.
- A single {key,value} in the matchLabels map is equivalent
- to an element of matchExpressions, whose key field is
- "key", the operator is "In", and the values array contains
- only "value". The requirements are ANDed.
- type: object
- type: object
- type: array
- required:
- - namespaceSelectors
- type: object
- gitImplementation:
- default: go-git
- description: Determines which git client library to use. Defaults
- to go-git, valid values are ('go-git', 'libgit2').
- enum:
- - go-git
- - libgit2
- type: string
ignore:
- description: Ignore overrides the set of excluded patterns in the
- .sourceignore format (which is the same as .gitignore). If not provided,
- a default will be used, consult the documentation for your version
- to find out what those are.
+ description: |-
+ Ignore overrides the set of excluded patterns in the .sourceignore format
+ (which is the same as .gitignore). If not provided, a default will be used,
+ consult the documentation for your version to find out what those are.
type: string
include:
- description: Extra git repositories to map into the repository
+ description: |-
+ Include specifies a list of GitRepository resources which Artifacts
+ should be included in the Artifact produced for this GitRepository.
items:
- description: GitRepositoryInclude defines a source with a from and
- to path.
+ description: |-
+ GitRepositoryInclude specifies a local reference to a GitRepository which
+ Artifact (sub-)contents must be included, and where they should be placed.
properties:
fromPath:
- description: The path to copy contents from, defaults to the
- root directory.
+ description: |-
+ FromPath specifies the path to copy contents from, defaults to the root
+ of the Artifact.
type: string
repository:
- description: Reference to a GitRepository to include.
+ description: |-
+ GitRepositoryRef specifies the GitRepository which Artifact contents
+ must be included.
properties:
name:
description: Name of the referent.
@@ -112,45 +88,85 @@ spec:
- name
type: object
toPath:
- description: The path to copy contents to, defaults to the name
- of the source ref.
+ description: |-
+ ToPath specifies the path to copy contents to, defaults to the name of
+ the GitRepositoryRef.
type: string
required:
- repository
type: object
type: array
interval:
- description: The interval at which to check for repository updates.
+ description: |-
+ Interval at which the GitRepository URL is checked for updates.
+ This interval is approximate and may be subject to jitter to ensure
+ efficient use of resources.
+ pattern: ^([0-9]+(\.[0-9]+)?(ms|s|m|h))+$
+ type: string
+ provider:
+ description: |-
+ Provider used for authentication, can be 'azure', 'github', 'generic'.
+ When not specified, defaults to 'generic'.
+ enum:
+ - generic
+ - azure
+ - github
type: string
+ proxySecretRef:
+ description: |-
+ ProxySecretRef specifies the Secret containing the proxy configuration
+ to use while communicating with the Git server.
+ properties:
+ name:
+ description: Name of the referent.
+ type: string
+ required:
+ - name
+ type: object
recurseSubmodules:
- description: When enabled, after the clone is created, initializes
- all submodules within, using their default settings. This option
- is available only when using the 'go-git' GitImplementation.
+ description: |-
+ RecurseSubmodules enables the initialization of all submodules within
+ the GitRepository as cloned from the URL, using their default settings.
type: boolean
ref:
- description: The Git reference to checkout and monitor for changes,
- defaults to master branch.
+ description: |-
+ Reference specifies the Git reference to resolve and monitor for
+ changes, defaults to the 'master' branch.
properties:
branch:
- description: The Git branch to checkout, defaults to master.
+ description: Branch to check out, defaults to 'master' if no other
+ field is defined.
type: string
commit:
- description: The Git commit SHA to checkout, if specified Tag
- filters will be ignored.
+ description: |-
+ Commit SHA to check out, takes precedence over all reference fields.
+
+ This can be combined with Branch to shallow clone the branch, in which
+ the commit is expected to exist.
+ type: string
+ name:
+ description: |-
+ Name of the reference to check out; takes precedence over Branch, Tag and SemVer.
+
+ It must be a valid Git reference: https://git-scm.com/docs/git-check-ref-format#_description
+ Examples: "refs/heads/main", "refs/tags/v0.1.0", "refs/pull/420/head", "refs/merge-requests/1/head"
type: string
semver:
- description: The Git tag semver expression, takes precedence over
- Tag.
+ description: SemVer tag expression to check out, takes precedence
+ over Tag.
type: string
tag:
- description: The Git tag to checkout, takes precedence over Branch.
+ description: Tag to check out, takes precedence over Branch.
type: string
type: object
secretRef:
- description: The secret name containing the Git credentials. For HTTPS
- repositories the secret must contain username and password fields.
- For SSH repositories the secret must contain identity and known_hosts
- fields.
+ description: |-
+ SecretRef specifies the Secret containing authentication credentials for
+ the GitRepository.
+ For HTTPS repositories the Secret must contain 'username' and 'password'
+ fields for basic auth or 'bearerToken' field for token auth.
+ For SSH repositories the Secret must contain 'identity'
+ and 'known_hosts' fields.
properties:
name:
description: Name of the referent.
@@ -158,32 +174,58 @@ spec:
required:
- name
type: object
+ serviceAccountName:
+ description: |-
+ ServiceAccountName is the name of the Kubernetes ServiceAccount used to
+ authenticate to the GitRepository. This field is only supported for 'azure' provider.
+ type: string
+ sparseCheckout:
+ description: |-
+ SparseCheckout specifies a list of directories to checkout when cloning
+ the repository. If specified, only these directories are included in the
+ Artifact produced for this GitRepository.
+ items:
+ type: string
+ type: array
suspend:
- description: This flag tells the controller to suspend the reconciliation
- of this source.
+ description: |-
+ Suspend tells the controller to suspend the reconciliation of this
+ GitRepository.
type: boolean
timeout:
default: 60s
- description: The timeout for remote Git operations like cloning, defaults
- to 60s.
+ description: Timeout for Git operations like cloning, defaults to
+ 60s.
+ pattern: ^([0-9]+(\.[0-9]+)?(ms|s|m))+$
type: string
url:
- description: The repository URL, can be a HTTP/S or SSH address.
+ description: URL specifies the Git repository URL, it can be an HTTP/S
+ or SSH address.
pattern: ^(http|https|ssh)://.*$
type: string
verify:
- description: Verify OpenPGP signature for the Git commit HEAD points
- to.
+ description: |-
+ Verification specifies the configuration to verify the Git commit
+ signature(s).
properties:
mode:
- description: Mode describes what git object should be verified,
- currently ('head').
+ default: HEAD
+ description: |-
+ Mode specifies which Git object(s) should be verified.
+
+ The variants "head" and "HEAD" both imply the same thing, i.e. verify
+ the commit that the HEAD of the Git repository points to. The variant
+ "head" solely exists to ensure backwards compatibility.
enum:
- head
+ - HEAD
+ - Tag
+ - TagAndHEAD
type: string
secretRef:
- description: The secret name containing the public keys of all
- trusted Git authors.
+ description: |-
+ SecretRef specifies the Secret containing the public keys of trusted Git
+ authors.
properties:
name:
description: Name of the referent.
@@ -192,85 +234,100 @@ spec:
- name
type: object
required:
- - mode
+ - secretRef
type: object
required:
- interval
- url
type: object
+ x-kubernetes-validations:
+ - message: serviceAccountName can only be set when provider is 'azure'
+ rule: '!has(self.serviceAccountName) || (has(self.provider) && self.provider
+ == ''azure'')'
status:
default:
observedGeneration: -1
- description: GitRepositoryStatus defines the observed state of a Git repository.
+ description: GitRepositoryStatus records the observed state of a Git repository.
properties:
artifact:
- description: Artifact represents the output of the last successful
- repository sync.
+ description: Artifact represents the last successful GitRepository
+ reconciliation.
properties:
- checksum:
- description: Checksum is the SHA256 checksum of the artifact.
+ digest:
+ description: Digest is the digest of the file in the form of ':'.
+ pattern: ^[a-z0-9]+(?:[.+_-][a-z0-9]+)*:[a-zA-Z0-9=_-]+$
type: string
lastUpdateTime:
- description: LastUpdateTime is the timestamp corresponding to
- the last update of this artifact.
+ description: |-
+ LastUpdateTime is the timestamp corresponding to the last update of the
+ Artifact.
format: date-time
type: string
+ metadata:
+ additionalProperties:
+ type: string
+ description: Metadata holds upstream information such as OCI annotations.
+ type: object
path:
- description: Path is the relative file path of this artifact.
+ description: |-
+ Path is the relative file path of the Artifact. It can be used to locate
+ the file in the root of the Artifact storage on the local file system of
+ the controller managing the Source.
type: string
revision:
- description: Revision is a human readable identifier traceable
- in the origin source system. It can be a Git commit SHA, Git
- tag, a Helm index timestamp, a Helm chart version, etc.
+ description: |-
+ Revision is a human-readable identifier traceable in the origin source
+ system. It can be a Git commit SHA, Git tag, a Helm chart version, etc.
type: string
+ size:
+ description: Size is the number of bytes in the file.
+ format: int64
+ type: integer
url:
- description: URL is the HTTP address of this artifact.
+ description: |-
+ URL is the HTTP address of the Artifact as exposed by the controller
+ managing the Source. It can be used to retrieve the Artifact for
+ consumption, e.g. by another controller applying the Artifact contents.
type: string
required:
+ - digest
+ - lastUpdateTime
- path
+ - revision
- url
type: object
conditions:
description: Conditions holds the conditions for the GitRepository.
items:
- description: "Condition contains details for one aspect of the current
- state of this API Resource. --- This struct is intended for direct
- use as an array at the field path .status.conditions. For example,
- type FooStatus struct{ // Represents the observations of a
- foo's current state. // Known .status.conditions.type are:
- \"Available\", \"Progressing\", and \"Degraded\" // +patchMergeKey=type
- \ // +patchStrategy=merge // +listType=map // +listMapKey=type
- \ Conditions []metav1.Condition `json:\"conditions,omitempty\"
- patchStrategy:\"merge\" patchMergeKey:\"type\" protobuf:\"bytes,1,rep,name=conditions\"`
- \n // other fields }"
+ description: Condition contains details for one aspect of the current
+ state of this API Resource.
properties:
lastTransitionTime:
- description: lastTransitionTime is the last time the condition
- transitioned from one status to another. This should be when
- the underlying condition changed. If that is not known, then
- using the time when the API field changed is acceptable.
+ description: |-
+ lastTransitionTime is the last time the condition transitioned from one status to another.
+ This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable.
format: date-time
type: string
message:
- description: message is a human readable message indicating
- details about the transition. This may be an empty string.
+ description: |-
+ message is a human readable message indicating details about the transition.
+ This may be an empty string.
maxLength: 32768
type: string
observedGeneration:
- description: observedGeneration represents the .metadata.generation
- that the condition was set based upon. For instance, if .metadata.generation
- is currently 12, but the .status.conditions[x].observedGeneration
- is 9, the condition is out of date with respect to the current
- state of the instance.
+ description: |-
+ observedGeneration represents the .metadata.generation that the condition was set based upon.
+ For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date
+ with respect to the current state of the instance.
format: int64
minimum: 0
type: integer
reason:
- description: reason contains a programmatic identifier indicating
- the reason for the condition's last transition. Producers
- of specific condition types may define expected values and
- meanings for this field, and whether the values are considered
- a guaranteed API. The value should be a CamelCase string.
+ description: |-
+ reason contains a programmatic identifier indicating the reason for the condition's last transition.
+ Producers of specific condition types may define expected values and meanings for this field,
+ and whether the values are considered a guaranteed API.
+ The value should be a CamelCase string.
This field may not be empty.
maxLength: 1024
minLength: 1
@@ -285,10 +342,6 @@ spec:
type: string
type:
description: type of condition in CamelCase or in foo.example.com/CamelCase.
- --- Many .condition.type values are consistent across resources
- like Available, but because arbitrary conditions can be useful
- (see .node.status.conditions), the ability to deconflict is
- important. The regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt)
maxLength: 316
pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$
type: string
@@ -301,52 +354,130 @@ spec:
type: object
type: array
includedArtifacts:
- description: IncludedArtifacts represents the included artifacts from
- the last successful repository sync.
+ description: |-
+ IncludedArtifacts contains a list of the last successfully included
+ Artifacts as instructed by GitRepositorySpec.Include.
items:
- description: Artifact represents the output of a source synchronisation.
+ description: Artifact represents the output of a Source reconciliation.
properties:
- checksum:
- description: Checksum is the SHA256 checksum of the artifact.
+ digest:
+ description: Digest is the digest of the file in the form of
+ ':'.
+ pattern: ^[a-z0-9]+(?:[.+_-][a-z0-9]+)*:[a-zA-Z0-9=_-]+$
type: string
lastUpdateTime:
- description: LastUpdateTime is the timestamp corresponding to
- the last update of this artifact.
+ description: |-
+ LastUpdateTime is the timestamp corresponding to the last update of the
+ Artifact.
format: date-time
type: string
+ metadata:
+ additionalProperties:
+ type: string
+ description: Metadata holds upstream information such as OCI
+ annotations.
+ type: object
path:
- description: Path is the relative file path of this artifact.
+ description: |-
+ Path is the relative file path of the Artifact. It can be used to locate
+ the file in the root of the Artifact storage on the local file system of
+ the controller managing the Source.
type: string
revision:
- description: Revision is a human readable identifier traceable
- in the origin source system. It can be a Git commit SHA, Git
- tag, a Helm index timestamp, a Helm chart version, etc.
+ description: |-
+ Revision is a human-readable identifier traceable in the origin source
+ system. It can be a Git commit SHA, Git tag, a Helm chart version, etc.
type: string
+ size:
+ description: Size is the number of bytes in the file.
+ format: int64
+ type: integer
url:
- description: URL is the HTTP address of this artifact.
+ description: |-
+ URL is the HTTP address of the Artifact as exposed by the controller
+ managing the Source. It can be used to retrieve the Artifact for
+ consumption, e.g. by another controller applying the Artifact contents.
type: string
required:
+ - digest
+ - lastUpdateTime
- path
+ - revision
- url
type: object
type: array
lastHandledReconcileAt:
- description: LastHandledReconcileAt holds the value of the most recent
- reconcile request value, so a change of the annotation value can
- be detected.
+ description: |-
+ LastHandledReconcileAt holds the value of the most recent
+ reconcile request value, so a change of the annotation value
+ can be detected.
type: string
observedGeneration:
- description: ObservedGeneration is the last observed generation.
+ description: |-
+ ObservedGeneration is the last observed generation of the GitRepository
+ object.
format: int64
type: integer
- url:
- description: URL is the download link for the artifact output of the
- last repository sync.
+ observedIgnore:
+ description: |-
+ ObservedIgnore is the observed exclusion patterns used for constructing
+ the source artifact.
+ type: string
+ observedInclude:
+ description: |-
+ ObservedInclude is the observed list of GitRepository resources used to
+ produce the current Artifact.
+ items:
+ description: |-
+ GitRepositoryInclude specifies a local reference to a GitRepository which
+ Artifact (sub-)contents must be included, and where they should be placed.
+ properties:
+ fromPath:
+ description: |-
+ FromPath specifies the path to copy contents from, defaults to the root
+ of the Artifact.
+ type: string
+ repository:
+ description: |-
+ GitRepositoryRef specifies the GitRepository which Artifact contents
+ must be included.
+ properties:
+ name:
+ description: Name of the referent.
+ type: string
+ required:
+ - name
+ type: object
+ toPath:
+ description: |-
+ ToPath specifies the path to copy contents to, defaults to the name of
+ the GitRepositoryRef.
+ type: string
+ required:
+ - repository
+ type: object
+ type: array
+ observedRecurseSubmodules:
+ description: |-
+ ObservedRecurseSubmodules is the observed resource submodules
+ configuration used to produce the current Artifact.
+ type: boolean
+ observedSparseCheckout:
+ description: |-
+ ObservedSparseCheckout is the observed list of directories used to
+ produce the current Artifact.
+ items:
+ type: string
+ type: array
+ sourceVerificationMode:
+ description: |-
+ SourceVerificationMode is the last used verification mode indicating
+ which Git object(s) have been verified.
type: string
type: object
type: object
served: true
- storage: false
+ storage: true
subresources:
status: {}
- additionalPrinterColumns:
@@ -362,49 +493,57 @@ spec:
- jsonPath: .status.conditions[?(@.type=="Ready")].message
name: Status
type: string
+ deprecated: true
+ deprecationWarning: v1beta2 GitRepository is deprecated, upgrade to v1
name: v1beta2
schema:
openAPIV3Schema:
description: GitRepository is the Schema for the gitrepositories API.
properties:
apiVersion:
- description: 'APIVersion defines the versioned schema of this representation
- of an object. Servers should convert recognized schemas to the latest
- internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
+ description: |-
+ APIVersion defines the versioned schema of this representation of an object.
+ Servers should convert recognized schemas to the latest internal value, and
+ may reject unrecognized values.
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources
type: string
kind:
- description: 'Kind is a string value representing the REST resource this
- object represents. Servers may infer this from the endpoint the client
- submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
+ description: |-
+ Kind is a string value representing the REST resource this object represents.
+ Servers may infer this from the endpoint the client submits requests to.
+ Cannot be updated.
+ In CamelCase.
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
type: string
metadata:
type: object
spec:
- description: GitRepositorySpec specifies the required configuration to
- produce an Artifact for a Git repository.
+ description: |-
+ GitRepositorySpec specifies the required configuration to produce an
+ Artifact for a Git repository.
properties:
accessFrom:
- description: 'AccessFrom specifies an Access Control List for allowing
- cross-namespace references to this object. NOTE: Not implemented,
- provisional as of https://github.com/fluxcd/flux2/pull/2092'
+ description: |-
+ AccessFrom specifies an Access Control List for allowing cross-namespace
+ references to this object.
+ NOTE: Not implemented, provisional as of https://github.com/fluxcd/flux2/pull/2092
properties:
namespaceSelectors:
- description: NamespaceSelectors is the list of namespace selectors
- to which this ACL applies. Items in this list are evaluated
- using a logical OR operation.
+ description: |-
+ NamespaceSelectors is the list of namespace selectors to which this ACL applies.
+ Items in this list are evaluated using a logical OR operation.
items:
- description: NamespaceSelector selects the namespaces to which
- this ACL applies. An empty map of MatchLabels matches all
- namespaces in a cluster.
+ description: |-
+ NamespaceSelector selects the namespaces to which this ACL applies.
+ An empty map of MatchLabels matches all namespaces in a cluster.
properties:
matchLabels:
additionalProperties:
type: string
- description: MatchLabels is a map of {key,value} pairs.
- A single {key,value} in the matchLabels map is equivalent
- to an element of matchExpressions, whose key field is
- "key", the operator is "In", and the values array contains
- only "value". The requirements are ANDed.
+ description: |-
+ MatchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels
+ map is equivalent to an element of matchExpressions, whose key field is "key", the
+ operator is "In", and the values array contains only "value". The requirements are ANDed.
type: object
type: object
type: array
@@ -413,34 +552,39 @@ spec:
type: object
gitImplementation:
default: go-git
- description: GitImplementation specifies which Git client library
- implementation to use. Defaults to 'go-git', valid values are ('go-git',
- 'libgit2').
+ description: |-
+ GitImplementation specifies which Git client library implementation to
+ use. Defaults to 'go-git', valid values are ('go-git', 'libgit2').
+ Deprecated: gitImplementation is deprecated now that 'go-git' is the
+ only supported implementation.
enum:
- go-git
- libgit2
type: string
ignore:
- description: Ignore overrides the set of excluded patterns in the
- .sourceignore format (which is the same as .gitignore). If not provided,
- a default will be used, consult the documentation for your version
- to find out what those are.
+ description: |-
+ Ignore overrides the set of excluded patterns in the .sourceignore format
+ (which is the same as .gitignore). If not provided, a default will be used,
+ consult the documentation for your version to find out what those are.
type: string
include:
- description: Include specifies a list of GitRepository resources which
- Artifacts should be included in the Artifact produced for this GitRepository.
+ description: |-
+ Include specifies a list of GitRepository resources which Artifacts
+ should be included in the Artifact produced for this GitRepository.
items:
- description: GitRepositoryInclude specifies a local reference to
- a GitRepository which Artifact (sub-)contents must be included,
- and where they should be placed.
+ description: |-
+ GitRepositoryInclude specifies a local reference to a GitRepository which
+ Artifact (sub-)contents must be included, and where they should be placed.
properties:
fromPath:
- description: FromPath specifies the path to copy contents from,
- defaults to the root of the Artifact.
+ description: |-
+ FromPath specifies the path to copy contents from, defaults to the root
+ of the Artifact.
type: string
repository:
- description: GitRepositoryRef specifies the GitRepository which
- Artifact contents must be included.
+ description: |-
+ GitRepositoryRef specifies the GitRepository which Artifact contents
+ must be included.
properties:
name:
description: Name of the referent.
@@ -449,8 +593,9 @@ spec:
- name
type: object
toPath:
- description: ToPath specifies the path to copy contents to,
- defaults to the name of the GitRepositoryRef.
+ description: |-
+ ToPath specifies the path to copy contents to, defaults to the name of
+ the GitRepositoryRef.
type: string
required:
- repository
@@ -458,28 +603,35 @@ spec:
type: array
interval:
description: Interval at which to check the GitRepository for updates.
+ pattern: ^([0-9]+(\.[0-9]+)?(ms|s|m|h))+$
type: string
recurseSubmodules:
- description: RecurseSubmodules enables the initialization of all submodules
- within the GitRepository as cloned from the URL, using their default
- settings. This option is available only when using the 'go-git'
- GitImplementation.
+ description: |-
+ RecurseSubmodules enables the initialization of all submodules within
+ the GitRepository as cloned from the URL, using their default settings.
type: boolean
ref:
- description: Reference specifies the Git reference to resolve and
- monitor for changes, defaults to the 'master' branch.
+ description: |-
+ Reference specifies the Git reference to resolve and monitor for
+ changes, defaults to the 'master' branch.
properties:
branch:
- description: "Branch to check out, defaults to 'master' if no
- other field is defined. \n When GitRepositorySpec.GitImplementation
- is set to 'go-git', a shallow clone of the specified branch
- is performed."
+ description: Branch to check out, defaults to 'master' if no other
+ field is defined.
type: string
commit:
- description: "Commit SHA to check out, takes precedence over all
- reference fields. \n When GitRepositorySpec.GitImplementation
- is set to 'go-git', this can be combined with Branch to shallow
- clone the branch, in which the commit is expected to exist."
+ description: |-
+ Commit SHA to check out, takes precedence over all reference fields.
+
+ This can be combined with Branch to shallow clone the branch, in which
+ the commit is expected to exist.
+ type: string
+ name:
+ description: |-
+ Name of the reference to check out; takes precedence over Branch, Tag and SemVer.
+
+ It must be a valid Git reference: https://git-scm.com/docs/git-check-ref-format#_description
+ Examples: "refs/heads/main", "refs/tags/v0.1.0", "refs/pull/420/head", "refs/merge-requests/1/head"
type: string
semver:
description: SemVer tag expression to check out, takes precedence
@@ -490,10 +642,13 @@ spec:
type: string
type: object
secretRef:
- description: SecretRef specifies the Secret containing authentication
- credentials for the GitRepository. For HTTPS repositories the Secret
- must contain 'username' and 'password' fields. For SSH repositories
- the Secret must contain 'identity' and 'known_hosts' fields.
+ description: |-
+ SecretRef specifies the Secret containing authentication credentials for
+ the GitRepository.
+ For HTTPS repositories the Secret must contain 'username' and 'password'
+ fields for basic auth or 'bearerToken' field for token auth.
+ For SSH repositories the Secret must contain 'identity'
+ and 'known_hosts' fields.
properties:
name:
description: Name of the referent.
@@ -502,13 +657,15 @@ spec:
- name
type: object
suspend:
- description: Suspend tells the controller to suspend the reconciliation
- of this GitRepository.
+ description: |-
+ Suspend tells the controller to suspend the reconciliation of this
+ GitRepository.
type: boolean
timeout:
default: 60s
description: Timeout for Git operations like cloning, defaults to
60s.
+ pattern: ^([0-9]+(\.[0-9]+)?(ms|s|m))+$
type: string
url:
description: URL specifies the Git repository URL, it can be an HTTP/S
@@ -516,8 +673,9 @@ spec:
pattern: ^(http|https|ssh)://.*$
type: string
verify:
- description: Verification specifies the configuration to verify the
- Git commit signature(s).
+ description: |-
+ Verification specifies the configuration to verify the Git commit
+ signature(s).
properties:
mode:
description: Mode specifies what Git object should be verified,
@@ -526,8 +684,9 @@ spec:
- head
type: string
secretRef:
- description: SecretRef specifies the Secret containing the public
- keys of trusted Git authors.
+ description: |-
+ SecretRef specifies the Secret containing the public keys of trusted Git
+ authors.
properties:
name:
description: Name of the referent.
@@ -537,6 +696,7 @@ spec:
type: object
required:
- mode
+ - secretRef
type: object
required:
- interval
@@ -551,12 +711,14 @@ spec:
description: Artifact represents the last successful GitRepository
reconciliation.
properties:
- checksum:
- description: Checksum is the SHA256 checksum of the Artifact file.
+ digest:
+ description: Digest is the digest of the file in the form of ':'.
+ pattern: ^[a-z0-9]+(?:[.+_-][a-z0-9]+)*:[a-zA-Z0-9=_-]+$
type: string
lastUpdateTime:
- description: LastUpdateTime is the timestamp corresponding to
- the last update of the Artifact.
+ description: |-
+ LastUpdateTime is the timestamp corresponding to the last update of the
+ Artifact.
format: date-time
type: string
metadata:
@@ -565,70 +727,65 @@ spec:
description: Metadata holds upstream information such as OCI annotations.
type: object
path:
- description: Path is the relative file path of the Artifact. It
- can be used to locate the file in the root of the Artifact storage
- on the local file system of the controller managing the Source.
+ description: |-
+ Path is the relative file path of the Artifact. It can be used to locate
+ the file in the root of the Artifact storage on the local file system of
+ the controller managing the Source.
type: string
revision:
- description: Revision is a human-readable identifier traceable
- in the origin source system. It can be a Git commit SHA, Git
- tag, a Helm chart version, etc.
+ description: |-
+ Revision is a human-readable identifier traceable in the origin source
+ system. It can be a Git commit SHA, Git tag, a Helm chart version, etc.
type: string
size:
description: Size is the number of bytes in the file.
format: int64
type: integer
url:
- description: URL is the HTTP address of the Artifact as exposed
- by the controller managing the Source. It can be used to retrieve
- the Artifact for consumption, e.g. by another controller applying
- the Artifact contents.
+ description: |-
+ URL is the HTTP address of the Artifact as exposed by the controller
+ managing the Source. It can be used to retrieve the Artifact for
+ consumption, e.g. by another controller applying the Artifact contents.
type: string
required:
+ - digest
+ - lastUpdateTime
- path
+ - revision
- url
type: object
conditions:
description: Conditions holds the conditions for the GitRepository.
items:
- description: "Condition contains details for one aspect of the current
- state of this API Resource. --- This struct is intended for direct
- use as an array at the field path .status.conditions. For example,
- type FooStatus struct{ // Represents the observations of a
- foo's current state. // Known .status.conditions.type are:
- \"Available\", \"Progressing\", and \"Degraded\" // +patchMergeKey=type
- \ // +patchStrategy=merge // +listType=map // +listMapKey=type
- \ Conditions []metav1.Condition `json:\"conditions,omitempty\"
- patchStrategy:\"merge\" patchMergeKey:\"type\" protobuf:\"bytes,1,rep,name=conditions\"`
- \n // other fields }"
+ description: Condition contains details for one aspect of the current
+ state of this API Resource.
properties:
lastTransitionTime:
- description: lastTransitionTime is the last time the condition
- transitioned from one status to another. This should be when
- the underlying condition changed. If that is not known, then
- using the time when the API field changed is acceptable.
+ description: |-
+ lastTransitionTime is the last time the condition transitioned from one status to another.
+ This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable.
format: date-time
type: string
message:
- description: message is a human readable message indicating
- details about the transition. This may be an empty string.
+ description: |-
+ message is a human readable message indicating details about the transition.
+ This may be an empty string.
maxLength: 32768
type: string
observedGeneration:
- description: observedGeneration represents the .metadata.generation
- that the condition was set based upon. For instance, if .metadata.generation
- is currently 12, but the .status.conditions[x].observedGeneration
- is 9, the condition is out of date with respect to the current
- state of the instance.
+ description: |-
+ observedGeneration represents the .metadata.generation that the condition was set based upon.
+ For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date
+ with respect to the current state of the instance.
format: int64
minimum: 0
type: integer
reason:
- description: reason contains a programmatic identifier indicating
- the reason for the condition's last transition. Producers
- of specific condition types may define expected values and
- meanings for this field, and whether the values are considered
- a guaranteed API. The value should be a CamelCase string.
+ description: |-
+ reason contains a programmatic identifier indicating the reason for the condition's last transition.
+ Producers of specific condition types may define expected values and meanings for this field,
+ and whether the values are considered a guaranteed API.
+ The value should be a CamelCase string.
This field may not be empty.
maxLength: 1024
minLength: 1
@@ -643,10 +800,6 @@ spec:
type: string
type:
description: type of condition in CamelCase or in foo.example.com/CamelCase.
- --- Many .condition.type values are consistent across resources
- like Available, but because arbitrary conditions can be useful
- (see .node.status.conditions), the ability to deconflict is
- important. The regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt)
maxLength: 316
pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$
type: string
@@ -659,27 +812,36 @@ spec:
type: object
type: array
contentConfigChecksum:
- description: 'ContentConfigChecksum is a checksum of all the configurations
- related to the content of the source artifact: - .spec.ignore -
- .spec.recurseSubmodules - .spec.included and the checksum of the
- included artifacts observed in .status.observedGeneration version
- of the object. This can be used to determine if the content of the
- included repository has changed. It has the format of `:`,
- for example: `sha256:`.'
+ description: |-
+ ContentConfigChecksum is a checksum of all the configurations related to
+ the content of the source artifact:
+ - .spec.ignore
+ - .spec.recurseSubmodules
+ - .spec.included and the checksum of the included artifacts
+ observed in .status.observedGeneration version of the object. This can
+ be used to determine if the content of the included repository has
+ changed.
+ It has the format of `:`, for example: `sha256:`.
+
+ Deprecated: Replaced with explicit fields for observed artifact content
+ config in the status.
type: string
includedArtifacts:
- description: IncludedArtifacts contains a list of the last successfully
- included Artifacts as instructed by GitRepositorySpec.Include.
+ description: |-
+ IncludedArtifacts contains a list of the last successfully included
+ Artifacts as instructed by GitRepositorySpec.Include.
items:
description: Artifact represents the output of a Source reconciliation.
properties:
- checksum:
- description: Checksum is the SHA256 checksum of the Artifact
- file.
+ digest:
+ description: Digest is the digest of the file in the form of
+ ':'.
+ pattern: ^[a-z0-9]+(?:[.+_-][a-z0-9]+)*:[a-zA-Z0-9=_-]+$
type: string
lastUpdateTime:
- description: LastUpdateTime is the timestamp corresponding to
- the last update of the Artifact.
+ description: |-
+ LastUpdateTime is the timestamp corresponding to the last update of the
+ Artifact.
format: date-time
type: string
metadata:
@@ -689,55 +851,99 @@ spec:
annotations.
type: object
path:
- description: Path is the relative file path of the Artifact.
- It can be used to locate the file in the root of the Artifact
- storage on the local file system of the controller managing
- the Source.
+ description: |-
+ Path is the relative file path of the Artifact. It can be used to locate
+ the file in the root of the Artifact storage on the local file system of
+ the controller managing the Source.
type: string
revision:
- description: Revision is a human-readable identifier traceable
- in the origin source system. It can be a Git commit SHA, Git
- tag, a Helm chart version, etc.
+ description: |-
+ Revision is a human-readable identifier traceable in the origin source
+ system. It can be a Git commit SHA, Git tag, a Helm chart version, etc.
type: string
size:
description: Size is the number of bytes in the file.
format: int64
type: integer
url:
- description: URL is the HTTP address of the Artifact as exposed
- by the controller managing the Source. It can be used to retrieve
- the Artifact for consumption, e.g. by another controller applying
- the Artifact contents.
+ description: |-
+ URL is the HTTP address of the Artifact as exposed by the controller
+ managing the Source. It can be used to retrieve the Artifact for
+ consumption, e.g. by another controller applying the Artifact contents.
type: string
required:
+ - digest
+ - lastUpdateTime
- path
+ - revision
- url
type: object
type: array
lastHandledReconcileAt:
- description: LastHandledReconcileAt holds the value of the most recent
- reconcile request value, so a change of the annotation value can
- be detected.
+ description: |-
+ LastHandledReconcileAt holds the value of the most recent
+ reconcile request value, so a change of the annotation value
+ can be detected.
type: string
observedGeneration:
- description: ObservedGeneration is the last observed generation of
- the GitRepository object.
+ description: |-
+ ObservedGeneration is the last observed generation of the GitRepository
+ object.
format: int64
type: integer
+ observedIgnore:
+ description: |-
+ ObservedIgnore is the observed exclusion patterns used for constructing
+ the source artifact.
+ type: string
+ observedInclude:
+ description: |-
+ ObservedInclude is the observed list of GitRepository resources used to
+ produce the current Artifact.
+ items:
+ description: |-
+ GitRepositoryInclude specifies a local reference to a GitRepository which
+ Artifact (sub-)contents must be included, and where they should be placed.
+ properties:
+ fromPath:
+ description: |-
+ FromPath specifies the path to copy contents from, defaults to the root
+ of the Artifact.
+ type: string
+ repository:
+ description: |-
+ GitRepositoryRef specifies the GitRepository which Artifact contents
+ must be included.
+ properties:
+ name:
+ description: Name of the referent.
+ type: string
+ required:
+ - name
+ type: object
+ toPath:
+ description: |-
+ ToPath specifies the path to copy contents to, defaults to the name of
+ the GitRepositoryRef.
+ type: string
+ required:
+ - repository
+ type: object
+ type: array
+ observedRecurseSubmodules:
+ description: |-
+ ObservedRecurseSubmodules is the observed resource submodules
+ configuration used to produce the current Artifact.
+ type: boolean
url:
- description: URL is the dynamic fetch link for the latest Artifact.
- It is provided on a "best effort" basis, and using the precise GitRepositoryStatus.Artifact
- data is recommended.
+ description: |-
+ URL is the dynamic fetch link for the latest Artifact.
+ It is provided on a "best effort" basis, and using the precise
+ GitRepositoryStatus.Artifact data is recommended.
type: string
type: object
type: object
served: true
- storage: true
+ storage: false
subresources:
status: {}
-status:
- acceptedNames:
- kind: ""
- plural: ""
- conditions: []
- storedVersions: []
diff --git a/config/crd/bases/source.toolkit.fluxcd.io_helmcharts.yaml b/config/crd/bases/source.toolkit.fluxcd.io_helmcharts.yaml
index 6b15e7bfb..0e57c72a5 100644
--- a/config/crd/bases/source.toolkit.fluxcd.io_helmcharts.yaml
+++ b/config/crd/bases/source.toolkit.fluxcd.io_helmcharts.yaml
@@ -1,11 +1,9 @@
-
---
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
annotations:
- controller-gen.kubebuilder.io/version: v0.7.0
- creationTimestamp: null
+ controller-gen.kubebuilder.io/version: v0.19.0
name: helmcharts.source.toolkit.fluxcd.io
spec:
group: source.toolkit.fluxcd.io
@@ -31,88 +29,79 @@ spec:
- jsonPath: .spec.sourceRef.name
name: Source Name
type: string
+ - jsonPath: .metadata.creationTimestamp
+ name: Age
+ type: date
- jsonPath: .status.conditions[?(@.type=="Ready")].status
name: Ready
type: string
- jsonPath: .status.conditions[?(@.type=="Ready")].message
name: Status
type: string
- - jsonPath: .metadata.creationTimestamp
- name: Age
- type: date
- name: v1beta1
+ name: v1
schema:
openAPIV3Schema:
- description: HelmChart is the Schema for the helmcharts API
+ description: HelmChart is the Schema for the helmcharts API.
properties:
apiVersion:
- description: 'APIVersion defines the versioned schema of this representation
- of an object. Servers should convert recognized schemas to the latest
- internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
+ description: |-
+ APIVersion defines the versioned schema of this representation of an object.
+ Servers should convert recognized schemas to the latest internal value, and
+ may reject unrecognized values.
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources
type: string
kind:
- description: 'Kind is a string value representing the REST resource this
- object represents. Servers may infer this from the endpoint the client
- submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
+ description: |-
+ Kind is a string value representing the REST resource this object represents.
+ Servers may infer this from the endpoint the client submits requests to.
+ Cannot be updated.
+ In CamelCase.
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
type: string
metadata:
type: object
spec:
- description: HelmChartSpec defines the desired state of a Helm chart.
+ description: HelmChartSpec specifies the desired state of a Helm chart.
properties:
- accessFrom:
- description: AccessFrom defines an Access Control List for allowing
- cross-namespace references to this object.
- properties:
- namespaceSelectors:
- description: NamespaceSelectors is the list of namespace selectors
- to which this ACL applies. Items in this list are evaluated
- using a logical OR operation.
- items:
- description: NamespaceSelector selects the namespaces to which
- this ACL applies. An empty map of MatchLabels matches all
- namespaces in a cluster.
- properties:
- matchLabels:
- additionalProperties:
- type: string
- description: MatchLabels is a map of {key,value} pairs.
- A single {key,value} in the matchLabels map is equivalent
- to an element of matchExpressions, whose key field is
- "key", the operator is "In", and the values array contains
- only "value". The requirements are ANDed.
- type: object
- type: object
- type: array
- required:
- - namespaceSelectors
- type: object
chart:
- description: The name or path the Helm chart is available at in the
+ description: |-
+ Chart is the name or path the Helm chart is available at in the
SourceRef.
type: string
+ ignoreMissingValuesFiles:
+ description: |-
+ IgnoreMissingValuesFiles controls whether to silently ignore missing values
+ files rather than failing.
+ type: boolean
interval:
- description: The interval at which to check the Source for updates.
+ description: |-
+ Interval at which the HelmChart SourceRef is checked for updates.
+ This interval is approximate and may be subject to jitter to ensure
+ efficient use of resources.
+ pattern: ^([0-9]+(\.[0-9]+)?(ms|s|m|h))+$
type: string
reconcileStrategy:
default: ChartVersion
- description: Determines what enables the creation of a new artifact.
- Valid values are ('ChartVersion', 'Revision'). See the documentation
- of the values for an explanation on their behavior. Defaults to
- ChartVersion when omitted.
+ description: |-
+ ReconcileStrategy determines what enables the creation of a new artifact.
+ Valid values are ('ChartVersion', 'Revision').
+ See the documentation of the values for an explanation on their behavior.
+ Defaults to ChartVersion when omitted.
enum:
- ChartVersion
- Revision
type: string
sourceRef:
- description: The reference to the Source the chart is available at.
+ description: SourceRef is the reference to the Source the chart is
+ available at.
properties:
apiVersion:
description: APIVersion of the referent.
type: string
kind:
- description: Kind of the referent, valid values are ('HelmRepository',
- 'GitRepository', 'Bucket').
+ description: |-
+ Kind of the referent, valid values are ('HelmRepository', 'GitRepository',
+ 'Bucket').
enum:
- HelmRepository
- GitRepository
@@ -126,28 +115,83 @@ spec:
- name
type: object
suspend:
- description: This flag tells the controller to suspend the reconciliation
- of this source.
+ description: |-
+ Suspend tells the controller to suspend the reconciliation of this
+ source.
type: boolean
- valuesFile:
- description: Alternative values file to use as the default chart values,
- expected to be a relative path in the SourceRef. Deprecated in favor
- of ValuesFiles, for backwards compatibility the file defined here
- is merged before the ValuesFiles items. Ignored when omitted.
- type: string
valuesFiles:
- description: Alternative list of values files to use as the chart
- values (values.yaml is not included by default), expected to be
- a relative path in the SourceRef. Values files are merged in the
- order of this list with the last file overriding the first. Ignored
- when omitted.
+ description: |-
+ ValuesFiles is an alternative list of values files to use as the chart
+ values (values.yaml is not included by default), expected to be a
+ relative path in the SourceRef.
+ Values files are merged in the order of this list with the last file
+ overriding the first. Ignored when omitted.
items:
type: string
type: array
+ verify:
+ description: |-
+ Verify contains the secret name containing the trusted public keys
+ used to verify the signature and specifies which provider to use to check
+ whether OCI image is authentic.
+ This field is only supported when using HelmRepository source with spec.type 'oci'.
+ Chart dependencies, which are not bundled in the umbrella chart artifact, are not verified.
+ properties:
+ matchOIDCIdentity:
+ description: |-
+ MatchOIDCIdentity specifies the identity matching criteria to use
+ while verifying an OCI artifact which was signed using Cosign keyless
+ signing. The artifact's identity is deemed to be verified if any of the
+ specified matchers match against the identity.
+ items:
+ description: |-
+ OIDCIdentityMatch specifies options for verifying the certificate identity,
+ i.e. the issuer and the subject of the certificate.
+ properties:
+ issuer:
+ description: |-
+ Issuer specifies the regex pattern to match against to verify
+ the OIDC issuer in the Fulcio certificate. The pattern must be a
+ valid Go regular expression.
+ type: string
+ subject:
+ description: |-
+ Subject specifies the regex pattern to match against to verify
+ the identity subject in the Fulcio certificate. The pattern must
+ be a valid Go regular expression.
+ type: string
+ required:
+ - issuer
+ - subject
+ type: object
+ type: array
+ provider:
+ default: cosign
+ description: Provider specifies the technology used to sign the
+ OCI Artifact.
+ enum:
+ - cosign
+ - notation
+ type: string
+ secretRef:
+ description: |-
+ SecretRef specifies the Kubernetes Secret containing the
+ trusted public keys.
+ properties:
+ name:
+ description: Name of the referent.
+ type: string
+ required:
+ - name
+ type: object
+ required:
+ - provider
+ type: object
version:
default: '*'
- description: The chart version semver expression, ignored for charts
- from GitRepository and Bucket sources. Defaults to latest when omitted.
+ description: |-
+ Version is the chart version semver expression, ignored for charts from
+ GitRepository and Bucket sources. Defaults to latest when omitted.
type: string
required:
- chart
@@ -157,76 +201,87 @@ spec:
status:
default:
observedGeneration: -1
- description: HelmChartStatus defines the observed state of the HelmChart.
+ description: HelmChartStatus records the observed state of the HelmChart.
properties:
artifact:
description: Artifact represents the output of the last successful
- chart sync.
+ reconciliation.
properties:
- checksum:
- description: Checksum is the SHA256 checksum of the artifact.
+ digest:
+ description: Digest is the digest of the file in the form of '<algorithm>:<checksum>'.
+ pattern: ^[a-z0-9]+(?:[.+_-][a-z0-9]+)*:[a-zA-Z0-9=_-]+$
type: string
lastUpdateTime:
- description: LastUpdateTime is the timestamp corresponding to
- the last update of this artifact.
+ description: |-
+ LastUpdateTime is the timestamp corresponding to the last update of the
+ Artifact.
format: date-time
type: string
+ metadata:
+ additionalProperties:
+ type: string
+ description: Metadata holds upstream information such as OCI annotations.
+ type: object
path:
- description: Path is the relative file path of this artifact.
+ description: |-
+ Path is the relative file path of the Artifact. It can be used to locate
+ the file in the root of the Artifact storage on the local file system of
+ the controller managing the Source.
type: string
revision:
- description: Revision is a human readable identifier traceable
- in the origin source system. It can be a Git commit SHA, Git
- tag, a Helm index timestamp, a Helm chart version, etc.
+ description: |-
+ Revision is a human-readable identifier traceable in the origin source
+ system. It can be a Git commit SHA, Git tag, a Helm chart version, etc.
type: string
+ size:
+ description: Size is the number of bytes in the file.
+ format: int64
+ type: integer
url:
- description: URL is the HTTP address of this artifact.
+ description: |-
+ URL is the HTTP address of the Artifact as exposed by the controller
+ managing the Source. It can be used to retrieve the Artifact for
+ consumption, e.g. by another controller applying the Artifact contents.
type: string
required:
+ - digest
+ - lastUpdateTime
- path
+ - revision
- url
type: object
conditions:
description: Conditions holds the conditions for the HelmChart.
items:
- description: "Condition contains details for one aspect of the current
- state of this API Resource. --- This struct is intended for direct
- use as an array at the field path .status.conditions. For example,
- type FooStatus struct{ // Represents the observations of a
- foo's current state. // Known .status.conditions.type are:
- \"Available\", \"Progressing\", and \"Degraded\" // +patchMergeKey=type
- \ // +patchStrategy=merge // +listType=map // +listMapKey=type
- \ Conditions []metav1.Condition `json:\"conditions,omitempty\"
- patchStrategy:\"merge\" patchMergeKey:\"type\" protobuf:\"bytes,1,rep,name=conditions\"`
- \n // other fields }"
+ description: Condition contains details for one aspect of the current
+ state of this API Resource.
properties:
lastTransitionTime:
- description: lastTransitionTime is the last time the condition
- transitioned from one status to another. This should be when
- the underlying condition changed. If that is not known, then
- using the time when the API field changed is acceptable.
+ description: |-
+ lastTransitionTime is the last time the condition transitioned from one status to another.
+ This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable.
format: date-time
type: string
message:
- description: message is a human readable message indicating
- details about the transition. This may be an empty string.
+ description: |-
+ message is a human readable message indicating details about the transition.
+ This may be an empty string.
maxLength: 32768
type: string
observedGeneration:
- description: observedGeneration represents the .metadata.generation
- that the condition was set based upon. For instance, if .metadata.generation
- is currently 12, but the .status.conditions[x].observedGeneration
- is 9, the condition is out of date with respect to the current
- state of the instance.
+ description: |-
+ observedGeneration represents the .metadata.generation that the condition was set based upon.
+ For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date
+ with respect to the current state of the instance.
format: int64
minimum: 0
type: integer
reason:
- description: reason contains a programmatic identifier indicating
- the reason for the condition's last transition. Producers
- of specific condition types may define expected values and
- meanings for this field, and whether the values are considered
- a guaranteed API. The value should be a CamelCase string.
+ description: |-
+ reason contains a programmatic identifier indicating the reason for the condition's last transition.
+ Producers of specific condition types may define expected values and meanings for this field,
+ and whether the values are considered a guaranteed API.
+ The value should be a CamelCase string.
This field may not be empty.
maxLength: 1024
minLength: 1
@@ -241,10 +296,6 @@ spec:
type: string
type:
description: type of condition in CamelCase or in foo.example.com/CamelCase.
- --- Many .condition.type values are consistent across resources
- like Available, but because arbitrary conditions can be useful
- (see .node.status.conditions), the ability to deconflict is
- important. The regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt)
maxLength: 316
pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$
type: string
@@ -257,21 +308,45 @@ spec:
type: object
type: array
lastHandledReconcileAt:
- description: LastHandledReconcileAt holds the value of the most recent
- reconcile request value, so a change of the annotation value can
- be detected.
+ description: |-
+ LastHandledReconcileAt holds the value of the most recent
+ reconcile request value, so a change of the annotation value
+ can be detected.
+ type: string
+ observedChartName:
+ description: |-
+ ObservedChartName is the last observed chart name as specified by the
+ resolved chart reference.
type: string
observedGeneration:
- description: ObservedGeneration is the last observed generation.
+ description: |-
+ ObservedGeneration is the last observed generation of the HelmChart
+ object.
format: int64
type: integer
+ observedSourceArtifactRevision:
+ description: |-
+ ObservedSourceArtifactRevision is the last observed Artifact.Revision
+ of the HelmChartSpec.SourceRef.
+ type: string
+ observedValuesFiles:
+ description: |-
+ ObservedValuesFiles are the observed value files of the last successful
+ reconciliation.
+ It matches the chart in the last successfully reconciled artifact.
+ items:
+ type: string
+ type: array
url:
- description: URL is the download link for the last chart pulled.
+ description: |-
+ URL is the dynamic fetch link for the latest Artifact.
+ It is provided on a "best effort" basis, and using the precise
+ HelmChartStatus.Artifact data is recommended.
type: string
type: object
type: object
served: true
- storage: false
+ storage: true
subresources:
status: {}
- additionalPrinterColumns:
@@ -296,20 +371,27 @@ spec:
- jsonPath: .status.conditions[?(@.type=="Ready")].message
name: Status
type: string
+ deprecated: true
+ deprecationWarning: v1beta2 HelmChart is deprecated, upgrade to v1
name: v1beta2
schema:
openAPIV3Schema:
description: HelmChart is the Schema for the helmcharts API.
properties:
apiVersion:
- description: 'APIVersion defines the versioned schema of this representation
- of an object. Servers should convert recognized schemas to the latest
- internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
+ description: |-
+ APIVersion defines the versioned schema of this representation of an object.
+ Servers should convert recognized schemas to the latest internal value, and
+ may reject unrecognized values.
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources
type: string
kind:
- description: 'Kind is a string value representing the REST resource this
- object represents. Servers may infer this from the endpoint the client
- submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
+ description: |-
+ Kind is a string value representing the REST resource this object represents.
+ Servers may infer this from the endpoint the client submits requests to.
+ Cannot be updated.
+ In CamelCase.
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
type: string
metadata:
type: object
@@ -317,27 +399,27 @@ spec:
description: HelmChartSpec specifies the desired state of a Helm chart.
properties:
accessFrom:
- description: 'AccessFrom specifies an Access Control List for allowing
- cross-namespace references to this object. NOTE: Not implemented,
- provisional as of https://github.com/fluxcd/flux2/pull/2092'
+ description: |-
+ AccessFrom specifies an Access Control List for allowing cross-namespace
+ references to this object.
+ NOTE: Not implemented, provisional as of https://github.com/fluxcd/flux2/pull/2092
properties:
namespaceSelectors:
- description: NamespaceSelectors is the list of namespace selectors
- to which this ACL applies. Items in this list are evaluated
- using a logical OR operation.
+ description: |-
+ NamespaceSelectors is the list of namespace selectors to which this ACL applies.
+ Items in this list are evaluated using a logical OR operation.
items:
- description: NamespaceSelector selects the namespaces to which
- this ACL applies. An empty map of MatchLabels matches all
- namespaces in a cluster.
+ description: |-
+ NamespaceSelector selects the namespaces to which this ACL applies.
+ An empty map of MatchLabels matches all namespaces in a cluster.
properties:
matchLabels:
additionalProperties:
type: string
- description: MatchLabels is a map of {key,value} pairs.
- A single {key,value} in the matchLabels map is equivalent
- to an element of matchExpressions, whose key field is
- "key", the operator is "In", and the values array contains
- only "value". The requirements are ANDed.
+ description: |-
+ MatchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels
+ map is equivalent to an element of matchExpressions, whose key field is "key", the
+ operator is "In", and the values array contains only "value". The requirements are ANDed.
type: object
type: object
type: array
@@ -345,19 +427,29 @@ spec:
- namespaceSelectors
type: object
chart:
- description: Chart is the name or path the Helm chart is available
- at in the SourceRef.
+ description: |-
+ Chart is the name or path the Helm chart is available at in the
+ SourceRef.
type: string
+ ignoreMissingValuesFiles:
+ description: |-
+ IgnoreMissingValuesFiles controls whether to silently ignore missing values
+ files rather than failing.
+ type: boolean
interval:
- description: Interval is the interval at which to check the Source
- for updates.
+ description: |-
+ Interval at which the HelmChart SourceRef is checked for updates.
+ This interval is approximate and may be subject to jitter to ensure
+ efficient use of resources.
+ pattern: ^([0-9]+(\.[0-9]+)?(ms|s|m|h))+$
type: string
reconcileStrategy:
default: ChartVersion
- description: ReconcileStrategy determines what enables the creation
- of a new artifact. Valid values are ('ChartVersion', 'Revision').
- See the documentation of the values for an explanation on their
- behavior. Defaults to ChartVersion when omitted.
+ description: |-
+ ReconcileStrategy determines what enables the creation of a new artifact.
+ Valid values are ('ChartVersion', 'Revision').
+ See the documentation of the values for an explanation on their behavior.
+ Defaults to ChartVersion when omitted.
enum:
- ChartVersion
- Revision
@@ -370,8 +462,9 @@ spec:
description: APIVersion of the referent.
type: string
kind:
- description: Kind of the referent, valid values are ('HelmRepository',
- 'GitRepository', 'Bucket').
+ description: |-
+ Kind of the referent, valid values are ('HelmRepository', 'GitRepository',
+ 'Bucket').
enum:
- HelmRepository
- GitRepository
@@ -385,30 +478,90 @@ spec:
- name
type: object
suspend:
- description: Suspend tells the controller to suspend the reconciliation
- of this source.
+ description: |-
+ Suspend tells the controller to suspend the reconciliation of this
+ source.
type: boolean
valuesFile:
- description: ValuesFile is an alternative values file to use as the
- default chart values, expected to be a relative path in the SourceRef.
- Deprecated in favor of ValuesFiles, for backwards compatibility
- the file specified here is merged before the ValuesFiles items.
- Ignored when omitted.
+ description: |-
+ ValuesFile is an alternative values file to use as the default chart
+ values, expected to be a relative path in the SourceRef. Deprecated in
+ favor of ValuesFiles, for backwards compatibility the file specified here
+ is merged before the ValuesFiles items. Ignored when omitted.
type: string
valuesFiles:
- description: ValuesFiles is an alternative list of values files to
- use as the chart values (values.yaml is not included by default),
- expected to be a relative path in the SourceRef. Values files are
- merged in the order of this list with the last file overriding the
- first. Ignored when omitted.
+ description: |-
+ ValuesFiles is an alternative list of values files to use as the chart
+ values (values.yaml is not included by default), expected to be a
+ relative path in the SourceRef.
+ Values files are merged in the order of this list with the last file
+ overriding the first. Ignored when omitted.
items:
type: string
type: array
+ verify:
+ description: |-
+ Verify contains the secret name containing the trusted public keys
+ used to verify the signature and specifies which provider to use to check
+ whether OCI image is authentic.
+ This field is only supported when using HelmRepository source with spec.type 'oci'.
+ Chart dependencies, which are not bundled in the umbrella chart artifact, are not verified.
+ properties:
+ matchOIDCIdentity:
+ description: |-
+ MatchOIDCIdentity specifies the identity matching criteria to use
+ while verifying an OCI artifact which was signed using Cosign keyless
+ signing. The artifact's identity is deemed to be verified if any of the
+ specified matchers match against the identity.
+ items:
+ description: |-
+ OIDCIdentityMatch specifies options for verifying the certificate identity,
+ i.e. the issuer and the subject of the certificate.
+ properties:
+ issuer:
+ description: |-
+ Issuer specifies the regex pattern to match against to verify
+ the OIDC issuer in the Fulcio certificate. The pattern must be a
+ valid Go regular expression.
+ type: string
+ subject:
+ description: |-
+ Subject specifies the regex pattern to match against to verify
+ the identity subject in the Fulcio certificate. The pattern must
+ be a valid Go regular expression.
+ type: string
+ required:
+ - issuer
+ - subject
+ type: object
+ type: array
+ provider:
+ default: cosign
+ description: Provider specifies the technology used to sign the
+ OCI Artifact.
+ enum:
+ - cosign
+ - notation
+ type: string
+ secretRef:
+ description: |-
+ SecretRef specifies the Kubernetes Secret containing the
+ trusted public keys.
+ properties:
+ name:
+ description: Name of the referent.
+ type: string
+ required:
+ - name
+ type: object
+ required:
+ - provider
+ type: object
version:
default: '*'
- description: Version is the chart version semver expression, ignored
- for charts from GitRepository and Bucket sources. Defaults to latest
- when omitted.
+ description: |-
+ Version is the chart version semver expression, ignored for charts from
+ GitRepository and Bucket sources. Defaults to latest when omitted.
type: string
required:
- chart
@@ -424,12 +577,14 @@ spec:
description: Artifact represents the output of the last successful
reconciliation.
properties:
- checksum:
- description: Checksum is the SHA256 checksum of the Artifact file.
+ digest:
+ description: Digest is the digest of the file in the form of '<algorithm>:<checksum>'.
+ pattern: ^[a-z0-9]+(?:[.+_-][a-z0-9]+)*:[a-zA-Z0-9=_-]+$
type: string
lastUpdateTime:
- description: LastUpdateTime is the timestamp corresponding to
- the last update of the Artifact.
+ description: |-
+ LastUpdateTime is the timestamp corresponding to the last update of the
+ Artifact.
format: date-time
type: string
metadata:
@@ -438,70 +593,65 @@ spec:
description: Metadata holds upstream information such as OCI annotations.
type: object
path:
- description: Path is the relative file path of the Artifact. It
- can be used to locate the file in the root of the Artifact storage
- on the local file system of the controller managing the Source.
+ description: |-
+ Path is the relative file path of the Artifact. It can be used to locate
+ the file in the root of the Artifact storage on the local file system of
+ the controller managing the Source.
type: string
revision:
- description: Revision is a human-readable identifier traceable
- in the origin source system. It can be a Git commit SHA, Git
- tag, a Helm chart version, etc.
+ description: |-
+ Revision is a human-readable identifier traceable in the origin source
+ system. It can be a Git commit SHA, Git tag, a Helm chart version, etc.
type: string
size:
description: Size is the number of bytes in the file.
format: int64
type: integer
url:
- description: URL is the HTTP address of the Artifact as exposed
- by the controller managing the Source. It can be used to retrieve
- the Artifact for consumption, e.g. by another controller applying
- the Artifact contents.
+ description: |-
+ URL is the HTTP address of the Artifact as exposed by the controller
+ managing the Source. It can be used to retrieve the Artifact for
+ consumption, e.g. by another controller applying the Artifact contents.
type: string
required:
+ - digest
+ - lastUpdateTime
- path
+ - revision
- url
type: object
conditions:
description: Conditions holds the conditions for the HelmChart.
items:
- description: "Condition contains details for one aspect of the current
- state of this API Resource. --- This struct is intended for direct
- use as an array at the field path .status.conditions. For example,
- type FooStatus struct{ // Represents the observations of a
- foo's current state. // Known .status.conditions.type are:
- \"Available\", \"Progressing\", and \"Degraded\" // +patchMergeKey=type
- \ // +patchStrategy=merge // +listType=map // +listMapKey=type
- \ Conditions []metav1.Condition `json:\"conditions,omitempty\"
- patchStrategy:\"merge\" patchMergeKey:\"type\" protobuf:\"bytes,1,rep,name=conditions\"`
- \n // other fields }"
+ description: Condition contains details for one aspect of the current
+ state of this API Resource.
properties:
lastTransitionTime:
- description: lastTransitionTime is the last time the condition
- transitioned from one status to another. This should be when
- the underlying condition changed. If that is not known, then
- using the time when the API field changed is acceptable.
+ description: |-
+ lastTransitionTime is the last time the condition transitioned from one status to another.
+ This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable.
format: date-time
type: string
message:
- description: message is a human readable message indicating
- details about the transition. This may be an empty string.
+ description: |-
+ message is a human readable message indicating details about the transition.
+ This may be an empty string.
maxLength: 32768
type: string
observedGeneration:
- description: observedGeneration represents the .metadata.generation
- that the condition was set based upon. For instance, if .metadata.generation
- is currently 12, but the .status.conditions[x].observedGeneration
- is 9, the condition is out of date with respect to the current
- state of the instance.
+ description: |-
+ observedGeneration represents the .metadata.generation that the condition was set based upon.
+ For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date
+ with respect to the current state of the instance.
format: int64
minimum: 0
type: integer
reason:
- description: reason contains a programmatic identifier indicating
- the reason for the condition's last transition. Producers
- of specific condition types may define expected values and
- meanings for this field, and whether the values are considered
- a guaranteed API. The value should be a CamelCase string.
+ description: |-
+ reason contains a programmatic identifier indicating the reason for the condition's last transition.
+ Producers of specific condition types may define expected values and meanings for this field,
+ and whether the values are considered a guaranteed API.
+ The value should be a CamelCase string.
This field may not be empty.
maxLength: 1024
minLength: 1
@@ -516,10 +666,6 @@ spec:
type: string
type:
description: type of condition in CamelCase or in foo.example.com/CamelCase.
- --- Many .condition.type values are consistent across resources
- like Available, but because arbitrary conditions can be useful
- (see .node.status.conditions), the ability to deconflict is
- important. The regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt)
maxLength: 316
pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$
type: string
@@ -532,37 +678,44 @@ spec:
type: object
type: array
lastHandledReconcileAt:
- description: LastHandledReconcileAt holds the value of the most recent
- reconcile request value, so a change of the annotation value can
- be detected.
+ description: |-
+ LastHandledReconcileAt holds the value of the most recent
+ reconcile request value, so a change of the annotation value
+ can be detected.
type: string
observedChartName:
- description: ObservedChartName is the last observed chart name as
- specified by the resolved chart reference.
+ description: |-
+ ObservedChartName is the last observed chart name as specified by the
+ resolved chart reference.
type: string
observedGeneration:
- description: ObservedGeneration is the last observed generation of
- the HelmChart object.
+ description: |-
+ ObservedGeneration is the last observed generation of the HelmChart
+ object.
format: int64
type: integer
observedSourceArtifactRevision:
- description: ObservedSourceArtifactRevision is the last observed Artifact.Revision
+ description: |-
+ ObservedSourceArtifactRevision is the last observed Artifact.Revision
of the HelmChartSpec.SourceRef.
type: string
+ observedValuesFiles:
+ description: |-
+ ObservedValuesFiles are the observed value files of the last successful
+ reconciliation.
+ It matches the chart in the last successfully reconciled artifact.
+ items:
+ type: string
+ type: array
url:
- description: URL is the dynamic fetch link for the latest Artifact.
- It is provided on a "best effort" basis, and using the precise BucketStatus.Artifact
- data is recommended.
+ description: |-
+ URL is the dynamic fetch link for the latest Artifact.
+ It is provided on a "best effort" basis, and using the precise
+ BucketStatus.Artifact data is recommended.
type: string
type: object
type: object
served: true
- storage: true
+ storage: false
subresources:
status: {}
-status:
- acceptedNames:
- kind: ""
- plural: ""
- conditions: []
- storedVersions: []
diff --git a/config/crd/bases/source.toolkit.fluxcd.io_helmrepositories.yaml b/config/crd/bases/source.toolkit.fluxcd.io_helmrepositories.yaml
index c19552fdd..750a36500 100644
--- a/config/crd/bases/source.toolkit.fluxcd.io_helmrepositories.yaml
+++ b/config/crd/bases/source.toolkit.fluxcd.io_helmrepositories.yaml
@@ -1,11 +1,9 @@
-
---
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
annotations:
- controller-gen.kubebuilder.io/version: v0.7.0
- creationTimestamp: null
+ controller-gen.kubebuilder.io/version: v0.19.0
name: helmrepositories.source.toolkit.fluxcd.io
spec:
group: source.toolkit.fluxcd.io
@@ -22,78 +20,135 @@ spec:
- jsonPath: .spec.url
name: URL
type: string
+ - jsonPath: .metadata.creationTimestamp
+ name: Age
+ type: date
- jsonPath: .status.conditions[?(@.type=="Ready")].status
name: Ready
type: string
- jsonPath: .status.conditions[?(@.type=="Ready")].message
name: Status
type: string
- - jsonPath: .metadata.creationTimestamp
- name: Age
- type: date
- name: v1beta1
+ name: v1
schema:
openAPIV3Schema:
- description: HelmRepository is the Schema for the helmrepositories API
+ description: HelmRepository is the Schema for the helmrepositories API.
properties:
apiVersion:
- description: 'APIVersion defines the versioned schema of this representation
- of an object. Servers should convert recognized schemas to the latest
- internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
+ description: |-
+ APIVersion defines the versioned schema of this representation of an object.
+ Servers should convert recognized schemas to the latest internal value, and
+ may reject unrecognized values.
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources
type: string
kind:
- description: 'Kind is a string value representing the REST resource this
- object represents. Servers may infer this from the endpoint the client
- submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
+ description: |-
+ Kind is a string value representing the REST resource this object represents.
+ Servers may infer this from the endpoint the client submits requests to.
+ Cannot be updated.
+ In CamelCase.
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
type: string
metadata:
type: object
spec:
- description: HelmRepositorySpec defines the reference to a Helm repository.
+ description: |-
+ HelmRepositorySpec specifies the required configuration to produce an
+ Artifact for a Helm repository index YAML.
properties:
accessFrom:
- description: AccessFrom defines an Access Control List for allowing
- cross-namespace references to this object.
+ description: |-
+ AccessFrom specifies an Access Control List for allowing cross-namespace
+ references to this object.
+ NOTE: Not implemented, provisional as of https://github.com/fluxcd/flux2/pull/2092
properties:
namespaceSelectors:
- description: NamespaceSelectors is the list of namespace selectors
- to which this ACL applies. Items in this list are evaluated
- using a logical OR operation.
+ description: |-
+ NamespaceSelectors is the list of namespace selectors to which this ACL applies.
+ Items in this list are evaluated using a logical OR operation.
items:
- description: NamespaceSelector selects the namespaces to which
- this ACL applies. An empty map of MatchLabels matches all
- namespaces in a cluster.
+ description: |-
+ NamespaceSelector selects the namespaces to which this ACL applies.
+ An empty map of MatchLabels matches all namespaces in a cluster.
properties:
matchLabels:
additionalProperties:
type: string
- description: MatchLabels is a map of {key,value} pairs.
- A single {key,value} in the matchLabels map is equivalent
- to an element of matchExpressions, whose key field is
- "key", the operator is "In", and the values array contains
- only "value". The requirements are ANDed.
+ description: |-
+ MatchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels
+ map is equivalent to an element of matchExpressions, whose key field is "key", the
+ operator is "In", and the values array contains only "value". The requirements are ANDed.
type: object
type: object
type: array
required:
- namespaceSelectors
type: object
+ certSecretRef:
+ description: |-
+ CertSecretRef can be given the name of a Secret containing
+ either or both of
+
+ - a PEM-encoded client certificate (`tls.crt`) and private
+ key (`tls.key`);
+ - a PEM-encoded CA certificate (`ca.crt`)
+
+ and whichever are supplied, will be used for connecting to the
+ registry. The client cert and key are useful if you are
+ authenticating with a certificate; the CA cert is useful if
+ you are using a self-signed server certificate. The Secret must
+ be of type `Opaque` or `kubernetes.io/tls`.
+
+ It takes precedence over the values specified in the Secret referred
+ to by `.spec.secretRef`.
+ properties:
+ name:
+ description: Name of the referent.
+ type: string
+ required:
+ - name
+ type: object
+ insecure:
+ description: |-
+ Insecure allows connecting to a non-TLS HTTP container registry.
+ This field is only taken into account if the .spec.type field is set to 'oci'.
+ type: boolean
interval:
- description: The interval at which to check the upstream for updates.
+ description: |-
+ Interval at which the HelmRepository URL is checked for updates.
+ This interval is approximate and may be subject to jitter to ensure
+ efficient use of resources.
+ pattern: ^([0-9]+(\.[0-9]+)?(ms|s|m|h))+$
type: string
passCredentials:
- description: PassCredentials allows the credentials from the SecretRef
- to be passed on to a host that does not match the host as defined
- in URL. This may be required if the host of the advertised chart
- URLs in the index differ from the defined URL. Enabling this should
- be done with caution, as it can potentially result in credentials
- getting stolen in a MITM-attack.
+ description: |-
+ PassCredentials allows the credentials from the SecretRef to be passed
+ on to a host that does not match the host as defined in URL.
+ This may be required if the host of the advertised chart URLs in the
+ index differ from the defined URL.
+ Enabling this should be done with caution, as it can potentially result
+ in credentials getting stolen in a MITM-attack.
type: boolean
+ provider:
+ default: generic
+ description: |-
+ Provider used for authentication, can be 'aws', 'azure', 'gcp' or 'generic'.
+ This field is optional, and only taken into account if the .spec.type field is set to 'oci'.
+ When not specified, defaults to 'generic'.
+ enum:
+ - generic
+ - aws
+ - azure
+ - gcp
+ type: string
secretRef:
- description: The name of the secret containing authentication credentials
- for the Helm repository. For HTTP/S basic auth the secret must contain
- username and password fields. For TLS the secret must contain a
- certFile and keyFile, and/or caCert fields.
+ description: |-
+ SecretRef specifies the Secret containing authentication credentials
+ for the HelmRepository.
+ For HTTP/S basic auth the secret must contain 'username' and 'password'
+ fields.
+ Support for TLS auth using the 'certFile' and 'keyFile', and/or 'caFile'
+ keys is deprecated. Please use `.spec.certSecretRef` instead.
properties:
name:
description: Name of the referent.
@@ -102,94 +157,119 @@ spec:
- name
type: object
suspend:
- description: This flag tells the controller to suspend the reconciliation
- of this source.
+ description: |-
+ Suspend tells the controller to suspend the reconciliation of this
+ HelmRepository.
type: boolean
timeout:
- default: 60s
- description: The timeout of index downloading, defaults to 60s.
+ description: |-
+ Timeout is used for the index fetch operation for an HTTPS helm repository,
+ and for remote OCI Repository operations like pulling for an OCI helm
+ chart by the associated HelmChart.
+ Its default value is 60s.
+ pattern: ^([0-9]+(\.[0-9]+)?(ms|s|m))+$
+ type: string
+ type:
+ description: |-
+ Type of the HelmRepository.
+ When this field is set to "oci", the URL field value must be prefixed with "oci://".
+ enum:
+ - default
+ - oci
type: string
url:
- description: The Helm repository URL, a valid URL contains at least
- a protocol and host.
+ description: |-
+ URL of the Helm repository, a valid URL contains at least a protocol and
+ host.
+ pattern: ^(http|https|oci)://.*$
type: string
required:
- - interval
- url
type: object
status:
default:
observedGeneration: -1
- description: HelmRepositoryStatus defines the observed state of the HelmRepository.
+ description: HelmRepositoryStatus records the observed state of the HelmRepository.
properties:
artifact:
- description: Artifact represents the output of the last successful
- repository sync.
+ description: Artifact represents the last successful HelmRepository
+ reconciliation.
properties:
- checksum:
- description: Checksum is the SHA256 checksum of the artifact.
+ digest:
+ description: Digest is the digest of the file in the form of ':'.
+ pattern: ^[a-z0-9]+(?:[.+_-][a-z0-9]+)*:[a-zA-Z0-9=_-]+$
type: string
lastUpdateTime:
- description: LastUpdateTime is the timestamp corresponding to
- the last update of this artifact.
+ description: |-
+ LastUpdateTime is the timestamp corresponding to the last update of the
+ Artifact.
format: date-time
type: string
+ metadata:
+ additionalProperties:
+ type: string
+ description: Metadata holds upstream information such as OCI annotations.
+ type: object
path:
- description: Path is the relative file path of this artifact.
+ description: |-
+ Path is the relative file path of the Artifact. It can be used to locate
+ the file in the root of the Artifact storage on the local file system of
+ the controller managing the Source.
type: string
revision:
- description: Revision is a human readable identifier traceable
- in the origin source system. It can be a Git commit SHA, Git
- tag, a Helm index timestamp, a Helm chart version, etc.
+ description: |-
+ Revision is a human-readable identifier traceable in the origin source
+ system. It can be a Git commit SHA, Git tag, a Helm chart version, etc.
type: string
+ size:
+ description: Size is the number of bytes in the file.
+ format: int64
+ type: integer
url:
- description: URL is the HTTP address of this artifact.
+ description: |-
+ URL is the HTTP address of the Artifact as exposed by the controller
+ managing the Source. It can be used to retrieve the Artifact for
+ consumption, e.g. by another controller applying the Artifact contents.
type: string
required:
+ - digest
+ - lastUpdateTime
- path
+ - revision
- url
type: object
conditions:
description: Conditions holds the conditions for the HelmRepository.
items:
- description: "Condition contains details for one aspect of the current
- state of this API Resource. --- This struct is intended for direct
- use as an array at the field path .status.conditions. For example,
- type FooStatus struct{ // Represents the observations of a
- foo's current state. // Known .status.conditions.type are:
- \"Available\", \"Progressing\", and \"Degraded\" // +patchMergeKey=type
- \ // +patchStrategy=merge // +listType=map // +listMapKey=type
- \ Conditions []metav1.Condition `json:\"conditions,omitempty\"
- patchStrategy:\"merge\" patchMergeKey:\"type\" protobuf:\"bytes,1,rep,name=conditions\"`
- \n // other fields }"
+ description: Condition contains details for one aspect of the current
+ state of this API Resource.
properties:
lastTransitionTime:
- description: lastTransitionTime is the last time the condition
- transitioned from one status to another. This should be when
- the underlying condition changed. If that is not known, then
- using the time when the API field changed is acceptable.
+ description: |-
+ lastTransitionTime is the last time the condition transitioned from one status to another.
+ This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable.
format: date-time
type: string
message:
- description: message is a human readable message indicating
- details about the transition. This may be an empty string.
+ description: |-
+ message is a human readable message indicating details about the transition.
+ This may be an empty string.
maxLength: 32768
type: string
observedGeneration:
- description: observedGeneration represents the .metadata.generation
- that the condition was set based upon. For instance, if .metadata.generation
- is currently 12, but the .status.conditions[x].observedGeneration
- is 9, the condition is out of date with respect to the current
- state of the instance.
+ description: |-
+ observedGeneration represents the .metadata.generation that the condition was set based upon.
+ For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date
+ with respect to the current state of the instance.
format: int64
minimum: 0
type: integer
reason:
- description: reason contains a programmatic identifier indicating
- the reason for the condition's last transition. Producers
- of specific condition types may define expected values and
- meanings for this field, and whether the values are considered
- a guaranteed API. The value should be a CamelCase string.
+ description: |-
+ reason contains a programmatic identifier indicating the reason for the condition's last transition.
+ Producers of specific condition types may define expected values and meanings for this field,
+ and whether the values are considered a guaranteed API.
+ The value should be a CamelCase string.
This field may not be empty.
maxLength: 1024
minLength: 1
@@ -204,10 +284,6 @@ spec:
type: string
type:
description: type of condition in CamelCase or in foo.example.com/CamelCase.
- --- Many .condition.type values are consistent across resources
- like Available, but because arbitrary conditions can be useful
- (see .node.status.conditions), the ability to deconflict is
- important. The regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt)
maxLength: 316
pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$
type: string
@@ -220,21 +296,27 @@ spec:
type: object
type: array
lastHandledReconcileAt:
- description: LastHandledReconcileAt holds the value of the most recent
- reconcile request value, so a change of the annotation value can
- be detected.
+ description: |-
+ LastHandledReconcileAt holds the value of the most recent
+ reconcile request value, so a change of the annotation value
+ can be detected.
type: string
observedGeneration:
- description: ObservedGeneration is the last observed generation.
+ description: |-
+ ObservedGeneration is the last observed generation of the HelmRepository
+ object.
format: int64
type: integer
url:
- description: URL is the download link for the last index fetched.
+ description: |-
+ URL is the dynamic fetch link for the latest Artifact.
+ It is provided on a "best effort" basis, and using the precise
+ HelmRepositoryStatus.Artifact data is recommended.
type: string
type: object
type: object
served: true
- storage: false
+ storage: true
subresources:
status: {}
- additionalPrinterColumns:
@@ -250,71 +332,128 @@ spec:
- jsonPath: .status.conditions[?(@.type=="Ready")].message
name: Status
type: string
+ deprecated: true
+ deprecationWarning: v1beta2 HelmRepository is deprecated, upgrade to v1
name: v1beta2
schema:
openAPIV3Schema:
description: HelmRepository is the Schema for the helmrepositories API.
properties:
apiVersion:
- description: 'APIVersion defines the versioned schema of this representation
- of an object. Servers should convert recognized schemas to the latest
- internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
+ description: |-
+ APIVersion defines the versioned schema of this representation of an object.
+ Servers should convert recognized schemas to the latest internal value, and
+ may reject unrecognized values.
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources
type: string
kind:
- description: 'Kind is a string value representing the REST resource this
- object represents. Servers may infer this from the endpoint the client
- submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
+ description: |-
+ Kind is a string value representing the REST resource this object represents.
+ Servers may infer this from the endpoint the client submits requests to.
+ Cannot be updated.
+ In CamelCase.
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
type: string
metadata:
type: object
spec:
- description: HelmRepositorySpec specifies the required configuration to
- produce an Artifact for a Helm repository index YAML.
+ description: |-
+ HelmRepositorySpec specifies the required configuration to produce an
+ Artifact for a Helm repository index YAML.
properties:
accessFrom:
- description: 'AccessFrom specifies an Access Control List for allowing
- cross-namespace references to this object. NOTE: Not implemented,
- provisional as of https://github.com/fluxcd/flux2/pull/2092'
+ description: |-
+ AccessFrom specifies an Access Control List for allowing cross-namespace
+ references to this object.
+ NOTE: Not implemented, provisional as of https://github.com/fluxcd/flux2/pull/2092
properties:
namespaceSelectors:
- description: NamespaceSelectors is the list of namespace selectors
- to which this ACL applies. Items in this list are evaluated
- using a logical OR operation.
+ description: |-
+ NamespaceSelectors is the list of namespace selectors to which this ACL applies.
+ Items in this list are evaluated using a logical OR operation.
items:
- description: NamespaceSelector selects the namespaces to which
- this ACL applies. An empty map of MatchLabels matches all
- namespaces in a cluster.
+ description: |-
+ NamespaceSelector selects the namespaces to which this ACL applies.
+ An empty map of MatchLabels matches all namespaces in a cluster.
properties:
matchLabels:
additionalProperties:
type: string
- description: MatchLabels is a map of {key,value} pairs.
- A single {key,value} in the matchLabels map is equivalent
- to an element of matchExpressions, whose key field is
- "key", the operator is "In", and the values array contains
- only "value". The requirements are ANDed.
+ description: |-
+ MatchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels
+ map is equivalent to an element of matchExpressions, whose key field is "key", the
+ operator is "In", and the values array contains only "value". The requirements are ANDed.
type: object
type: object
type: array
required:
- namespaceSelectors
type: object
+ certSecretRef:
+ description: |-
+ CertSecretRef can be given the name of a Secret containing
+ either or both of
+
+ - a PEM-encoded client certificate (`tls.crt`) and private
+ key (`tls.key`);
+ - a PEM-encoded CA certificate (`ca.crt`)
+
+ and whichever are supplied, will be used for connecting to the
+ registry. The client cert and key are useful if you are
+ authenticating with a certificate; the CA cert is useful if
+ you are using a self-signed server certificate. The Secret must
+ be of type `Opaque` or `kubernetes.io/tls`.
+
+ It takes precedence over the values specified in the Secret referred
+ to by `.spec.secretRef`.
+ properties:
+ name:
+ description: Name of the referent.
+ type: string
+ required:
+ - name
+ type: object
+ insecure:
+ description: |-
+ Insecure allows connecting to a non-TLS HTTP container registry.
+ This field is only taken into account if the .spec.type field is set to 'oci'.
+ type: boolean
interval:
- description: Interval at which to check the URL for updates.
+ description: |-
+ Interval at which the HelmRepository URL is checked for updates.
+ This interval is approximate and may be subject to jitter to ensure
+ efficient use of resources.
+ pattern: ^([0-9]+(\.[0-9]+)?(ms|s|m|h))+$
type: string
passCredentials:
- description: PassCredentials allows the credentials from the SecretRef
- to be passed on to a host that does not match the host as defined
- in URL. This may be required if the host of the advertised chart
- URLs in the index differ from the defined URL. Enabling this should
- be done with caution, as it can potentially result in credentials
- getting stolen in a MITM-attack.
+ description: |-
+ PassCredentials allows the credentials from the SecretRef to be passed
+ on to a host that does not match the host as defined in URL.
+ This may be required if the host of the advertised chart URLs in the
+ index differ from the defined URL.
+ Enabling this should be done with caution, as it can potentially result
+ in credentials getting stolen in a MITM-attack.
type: boolean
+ provider:
+ default: generic
+ description: |-
+ Provider used for authentication, can be 'aws', 'azure', 'gcp' or 'generic'.
+ This field is optional, and only taken into account if the .spec.type field is set to 'oci'.
+ When not specified, defaults to 'generic'.
+ enum:
+ - generic
+ - aws
+ - azure
+ - gcp
+ type: string
secretRef:
- description: SecretRef specifies the Secret containing authentication
- credentials for the HelmRepository. For HTTP/S basic auth the secret
- must contain 'username' and 'password' fields. For TLS the secret
- must contain a 'certFile' and 'keyFile', and/or 'caCert' fields.
+ description: |-
+ SecretRef specifies the Secret containing authentication credentials
+ for the HelmRepository.
+ For HTTP/S basic auth the secret must contain 'username' and 'password'
+ fields.
+ Support for TLS auth using the 'certFile' and 'keyFile', and/or 'caFile'
+ keys is deprecated. Please use `.spec.certSecretRef` instead.
properties:
name:
description: Name of the referent.
@@ -323,26 +462,33 @@ spec:
- name
type: object
suspend:
- description: Suspend tells the controller to suspend the reconciliation
- of this HelmRepository.
+ description: |-
+ Suspend tells the controller to suspend the reconciliation of this
+ HelmRepository.
type: boolean
timeout:
- default: 60s
- description: Timeout of the index fetch operation, defaults to 60s.
+ description: |-
+ Timeout is used for the index fetch operation for an HTTPS helm repository,
+ and for remote OCI Repository operations like pulling for an OCI helm
+ chart by the associated HelmChart.
+ Its default value is 60s.
+ pattern: ^([0-9]+(\.[0-9]+)?(ms|s|m))+$
type: string
type:
- description: Type of the HelmRepository. When this field is set to "oci",
- the URL field value must be prefixed with "oci://".
+ description: |-
+ Type of the HelmRepository.
+ When this field is set to "oci", the URL field value must be prefixed with "oci://".
enum:
- default
- oci
type: string
url:
- description: URL of the Helm repository, a valid URL contains at least
- a protocol and host.
+ description: |-
+ URL of the Helm repository, a valid URL contains at least a protocol and
+ host.
+ pattern: ^(http|https|oci)://.*$
type: string
required:
- - interval
- url
type: object
status:
@@ -354,12 +500,14 @@ spec:
description: Artifact represents the last successful HelmRepository
reconciliation.
properties:
- checksum:
- description: Checksum is the SHA256 checksum of the Artifact file.
+ digest:
+ description: Digest is the digest of the file in the form of ':'.
+ pattern: ^[a-z0-9]+(?:[.+_-][a-z0-9]+)*:[a-zA-Z0-9=_-]+$
type: string
lastUpdateTime:
- description: LastUpdateTime is the timestamp corresponding to
- the last update of the Artifact.
+ description: |-
+ LastUpdateTime is the timestamp corresponding to the last update of the
+ Artifact.
format: date-time
type: string
metadata:
@@ -368,70 +516,65 @@ spec:
description: Metadata holds upstream information such as OCI annotations.
type: object
path:
- description: Path is the relative file path of the Artifact. It
- can be used to locate the file in the root of the Artifact storage
- on the local file system of the controller managing the Source.
+ description: |-
+ Path is the relative file path of the Artifact. It can be used to locate
+ the file in the root of the Artifact storage on the local file system of
+ the controller managing the Source.
type: string
revision:
- description: Revision is a human-readable identifier traceable
- in the origin source system. It can be a Git commit SHA, Git
- tag, a Helm chart version, etc.
+ description: |-
+ Revision is a human-readable identifier traceable in the origin source
+ system. It can be a Git commit SHA, Git tag, a Helm chart version, etc.
type: string
size:
description: Size is the number of bytes in the file.
format: int64
type: integer
url:
- description: URL is the HTTP address of the Artifact as exposed
- by the controller managing the Source. It can be used to retrieve
- the Artifact for consumption, e.g. by another controller applying
- the Artifact contents.
+ description: |-
+ URL is the HTTP address of the Artifact as exposed by the controller
+ managing the Source. It can be used to retrieve the Artifact for
+ consumption, e.g. by another controller applying the Artifact contents.
type: string
required:
+ - digest
+ - lastUpdateTime
- path
+ - revision
- url
type: object
conditions:
description: Conditions holds the conditions for the HelmRepository.
items:
- description: "Condition contains details for one aspect of the current
- state of this API Resource. --- This struct is intended for direct
- use as an array at the field path .status.conditions. For example,
- type FooStatus struct{ // Represents the observations of a
- foo's current state. // Known .status.conditions.type are:
- \"Available\", \"Progressing\", and \"Degraded\" // +patchMergeKey=type
- \ // +patchStrategy=merge // +listType=map // +listMapKey=type
- \ Conditions []metav1.Condition `json:\"conditions,omitempty\"
- patchStrategy:\"merge\" patchMergeKey:\"type\" protobuf:\"bytes,1,rep,name=conditions\"`
- \n // other fields }"
+ description: Condition contains details for one aspect of the current
+ state of this API Resource.
properties:
lastTransitionTime:
- description: lastTransitionTime is the last time the condition
- transitioned from one status to another. This should be when
- the underlying condition changed. If that is not known, then
- using the time when the API field changed is acceptable.
+ description: |-
+ lastTransitionTime is the last time the condition transitioned from one status to another.
+ This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable.
format: date-time
type: string
message:
- description: message is a human readable message indicating
- details about the transition. This may be an empty string.
+ description: |-
+ message is a human readable message indicating details about the transition.
+ This may be an empty string.
maxLength: 32768
type: string
observedGeneration:
- description: observedGeneration represents the .metadata.generation
- that the condition was set based upon. For instance, if .metadata.generation
- is currently 12, but the .status.conditions[x].observedGeneration
- is 9, the condition is out of date with respect to the current
- state of the instance.
+ description: |-
+ observedGeneration represents the .metadata.generation that the condition was set based upon.
+ For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date
+ with respect to the current state of the instance.
format: int64
minimum: 0
type: integer
reason:
- description: reason contains a programmatic identifier indicating
- the reason for the condition's last transition. Producers
- of specific condition types may define expected values and
- meanings for this field, and whether the values are considered
- a guaranteed API. The value should be a CamelCase string.
+ description: |-
+ reason contains a programmatic identifier indicating the reason for the condition's last transition.
+ Producers of specific condition types may define expected values and meanings for this field,
+ and whether the values are considered a guaranteed API.
+ The value should be a CamelCase string.
This field may not be empty.
maxLength: 1024
minLength: 1
@@ -446,10 +589,6 @@ spec:
type: string
type:
description: type of condition in CamelCase or in foo.example.com/CamelCase.
- --- Many .condition.type values are consistent across resources
- like Available, but because arbitrary conditions can be useful
- (see .node.status.conditions), the ability to deconflict is
- important. The regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt)
maxLength: 316
pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$
type: string
@@ -462,29 +601,26 @@ spec:
type: object
type: array
lastHandledReconcileAt:
- description: LastHandledReconcileAt holds the value of the most recent
- reconcile request value, so a change of the annotation value can
- be detected.
+ description: |-
+ LastHandledReconcileAt holds the value of the most recent
+ reconcile request value, so a change of the annotation value
+ can be detected.
type: string
observedGeneration:
- description: ObservedGeneration is the last observed generation of
- the HelmRepository object.
+ description: |-
+ ObservedGeneration is the last observed generation of the HelmRepository
+ object.
format: int64
type: integer
url:
- description: URL is the dynamic fetch link for the latest Artifact.
- It is provided on a "best effort" basis, and using the precise HelmRepositoryStatus.Artifact
- data is recommended.
+ description: |-
+ URL is the dynamic fetch link for the latest Artifact.
+ It is provided on a "best effort" basis, and using the precise
+ HelmRepositoryStatus.Artifact data is recommended.
type: string
type: object
type: object
served: true
- storage: true
+ storage: false
subresources:
status: {}
-status:
- acceptedNames:
- kind: ""
- plural: ""
- conditions: []
- storedVersions: []
diff --git a/config/crd/bases/source.toolkit.fluxcd.io_ocirepositories.yaml b/config/crd/bases/source.toolkit.fluxcd.io_ocirepositories.yaml
index 5e214ccd8..05b7b96ab 100644
--- a/config/crd/bases/source.toolkit.fluxcd.io_ocirepositories.yaml
+++ b/config/crd/bases/source.toolkit.fluxcd.io_ocirepositories.yaml
@@ -1,11 +1,9 @@
-
---
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
annotations:
- controller-gen.kubebuilder.io/version: v0.7.0
- creationTimestamp: null
+ controller-gen.kubebuilder.io/version: v0.19.0
name: ocirepositories.source.toolkit.fluxcd.io
spec:
group: source.toolkit.fluxcd.io
@@ -31,20 +29,25 @@ spec:
- jsonPath: .metadata.creationTimestamp
name: Age
type: date
- name: v1beta2
+ name: v1
schema:
openAPIV3Schema:
description: OCIRepository is the Schema for the ocirepositories API
properties:
apiVersion:
- description: 'APIVersion defines the versioned schema of this representation
- of an object. Servers should convert recognized schemas to the latest
- internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
+ description: |-
+ APIVersion defines the versioned schema of this representation of an object.
+ Servers should convert recognized schemas to the latest internal value, and
+ may reject unrecognized values.
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources
type: string
kind:
- description: 'Kind is a string value representing the REST resource this
- object represents. Servers may infer this from the endpoint the client
- submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
+ description: |-
+ Kind is a string value representing the REST resource this object represents.
+ Servers may infer this from the endpoint the client submits requests to.
+ Cannot be updated.
+ In CamelCase.
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
type: string
metadata:
type: object
@@ -52,13 +55,19 @@ spec:
description: OCIRepositorySpec defines the desired state of OCIRepository
properties:
certSecretRef:
- description: "CertSecretRef can be given the name of a secret containing
- either or both of \n - a PEM-encoded client certificate (`certFile`)
- and private key (`keyFile`); - a PEM-encoded CA certificate (`caFile`)
- \n and whichever are supplied, will be used for connecting to the
- \ registry. The client cert and key are useful if you are authenticating
- with a certificate; the CA cert is useful if you are using a self-signed
- server certificate."
+ description: |-
+ CertSecretRef can be given the name of a Secret containing
+ either or both of
+
+ - a PEM-encoded client certificate (`tls.crt`) and private
+ key (`tls.key`);
+ - a PEM-encoded CA certificate (`ca.crt`)
+
+ and whichever are supplied, will be used for connecting to the
+ registry. The client cert and key are useful if you are
+ authenticating with a certificate; the CA cert is useful if
+ you are using a self-signed server certificate. The Secret must
+ be of type `Opaque` or `kubernetes.io/tls`.
properties:
name:
description: Name of the referent.
@@ -67,44 +76,94 @@ spec:
- name
type: object
ignore:
- description: Ignore overrides the set of excluded patterns in the
- .sourceignore format (which is the same as .gitignore). If not provided,
- a default will be used, consult the documentation for your version
- to find out what those are.
+ description: |-
+ Ignore overrides the set of excluded patterns in the .sourceignore format
+ (which is the same as .gitignore). If not provided, a default will be used,
+ consult the documentation for your version to find out what those are.
type: string
+ insecure:
+ description: Insecure allows connecting to a non-TLS HTTP container
+ registry.
+ type: boolean
interval:
- description: The interval at which to check for image updates.
+ description: |-
+ Interval at which the OCIRepository URL is checked for updates.
+ This interval is approximate and may be subject to jitter to ensure
+ efficient use of resources.
+ pattern: ^([0-9]+(\.[0-9]+)?(ms|s|m|h))+$
type: string
+ layerSelector:
+ description: |-
+ LayerSelector specifies which layer should be extracted from the OCI artifact.
+ When not specified, the first layer found in the artifact is selected.
+ properties:
+ mediaType:
+ description: |-
+ MediaType specifies the OCI media type of the layer
+ which should be extracted from the OCI Artifact. The
+ first layer matching this type is selected.
+ type: string
+ operation:
+ description: |-
+ Operation specifies how the selected layer should be processed.
+ By default, the layer compressed content is extracted to storage.
+ When the operation is set to 'copy', the layer compressed content
+ is persisted to storage as it is.
+ enum:
+ - extract
+ - copy
+ type: string
+ type: object
provider:
default: generic
- description: The provider used for authentication, can be 'aws', 'azure',
- 'gcp' or 'generic'. When not specified, defaults to 'generic'.
+ description: |-
+ The provider used for authentication, can be 'aws', 'azure', 'gcp' or 'generic'.
+ When not specified, defaults to 'generic'.
enum:
- generic
- aws
- azure
- gcp
type: string
+ proxySecretRef:
+ description: |-
+ ProxySecretRef specifies the Secret containing the proxy configuration
+ to use while communicating with the container registry.
+ properties:
+ name:
+ description: Name of the referent.
+ type: string
+ required:
+ - name
+ type: object
ref:
- description: The OCI reference to pull and monitor for changes, defaults
- to the latest tag.
+ description: |-
+ The OCI reference to pull and monitor for changes,
+ defaults to the latest tag.
properties:
digest:
- description: Digest is the image digest to pull, takes precedence
- over SemVer. The value should be in the format 'sha256:'.
+ description: |-
+ Digest is the image digest to pull, takes precedence over SemVer.
+ The value should be in the format 'sha256:'.
type: string
semver:
- description: SemVer is the range of tags to pull selecting the
- latest within the range, takes precedence over Tag.
+ description: |-
+ SemVer is the range of tags to pull selecting the latest within
+ the range, takes precedence over Tag.
+ type: string
+ semverFilter:
+ description: SemverFilter is a regex pattern to filter the tags
+ within the SemVer range.
type: string
tag:
description: Tag is the image tag to pull, defaults to latest.
type: string
type: object
secretRef:
- description: SecretRef contains the secret name containing the registry
- login credentials to resolve image metadata. The secret must be
- of type kubernetes.io/dockerconfigjson.
+ description: |-
+ SecretRef contains the secret name containing the registry login
+ credentials to resolve image metadata.
+ The secret must be of type kubernetes.io/dockerconfigjson.
properties:
name:
description: Name of the referent.
@@ -113,9 +172,10 @@ spec:
- name
type: object
serviceAccountName:
- description: 'ServiceAccountName is the name of the Kubernetes ServiceAccount
- used to authenticate the image pull if the service account has attached
- pull secrets. For more information: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/#add-imagepullsecrets-to-a-service-account'
+ description: |-
+ ServiceAccountName is the name of the Kubernetes ServiceAccount used to authenticate
+ the image pull if the service account has attached pull secrets. For more information:
+ https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/#add-imagepullsecrets-to-a-service-account
type: string
suspend:
description: This flag tells the controller to suspend the reconciliation
@@ -125,12 +185,70 @@ spec:
default: 60s
description: The timeout for remote OCI Repository operations like
pulling, defaults to 60s.
+ pattern: ^([0-9]+(\.[0-9]+)?(ms|s|m))+$
type: string
url:
- description: URL is a reference to an OCI artifact repository hosted
+ description: |-
+ URL is a reference to an OCI artifact repository hosted
on a remote container registry.
pattern: ^oci://.*$
type: string
+ verify:
+ description: |-
+ Verify contains the secret name containing the trusted public keys
+ used to verify the signature and specifies which provider to use to check
+ whether OCI image is authentic.
+ properties:
+ matchOIDCIdentity:
+ description: |-
+ MatchOIDCIdentity specifies the identity matching criteria to use
+ while verifying an OCI artifact which was signed using Cosign keyless
+ signing. The artifact's identity is deemed to be verified if any of the
+ specified matchers match against the identity.
+ items:
+ description: |-
+ OIDCIdentityMatch specifies options for verifying the certificate identity,
+ i.e. the issuer and the subject of the certificate.
+ properties:
+ issuer:
+ description: |-
+ Issuer specifies the regex pattern to match against to verify
+ the OIDC issuer in the Fulcio certificate. The pattern must be a
+ valid Go regular expression.
+ type: string
+ subject:
+ description: |-
+ Subject specifies the regex pattern to match against to verify
+ the identity subject in the Fulcio certificate. The pattern must
+ be a valid Go regular expression.
+ type: string
+ required:
+ - issuer
+ - subject
+ type: object
+ type: array
+ provider:
+ default: cosign
+ description: Provider specifies the technology used to sign the
+ OCI Artifact.
+ enum:
+ - cosign
+ - notation
+ type: string
+ secretRef:
+ description: |-
+ SecretRef specifies the Kubernetes Secret containing the
+ trusted public keys.
+ properties:
+ name:
+ description: Name of the referent.
+ type: string
+ required:
+ - name
+ type: object
+ required:
+ - provider
+ type: object
required:
- interval
- url
@@ -144,12 +262,14 @@ spec:
description: Artifact represents the output of the last successful
OCI Repository sync.
properties:
- checksum:
- description: Checksum is the SHA256 checksum of the Artifact file.
+ digest:
+ description: Digest is the digest of the file in the form of ':'.
+ pattern: ^[a-z0-9]+(?:[.+_-][a-z0-9]+)*:[a-zA-Z0-9=_-]+$
type: string
lastUpdateTime:
- description: LastUpdateTime is the timestamp corresponding to
- the last update of the Artifact.
+ description: |-
+ LastUpdateTime is the timestamp corresponding to the last update of the
+ Artifact.
format: date-time
type: string
metadata:
@@ -158,70 +278,65 @@ spec:
description: Metadata holds upstream information such as OCI annotations.
type: object
path:
- description: Path is the relative file path of the Artifact. It
- can be used to locate the file in the root of the Artifact storage
- on the local file system of the controller managing the Source.
+ description: |-
+ Path is the relative file path of the Artifact. It can be used to locate
+ the file in the root of the Artifact storage on the local file system of
+ the controller managing the Source.
type: string
revision:
- description: Revision is a human-readable identifier traceable
- in the origin source system. It can be a Git commit SHA, Git
- tag, a Helm chart version, etc.
+ description: |-
+ Revision is a human-readable identifier traceable in the origin source
+ system. It can be a Git commit SHA, Git tag, a Helm chart version, etc.
type: string
size:
description: Size is the number of bytes in the file.
format: int64
type: integer
url:
- description: URL is the HTTP address of the Artifact as exposed
- by the controller managing the Source. It can be used to retrieve
- the Artifact for consumption, e.g. by another controller applying
- the Artifact contents.
+ description: |-
+ URL is the HTTP address of the Artifact as exposed by the controller
+ managing the Source. It can be used to retrieve the Artifact for
+ consumption, e.g. by another controller applying the Artifact contents.
type: string
required:
+ - digest
+ - lastUpdateTime
- path
+ - revision
- url
type: object
conditions:
description: Conditions holds the conditions for the OCIRepository.
items:
- description: "Condition contains details for one aspect of the current
- state of this API Resource. --- This struct is intended for direct
- use as an array at the field path .status.conditions. For example,
- type FooStatus struct{ // Represents the observations of a
- foo's current state. // Known .status.conditions.type are:
- \"Available\", \"Progressing\", and \"Degraded\" // +patchMergeKey=type
- \ // +patchStrategy=merge // +listType=map // +listMapKey=type
- \ Conditions []metav1.Condition `json:\"conditions,omitempty\"
- patchStrategy:\"merge\" patchMergeKey:\"type\" protobuf:\"bytes,1,rep,name=conditions\"`
- \n // other fields }"
+ description: Condition contains details for one aspect of the current
+ state of this API Resource.
properties:
lastTransitionTime:
- description: lastTransitionTime is the last time the condition
- transitioned from one status to another. This should be when
- the underlying condition changed. If that is not known, then
- using the time when the API field changed is acceptable.
+ description: |-
+ lastTransitionTime is the last time the condition transitioned from one status to another.
+ This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable.
format: date-time
type: string
message:
- description: message is a human readable message indicating
- details about the transition. This may be an empty string.
+ description: |-
+ message is a human readable message indicating details about the transition.
+ This may be an empty string.
maxLength: 32768
type: string
observedGeneration:
- description: observedGeneration represents the .metadata.generation
- that the condition was set based upon. For instance, if .metadata.generation
- is currently 12, but the .status.conditions[x].observedGeneration
- is 9, the condition is out of date with respect to the current
- state of the instance.
+ description: |-
+ observedGeneration represents the .metadata.generation that the condition was set based upon.
+ For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date
+ with respect to the current state of the instance.
format: int64
minimum: 0
type: integer
reason:
- description: reason contains a programmatic identifier indicating
- the reason for the condition's last transition. Producers
- of specific condition types may define expected values and
- meanings for this field, and whether the values are considered
- a guaranteed API. The value should be a CamelCase string.
+ description: |-
+ reason contains a programmatic identifier indicating the reason for the condition's last transition.
+ Producers of specific condition types may define expected values and meanings for this field,
+ and whether the values are considered a guaranteed API.
+ The value should be a CamelCase string.
This field may not be empty.
maxLength: 1024
minLength: 1
@@ -236,10 +351,6 @@ spec:
type: string
type:
description: type of condition in CamelCase or in foo.example.com/CamelCase.
- --- Many .condition.type values are consistent across resources
- like Available, but because arbitrary conditions can be useful
- (see .node.status.conditions), the ability to deconflict is
- important. The regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt)
maxLength: 316
pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$
type: string
@@ -252,14 +363,42 @@ spec:
type: object
type: array
lastHandledReconcileAt:
- description: LastHandledReconcileAt holds the value of the most recent
- reconcile request value, so a change of the annotation value can
- be detected.
+ description: |-
+ LastHandledReconcileAt holds the value of the most recent
+ reconcile request value, so a change of the annotation value
+ can be detected.
type: string
observedGeneration:
description: ObservedGeneration is the last observed generation.
format: int64
type: integer
+ observedIgnore:
+ description: |-
+ ObservedIgnore is the observed exclusion patterns used for constructing
+ the source artifact.
+ type: string
+ observedLayerSelector:
+ description: |-
+ ObservedLayerSelector is the observed layer selector used for constructing
+ the source artifact.
+ properties:
+ mediaType:
+ description: |-
+ MediaType specifies the OCI media type of the layer
+ which should be extracted from the OCI Artifact. The
+ first layer matching this type is selected.
+ type: string
+ operation:
+ description: |-
+ Operation specifies how the selected layer should be processed.
+ By default, the layer compressed content is extracted to storage.
+ When the operation is set to 'copy', the layer compressed content
+ is persisted to storage as it is.
+ enum:
+ - extract
+ - copy
+ type: string
+ type: object
url:
description: URL is the download link for the artifact output of the
last OCI Repository sync.
@@ -270,9 +409,415 @@ spec:
storage: true
subresources:
status: {}
-status:
- acceptedNames:
- kind: ""
- plural: ""
- conditions: []
- storedVersions: []
+ - additionalPrinterColumns:
+ - jsonPath: .spec.url
+ name: URL
+ type: string
+ - jsonPath: .status.conditions[?(@.type=="Ready")].status
+ name: Ready
+ type: string
+ - jsonPath: .status.conditions[?(@.type=="Ready")].message
+ name: Status
+ type: string
+ - jsonPath: .metadata.creationTimestamp
+ name: Age
+ type: date
+ deprecated: true
+ deprecationWarning: v1beta2 OCIRepository is deprecated, upgrade to v1
+ name: v1beta2
+ schema:
+ openAPIV3Schema:
+ description: OCIRepository is the Schema for the ocirepositories API
+ properties:
+ apiVersion:
+ description: |-
+ APIVersion defines the versioned schema of this representation of an object.
+ Servers should convert recognized schemas to the latest internal value, and
+ may reject unrecognized values.
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources
+ type: string
+ kind:
+ description: |-
+ Kind is a string value representing the REST resource this object represents.
+ Servers may infer this from the endpoint the client submits requests to.
+ Cannot be updated.
+ In CamelCase.
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
+ type: string
+ metadata:
+ type: object
+ spec:
+ description: OCIRepositorySpec defines the desired state of OCIRepository
+ properties:
+ certSecretRef:
+ description: |-
+ CertSecretRef can be given the name of a Secret containing
+ either or both of
+
+ - a PEM-encoded client certificate (`tls.crt`) and private
+ key (`tls.key`);
+ - a PEM-encoded CA certificate (`ca.crt`)
+
+ and whichever are supplied, will be used for connecting to the
+ registry. The client cert and key are useful if you are
+ authenticating with a certificate; the CA cert is useful if
+ you are using a self-signed server certificate. The Secret must
+ be of type `Opaque` or `kubernetes.io/tls`.
+
+ Note: Support for the `caFile`, `certFile` and `keyFile` keys have
+ been deprecated.
+ properties:
+ name:
+ description: Name of the referent.
+ type: string
+ required:
+ - name
+ type: object
+ ignore:
+ description: |-
+ Ignore overrides the set of excluded patterns in the .sourceignore format
+ (which is the same as .gitignore). If not provided, a default will be used,
+ consult the documentation for your version to find out what those are.
+ type: string
+ insecure:
+ description: Insecure allows connecting to a non-TLS HTTP container
+ registry.
+ type: boolean
+ interval:
+ description: |-
+ Interval at which the OCIRepository URL is checked for updates.
+ This interval is approximate and may be subject to jitter to ensure
+ efficient use of resources.
+ pattern: ^([0-9]+(\.[0-9]+)?(ms|s|m|h))+$
+ type: string
+ layerSelector:
+ description: |-
+ LayerSelector specifies which layer should be extracted from the OCI artifact.
+ When not specified, the first layer found in the artifact is selected.
+ properties:
+ mediaType:
+ description: |-
+ MediaType specifies the OCI media type of the layer
+ which should be extracted from the OCI Artifact. The
+ first layer matching this type is selected.
+ type: string
+ operation:
+ description: |-
+ Operation specifies how the selected layer should be processed.
+ By default, the layer compressed content is extracted to storage.
+ When the operation is set to 'copy', the layer compressed content
+ is persisted to storage as it is.
+ enum:
+ - extract
+ - copy
+ type: string
+ type: object
+ provider:
+ default: generic
+ description: |-
+ The provider used for authentication, can be 'aws', 'azure', 'gcp' or 'generic'.
+ When not specified, defaults to 'generic'.
+ enum:
+ - generic
+ - aws
+ - azure
+ - gcp
+ type: string
+ proxySecretRef:
+ description: |-
+ ProxySecretRef specifies the Secret containing the proxy configuration
+ to use while communicating with the container registry.
+ properties:
+ name:
+ description: Name of the referent.
+ type: string
+ required:
+ - name
+ type: object
+ ref:
+ description: |-
+ The OCI reference to pull and monitor for changes,
+ defaults to the latest tag.
+ properties:
+ digest:
+ description: |-
+ Digest is the image digest to pull, takes precedence over SemVer.
+ The value should be in the format 'sha256:'.
+ type: string
+ semver:
+ description: |-
+ SemVer is the range of tags to pull selecting the latest within
+ the range, takes precedence over Tag.
+ type: string
+ semverFilter:
+ description: SemverFilter is a regex pattern to filter the tags
+ within the SemVer range.
+ type: string
+ tag:
+ description: Tag is the image tag to pull, defaults to latest.
+ type: string
+ type: object
+ secretRef:
+ description: |-
+ SecretRef contains the secret name containing the registry login
+ credentials to resolve image metadata.
+ The secret must be of type kubernetes.io/dockerconfigjson.
+ properties:
+ name:
+ description: Name of the referent.
+ type: string
+ required:
+ - name
+ type: object
+ serviceAccountName:
+ description: |-
+ ServiceAccountName is the name of the Kubernetes ServiceAccount used to authenticate
+ the image pull if the service account has attached pull secrets. For more information:
+ https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/#add-imagepullsecrets-to-a-service-account
+ type: string
+ suspend:
+ description: This flag tells the controller to suspend the reconciliation
+ of this source.
+ type: boolean
+ timeout:
+ default: 60s
+ description: The timeout for remote OCI Repository operations like
+ pulling, defaults to 60s.
+ pattern: ^([0-9]+(\.[0-9]+)?(ms|s|m))+$
+ type: string
+ url:
+ description: |-
+ URL is a reference to an OCI artifact repository hosted
+ on a remote container registry.
+ pattern: ^oci://.*$
+ type: string
+ verify:
+ description: |-
+ Verify contains the secret name containing the trusted public keys
+ used to verify the signature and specifies which provider to use to check
+ whether OCI image is authentic.
+ properties:
+ matchOIDCIdentity:
+ description: |-
+ MatchOIDCIdentity specifies the identity matching criteria to use
+ while verifying an OCI artifact which was signed using Cosign keyless
+ signing. The artifact's identity is deemed to be verified if any of the
+ specified matchers match against the identity.
+ items:
+ description: |-
+ OIDCIdentityMatch specifies options for verifying the certificate identity,
+ i.e. the issuer and the subject of the certificate.
+ properties:
+ issuer:
+ description: |-
+ Issuer specifies the regex pattern to match against to verify
+ the OIDC issuer in the Fulcio certificate. The pattern must be a
+ valid Go regular expression.
+ type: string
+ subject:
+ description: |-
+ Subject specifies the regex pattern to match against to verify
+ the identity subject in the Fulcio certificate. The pattern must
+ be a valid Go regular expression.
+ type: string
+ required:
+ - issuer
+ - subject
+ type: object
+ type: array
+ provider:
+ default: cosign
+ description: Provider specifies the technology used to sign the
+ OCI Artifact.
+ enum:
+ - cosign
+ - notation
+ type: string
+ secretRef:
+ description: |-
+ SecretRef specifies the Kubernetes Secret containing the
+ trusted public keys.
+ properties:
+ name:
+ description: Name of the referent.
+ type: string
+ required:
+ - name
+ type: object
+ required:
+ - provider
+ type: object
+ required:
+ - interval
+ - url
+ type: object
+ status:
+ default:
+ observedGeneration: -1
+ description: OCIRepositoryStatus defines the observed state of OCIRepository
+ properties:
+ artifact:
+ description: Artifact represents the output of the last successful
+ OCI Repository sync.
+ properties:
+ digest:
+ description: Digest is the digest of the file in the form of ':'.
+ pattern: ^[a-z0-9]+(?:[.+_-][a-z0-9]+)*:[a-zA-Z0-9=_-]+$
+ type: string
+ lastUpdateTime:
+ description: |-
+ LastUpdateTime is the timestamp corresponding to the last update of the
+ Artifact.
+ format: date-time
+ type: string
+ metadata:
+ additionalProperties:
+ type: string
+ description: Metadata holds upstream information such as OCI annotations.
+ type: object
+ path:
+ description: |-
+ Path is the relative file path of the Artifact. It can be used to locate
+ the file in the root of the Artifact storage on the local file system of
+ the controller managing the Source.
+ type: string
+ revision:
+ description: |-
+ Revision is a human-readable identifier traceable in the origin source
+ system. It can be a Git commit SHA, Git tag, a Helm chart version, etc.
+ type: string
+ size:
+ description: Size is the number of bytes in the file.
+ format: int64
+ type: integer
+ url:
+ description: |-
+ URL is the HTTP address of the Artifact as exposed by the controller
+ managing the Source. It can be used to retrieve the Artifact for
+ consumption, e.g. by another controller applying the Artifact contents.
+ type: string
+ required:
+ - digest
+ - lastUpdateTime
+ - path
+ - revision
+ - url
+ type: object
+ conditions:
+ description: Conditions holds the conditions for the OCIRepository.
+ items:
+ description: Condition contains details for one aspect of the current
+ state of this API Resource.
+ properties:
+ lastTransitionTime:
+ description: |-
+ lastTransitionTime is the last time the condition transitioned from one status to another.
+ This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable.
+ format: date-time
+ type: string
+ message:
+ description: |-
+ message is a human readable message indicating details about the transition.
+ This may be an empty string.
+ maxLength: 32768
+ type: string
+ observedGeneration:
+ description: |-
+ observedGeneration represents the .metadata.generation that the condition was set based upon.
+ For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date
+ with respect to the current state of the instance.
+ format: int64
+ minimum: 0
+ type: integer
+ reason:
+ description: |-
+ reason contains a programmatic identifier indicating the reason for the condition's last transition.
+ Producers of specific condition types may define expected values and meanings for this field,
+ and whether the values are considered a guaranteed API.
+ The value should be a CamelCase string.
+ This field may not be empty.
+ maxLength: 1024
+ minLength: 1
+ pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$
+ type: string
+ status:
+ description: status of the condition, one of True, False, Unknown.
+ enum:
+ - "True"
+ - "False"
+ - Unknown
+ type: string
+ type:
+ description: type of condition in CamelCase or in foo.example.com/CamelCase.
+ maxLength: 316
+ pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$
+ type: string
+ required:
+ - lastTransitionTime
+ - message
+ - reason
+ - status
+ - type
+ type: object
+ type: array
+ contentConfigChecksum:
+ description: |-
+ ContentConfigChecksum is a checksum of all the configurations related to
+ the content of the source artifact:
+ - .spec.ignore
+ - .spec.layerSelector
+ observed in .status.observedGeneration version of the object. This can
+ be used to determine if the content configuration has changed and the
+ artifact needs to be rebuilt.
+ It has the format of `:`, for example: `sha256:`.
+
+ Deprecated: Replaced with explicit fields for observed artifact content
+ config in the status.
+ type: string
+ lastHandledReconcileAt:
+ description: |-
+ LastHandledReconcileAt holds the value of the most recent
+ reconcile request value, so a change of the annotation value
+ can be detected.
+ type: string
+ observedGeneration:
+ description: ObservedGeneration is the last observed generation.
+ format: int64
+ type: integer
+ observedIgnore:
+ description: |-
+ ObservedIgnore is the observed exclusion patterns used for constructing
+ the source artifact.
+ type: string
+ observedLayerSelector:
+ description: |-
+ ObservedLayerSelector is the observed layer selector used for constructing
+ the source artifact.
+ properties:
+ mediaType:
+ description: |-
+ MediaType specifies the OCI media type of the layer
+ which should be extracted from the OCI Artifact. The
+ first layer matching this type is selected.
+ type: string
+ operation:
+ description: |-
+ Operation specifies how the selected layer should be processed.
+ By default, the layer compressed content is extracted to storage.
+ When the operation is set to 'copy', the layer compressed content
+ is persisted to storage as it is.
+ enum:
+ - extract
+ - copy
+ type: string
+ type: object
+ url:
+ description: URL is the download link for the artifact output of the
+ last OCI Repository sync.
+ type: string
+ type: object
+ type: object
+ served: true
+ storage: false
+ subresources:
+ status: {}
diff --git a/config/crd/kustomization.yaml b/config/crd/kustomization.yaml
index c00716353..2a09dbfd5 100644
--- a/config/crd/kustomization.yaml
+++ b/config/crd/kustomization.yaml
@@ -6,4 +6,5 @@ resources:
- bases/source.toolkit.fluxcd.io_helmcharts.yaml
- bases/source.toolkit.fluxcd.io_buckets.yaml
- bases/source.toolkit.fluxcd.io_ocirepositories.yaml
+- bases/source.toolkit.fluxcd.io_externalartifacts.yaml
# +kubebuilder:scaffold:crdkustomizeresource
diff --git a/config/manager/deployment.yaml b/config/manager/deployment.yaml
index 3ea2c59ff..e354b00e3 100644
--- a/config/manager/deployment.yaml
+++ b/config/manager/deployment.yaml
@@ -51,6 +51,8 @@ spec:
valueFrom:
fieldRef:
fieldPath: metadata.namespace
+ - name: TUF_ROOT # store the Fulcio root CA file in tmp
+ value: "/tmp/.sigstore"
args:
- --watch-all-namespaces
- --log-level=info
diff --git a/config/manager/kustomization.yaml b/config/manager/kustomization.yaml
index 7a7a6f472..0118ce85b 100644
--- a/config/manager/kustomization.yaml
+++ b/config/manager/kustomization.yaml
@@ -6,4 +6,4 @@ resources:
images:
- name: fluxcd/source-controller
newName: fluxcd/source-controller
- newTag: v0.26.1
+ newTag: v1.7.0
diff --git a/config/rbac/externalartifact_editor_role.yaml b/config/rbac/externalartifact_editor_role.yaml
new file mode 100644
index 000000000..ded6c1d93
--- /dev/null
+++ b/config/rbac/externalartifact_editor_role.yaml
@@ -0,0 +1,24 @@
+# permissions for end users to edit externalartifacts.
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+ name: externalartifact-editor-role
+rules:
+- apiGroups:
+ - source.toolkit.fluxcd.io
+ resources:
+ - externalartifacts
+ verbs:
+ - create
+ - delete
+ - get
+ - list
+ - patch
+ - update
+ - watch
+- apiGroups:
+ - source.toolkit.fluxcd.io
+ resources:
+ - externalartifacts/status
+ verbs:
+ - get
diff --git a/config/rbac/externalartifact_viewer_role.yaml b/config/rbac/externalartifact_viewer_role.yaml
new file mode 100644
index 000000000..d0c1d507f
--- /dev/null
+++ b/config/rbac/externalartifact_viewer_role.yaml
@@ -0,0 +1,20 @@
+# permissions for end users to view externalartifacts.
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+ name: externalartifact-viewer-role
+rules:
+- apiGroups:
+ - source.toolkit.fluxcd.io
+ resources:
+ - externalartifacts
+ verbs:
+ - get
+ - list
+ - watch
+- apiGroups:
+ - source.toolkit.fluxcd.io
+ resources:
+ - externalartifacts/status
+ verbs:
+ - get
diff --git a/config/rbac/role.yaml b/config/rbac/role.yaml
index a048672d6..d2cd9e7cb 100644
--- a/config/rbac/role.yaml
+++ b/config/rbac/role.yaml
@@ -1,9 +1,7 @@
-
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
- creationTimestamp: null
name: manager-role
rules:
- apiGroups:
@@ -17,133 +15,24 @@ rules:
- ""
resources:
- secrets
+ - serviceaccounts
verbs:
- get
- list
- watch
- apiGroups:
- - source.toolkit.fluxcd.io
+ - ""
resources:
- - buckets
+ - serviceaccounts/token
verbs:
- create
- - delete
- - get
- - list
- - patch
- - update
- - watch
-- apiGroups:
- - source.toolkit.fluxcd.io
- resources:
- - buckets/finalizers
- verbs:
- - create
- - delete
- - get
- - patch
- - update
-- apiGroups:
- - source.toolkit.fluxcd.io
- resources:
- - buckets/status
- verbs:
- - get
- - patch
- - update
- apiGroups:
- source.toolkit.fluxcd.io
resources:
+ - buckets
- gitrepositories
- verbs:
- - create
- - delete
- - get
- - list
- - patch
- - update
- - watch
-- apiGroups:
- - source.toolkit.fluxcd.io
- resources:
- - gitrepositories/finalizers
- verbs:
- - create
- - delete
- - get
- - patch
- - update
-- apiGroups:
- - source.toolkit.fluxcd.io
- resources:
- - gitrepositories/status
- verbs:
- - get
- - patch
- - update
-- apiGroups:
- - source.toolkit.fluxcd.io
- resources:
- helmcharts
- verbs:
- - create
- - delete
- - get
- - list
- - patch
- - update
- - watch
-- apiGroups:
- - source.toolkit.fluxcd.io
- resources:
- - helmcharts/finalizers
- verbs:
- - create
- - delete
- - get
- - patch
- - update
-- apiGroups:
- - source.toolkit.fluxcd.io
- resources:
- - helmcharts/status
- verbs:
- - get
- - patch
- - update
-- apiGroups:
- - source.toolkit.fluxcd.io
- resources:
- helmrepositories
- verbs:
- - create
- - delete
- - get
- - list
- - patch
- - update
- - watch
-- apiGroups:
- - source.toolkit.fluxcd.io
- resources:
- - helmrepositories/finalizers
- verbs:
- - create
- - delete
- - get
- - patch
- - update
-- apiGroups:
- - source.toolkit.fluxcd.io
- resources:
- - helmrepositories/status
- verbs:
- - get
- - patch
- - update
-- apiGroups:
- - source.toolkit.fluxcd.io
- resources:
- ocirepositories
verbs:
- create
@@ -156,6 +45,10 @@ rules:
- apiGroups:
- source.toolkit.fluxcd.io
resources:
+ - buckets/finalizers
+ - gitrepositories/finalizers
+ - helmcharts/finalizers
+ - helmrepositories/finalizers
- ocirepositories/finalizers
verbs:
- create
@@ -166,6 +59,10 @@ rules:
- apiGroups:
- source.toolkit.fluxcd.io
resources:
+ - buckets/status
+ - gitrepositories/status
+ - helmcharts/status
+ - helmrepositories/status
- ocirepositories/status
verbs:
- get
diff --git a/config/samples/source_v1beta2_bucket.yaml b/config/samples/source_v1_bucket.yaml
similarity index 81%
rename from config/samples/source_v1beta2_bucket.yaml
rename to config/samples/source_v1_bucket.yaml
index cbc211aa6..f09cbe213 100644
--- a/config/samples/source_v1beta2_bucket.yaml
+++ b/config/samples/source_v1_bucket.yaml
@@ -1,4 +1,4 @@
-apiVersion: source.toolkit.fluxcd.io/v1beta2
+apiVersion: source.toolkit.fluxcd.io/v1
kind: Bucket
metadata:
name: bucket-sample
diff --git a/config/samples/source_v1beta2_gitrepository.yaml b/config/samples/source_v1_gitrepository.yaml
similarity index 77%
rename from config/samples/source_v1beta2_gitrepository.yaml
rename to config/samples/source_v1_gitrepository.yaml
index f22674600..27fad9a25 100644
--- a/config/samples/source_v1beta2_gitrepository.yaml
+++ b/config/samples/source_v1_gitrepository.yaml
@@ -1,4 +1,4 @@
-apiVersion: source.toolkit.fluxcd.io/v1beta2
+apiVersion: source.toolkit.fluxcd.io/v1
kind: GitRepository
metadata:
name: gitrepository-sample
diff --git a/config/samples/source_v1beta2_helmchart_gitrepository.yaml b/config/samples/source_v1_helmchart_gitrepository.yaml
similarity index 78%
rename from config/samples/source_v1beta2_helmchart_gitrepository.yaml
rename to config/samples/source_v1_helmchart_gitrepository.yaml
index 731d8d21b..680e7b184 100644
--- a/config/samples/source_v1beta2_helmchart_gitrepository.yaml
+++ b/config/samples/source_v1_helmchart_gitrepository.yaml
@@ -1,4 +1,4 @@
-apiVersion: source.toolkit.fluxcd.io/v1beta2
+apiVersion: source.toolkit.fluxcd.io/v1
kind: HelmChart
metadata:
name: helmchart-git-sample
diff --git a/config/samples/source_v1beta2_helmchart_helmrepository-oci.yaml b/config/samples/source_v1_helmchart_helmrepository-oci.yaml
similarity index 82%
rename from config/samples/source_v1beta2_helmchart_helmrepository-oci.yaml
rename to config/samples/source_v1_helmchart_helmrepository-oci.yaml
index d2cdc15c6..d9dd3279d 100644
--- a/config/samples/source_v1beta2_helmchart_helmrepository-oci.yaml
+++ b/config/samples/source_v1_helmchart_helmrepository-oci.yaml
@@ -1,4 +1,4 @@
-apiVersion: source.toolkit.fluxcd.io/v1beta2
+apiVersion: source.toolkit.fluxcd.io/v1
kind: HelmChart
metadata:
name: helmchart-sample-oci
diff --git a/config/samples/source_v1beta2_helmchart_helmrepository.yaml b/config/samples/source_v1_helmchart_helmrepository.yaml
similarity index 63%
rename from config/samples/source_v1beta2_helmchart_helmrepository.yaml
rename to config/samples/source_v1_helmchart_helmrepository.yaml
index a6bd7c207..d1b43fe3e 100644
--- a/config/samples/source_v1beta2_helmchart_helmrepository.yaml
+++ b/config/samples/source_v1_helmchart_helmrepository.yaml
@@ -1,11 +1,12 @@
-apiVersion: source.toolkit.fluxcd.io/v1beta2
+apiVersion: source.toolkit.fluxcd.io/v1
kind: HelmChart
metadata:
name: helmchart-sample
spec:
chart: podinfo
- version: '>=2.0.0 <3.0.0'
+ version: '6.x'
sourceRef:
kind: HelmRepository
name: helmrepository-sample
interval: 1m
+ ignoreMissingValuesFiles: true
diff --git a/config/samples/source_v1beta2_helmrepository-oci.yaml b/config/samples/source_v1_helmrepository-oci.yaml
similarity index 72%
rename from config/samples/source_v1beta2_helmrepository-oci.yaml
rename to config/samples/source_v1_helmrepository-oci.yaml
index bc487c990..458dc73c2 100644
--- a/config/samples/source_v1beta2_helmrepository-oci.yaml
+++ b/config/samples/source_v1_helmrepository-oci.yaml
@@ -1,4 +1,4 @@
-apiVersion: source.toolkit.fluxcd.io/v1beta2
+apiVersion: source.toolkit.fluxcd.io/v1
kind: HelmRepository
metadata:
name: helmrepository-sample-oci
diff --git a/config/samples/source_v1beta2_helmrepository.yaml b/config/samples/source_v1_helmrepository.yaml
similarity index 73%
rename from config/samples/source_v1beta2_helmrepository.yaml
rename to config/samples/source_v1_helmrepository.yaml
index 4a2c7ab36..b7049cc0a 100644
--- a/config/samples/source_v1beta2_helmrepository.yaml
+++ b/config/samples/source_v1_helmrepository.yaml
@@ -1,4 +1,4 @@
-apiVersion: source.toolkit.fluxcd.io/v1beta2
+apiVersion: source.toolkit.fluxcd.io/v1
kind: HelmRepository
metadata:
name: helmrepository-sample
diff --git a/config/samples/source_v1beta2_ocirepository.yaml b/config/samples/source_v1_ocirepository.yaml
similarity index 77%
rename from config/samples/source_v1beta2_ocirepository.yaml
rename to config/samples/source_v1_ocirepository.yaml
index e06241b97..69fb19e2a 100644
--- a/config/samples/source_v1beta2_ocirepository.yaml
+++ b/config/samples/source_v1_ocirepository.yaml
@@ -1,4 +1,4 @@
-apiVersion: source.toolkit.fluxcd.io/v1beta2
+apiVersion: source.toolkit.fluxcd.io/v1
kind: OCIRepository
metadata:
name: ocirepository-sample
diff --git a/config/testdata/bucket/source.yaml b/config/testdata/bucket/source.yaml
index 459e7400a..bd3097ee2 100644
--- a/config/testdata/bucket/source.yaml
+++ b/config/testdata/bucket/source.yaml
@@ -1,5 +1,5 @@
---
-apiVersion: source.toolkit.fluxcd.io/v1beta1
+apiVersion: source.toolkit.fluxcd.io/v1
kind: Bucket
metadata:
name: podinfo
diff --git a/config/testdata/git/large-repo.yaml b/config/testdata/git/large-repo.yaml
index 139b44415..ad3defd68 100644
--- a/config/testdata/git/large-repo.yaml
+++ b/config/testdata/git/large-repo.yaml
@@ -1,29 +1,10 @@
-apiVersion: source.toolkit.fluxcd.io/v1beta1
+apiVersion: source.toolkit.fluxcd.io/v1
kind: GitRepository
metadata:
- name: large-repo-go-git
+ name: large-repo
spec:
- gitImplementation: go-git
interval: 10m
timeout: 2m
- url: https://github.com/hashgraph/hedera-mirror-node.git
+ url: https://github.com/nodejs/node.git
ref:
branch: main
- ignore: |
- /*
- !/charts
----
-apiVersion: source.toolkit.fluxcd.io/v1beta1
-kind: GitRepository
-metadata:
- name: large-repo-libgit2
-spec:
- gitImplementation: libgit2
- interval: 10m
- timeout: 2m
- url: https://github.com/hashgraph/hedera-mirror-node.git
- ref:
- branch: main
- ignore: |
- /*
- !/charts
diff --git a/config/testdata/helmchart-from-bucket/source.yaml b/config/testdata/helmchart-from-bucket/source.yaml
index 0609cf541..814305d13 100644
--- a/config/testdata/helmchart-from-bucket/source.yaml
+++ b/config/testdata/helmchart-from-bucket/source.yaml
@@ -1,5 +1,5 @@
---
-apiVersion: source.toolkit.fluxcd.io/v1beta1
+apiVersion: source.toolkit.fluxcd.io/v1
kind: Bucket
metadata:
name: charts
@@ -13,7 +13,7 @@ spec:
secretRef:
name: minio-credentials
---
-apiVersion: source.toolkit.fluxcd.io/v1beta1
+apiVersion: source.toolkit.fluxcd.io/v1
kind: HelmChart
metadata:
name: helmchart-bucket
diff --git a/config/testdata/helmchart-from-oci/notation.yaml b/config/testdata/helmchart-from-oci/notation.yaml
new file mode 100644
index 000000000..6434479ea
--- /dev/null
+++ b/config/testdata/helmchart-from-oci/notation.yaml
@@ -0,0 +1,25 @@
+---
+apiVersion: source.toolkit.fluxcd.io/v1
+kind: HelmRepository
+metadata:
+ name: podinfo-notation
+spec:
+ url: oci://ghcr.io/stefanprodan/charts
+ type: "oci"
+ interval: 1m
+---
+apiVersion: source.toolkit.fluxcd.io/v1
+kind: HelmChart
+metadata:
+ name: podinfo-notation
+spec:
+ chart: podinfo
+ sourceRef:
+ kind: HelmRepository
+ name: podinfo-notation
+ version: '6.6.0'
+ interval: 1m
+ verify:
+ provider: notation
+ secretRef:
+ name: notation-config
diff --git a/config/testdata/helmchart-from-oci/source.yaml b/config/testdata/helmchart-from-oci/source.yaml
index 9d9945ff6..b2786531e 100644
--- a/config/testdata/helmchart-from-oci/source.yaml
+++ b/config/testdata/helmchart-from-oci/source.yaml
@@ -1,5 +1,5 @@
---
-apiVersion: source.toolkit.fluxcd.io/v1beta2
+apiVersion: source.toolkit.fluxcd.io/v1
kind: HelmRepository
metadata:
name: podinfo
@@ -8,7 +8,7 @@ spec:
type: "oci"
interval: 1m
---
-apiVersion: source.toolkit.fluxcd.io/v1beta2
+apiVersion: source.toolkit.fluxcd.io/v1
kind: HelmChart
metadata:
name: podinfo
@@ -19,3 +19,17 @@ spec:
name: podinfo
version: '6.1.*'
interval: 1m
+---
+apiVersion: source.toolkit.fluxcd.io/v1
+kind: HelmChart
+metadata:
+ name: podinfo-keyless
+spec:
+ chart: podinfo
+ sourceRef:
+ kind: HelmRepository
+ name: podinfo
+ version: '6.2.1'
+ interval: 1m
+ verify:
+ provider: cosign
diff --git a/config/testdata/helmchart-valuesfile/gitrepository.yaml b/config/testdata/helmchart-valuesfile/gitrepository.yaml
index b620c8560..279979e93 100644
--- a/config/testdata/helmchart-valuesfile/gitrepository.yaml
+++ b/config/testdata/helmchart-valuesfile/gitrepository.yaml
@@ -1,4 +1,4 @@
-apiVersion: source.toolkit.fluxcd.io/v1beta1
+apiVersion: source.toolkit.fluxcd.io/v1
kind: GitRepository
metadata:
name: podinfo
diff --git a/config/testdata/helmchart-valuesfile/helmchart_gitrepository.yaml b/config/testdata/helmchart-valuesfile/helmchart_gitrepository.yaml
index 4483f0ca8..3c26b3eb5 100644
--- a/config/testdata/helmchart-valuesfile/helmchart_gitrepository.yaml
+++ b/config/testdata/helmchart-valuesfile/helmchart_gitrepository.yaml
@@ -1,4 +1,4 @@
-apiVersion: source.toolkit.fluxcd.io/v1beta1
+apiVersion: source.toolkit.fluxcd.io/v1
kind: HelmChart
metadata:
name: podinfo-git
@@ -8,6 +8,5 @@ spec:
kind: GitRepository
name: podinfo
chart: charts/podinfo
- valuesFile: charts/podinfo/values.yaml
valuesFiles:
- charts/podinfo/values-prod.yaml
diff --git a/config/testdata/helmchart-valuesfile/helmchart_helmrepository.yaml b/config/testdata/helmchart-valuesfile/helmchart_helmrepository.yaml
index fdf34f6bf..0b004eb7a 100644
--- a/config/testdata/helmchart-valuesfile/helmchart_helmrepository.yaml
+++ b/config/testdata/helmchart-valuesfile/helmchart_helmrepository.yaml
@@ -1,4 +1,4 @@
-apiVersion: source.toolkit.fluxcd.io/v1beta1
+apiVersion: source.toolkit.fluxcd.io/v1
kind: HelmChart
metadata:
name: podinfo
@@ -8,6 +8,5 @@ spec:
kind: HelmRepository
name: podinfo
chart: podinfo
- valuesFile: values.yaml
valuesFiles:
- values-prod.yaml
diff --git a/config/testdata/helmchart-valuesfile/helmrepository.yaml b/config/testdata/helmchart-valuesfile/helmrepository.yaml
index ab568384c..f0c178695 100644
--- a/config/testdata/helmchart-valuesfile/helmrepository.yaml
+++ b/config/testdata/helmchart-valuesfile/helmrepository.yaml
@@ -1,4 +1,4 @@
-apiVersion: source.toolkit.fluxcd.io/v1beta1
+apiVersion: source.toolkit.fluxcd.io/v1
kind: HelmRepository
metadata:
name: podinfo
diff --git a/config/testdata/ocirepository/signed-with-key.yaml b/config/testdata/ocirepository/signed-with-key.yaml
new file mode 100644
index 000000000..0a3a652ee
--- /dev/null
+++ b/config/testdata/ocirepository/signed-with-key.yaml
@@ -0,0 +1,14 @@
+---
+apiVersion: source.toolkit.fluxcd.io/v1
+kind: OCIRepository
+metadata:
+ name: podinfo-deploy-signed-with-key
+spec:
+ interval: 5m
+ url: oci://ghcr.io/stefanprodan/podinfo-deploy
+ ref:
+ semver: "6.2.x"
+ verify:
+ provider: cosign
+ secretRef:
+ name: cosign-key
diff --git a/config/testdata/ocirepository/signed-with-keyless.yaml b/config/testdata/ocirepository/signed-with-keyless.yaml
new file mode 100644
index 000000000..ff46ed30d
--- /dev/null
+++ b/config/testdata/ocirepository/signed-with-keyless.yaml
@@ -0,0 +1,12 @@
+---
+apiVersion: source.toolkit.fluxcd.io/v1
+kind: OCIRepository
+metadata:
+ name: podinfo-deploy-signed-with-keyless
+spec:
+ interval: 5m
+ url: oci://ghcr.io/stefanprodan/manifests/podinfo
+ ref:
+ semver: "6.2.x"
+ verify:
+ provider: cosign
diff --git a/config/testdata/ocirepository/signed-with-notation.yaml b/config/testdata/ocirepository/signed-with-notation.yaml
new file mode 100644
index 000000000..55820f6d4
--- /dev/null
+++ b/config/testdata/ocirepository/signed-with-notation.yaml
@@ -0,0 +1,14 @@
+---
+apiVersion: source.toolkit.fluxcd.io/v1
+kind: OCIRepository
+metadata:
+ name: podinfo-deploy-signed-with-notation
+spec:
+ interval: 5m
+ url: oci://ghcr.io/stefanprodan/podinfo-deploy
+ ref:
+ semver: "6.6.x"
+ verify:
+ provider: notation
+ secretRef:
+ name: notation-config
diff --git a/controllers/bucket_controller_test.go b/controllers/bucket_controller_test.go
deleted file mode 100644
index 37cc33d91..000000000
--- a/controllers/bucket_controller_test.go
+++ /dev/null
@@ -1,1282 +0,0 @@
-/*
-Copyright 2021 The Flux authors
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package controllers
-
-import (
- "context"
- "errors"
- "fmt"
- "net/http"
- "net/url"
- "os"
- "path/filepath"
- "strings"
- "testing"
- "time"
-
- "github.com/darkowlzz/controller-check/status"
- "github.com/fluxcd/pkg/apis/meta"
- "github.com/fluxcd/pkg/runtime/conditions"
- "github.com/fluxcd/pkg/runtime/patch"
- . "github.com/onsi/gomega"
- corev1 "k8s.io/api/core/v1"
- apierrors "k8s.io/apimachinery/pkg/api/errors"
- metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
- "k8s.io/client-go/tools/record"
- kstatus "sigs.k8s.io/cli-utils/pkg/kstatus/status"
- "sigs.k8s.io/controller-runtime/pkg/client"
- "sigs.k8s.io/controller-runtime/pkg/client/fake"
- fakeclient "sigs.k8s.io/controller-runtime/pkg/client/fake"
-
- sourcev1 "github.com/fluxcd/source-controller/api/v1beta2"
- gcsmock "github.com/fluxcd/source-controller/internal/mock/gcs"
- s3mock "github.com/fluxcd/source-controller/internal/mock/s3"
- sreconcile "github.com/fluxcd/source-controller/internal/reconcile"
- "github.com/fluxcd/source-controller/internal/reconcile/summarize"
-)
-
-// Environment variable to set the GCP Storage host for the GCP client.
-const EnvGcpStorageHost = "STORAGE_EMULATOR_HOST"
-
-func TestBucketReconciler_Reconcile(t *testing.T) {
- g := NewWithT(t)
-
- s3Server := s3mock.NewServer("test-bucket")
- s3Server.Objects = []*s3mock.Object{
- {
- Key: "test.yaml",
- Content: []byte("test"),
- ContentType: "text/plain",
- LastModified: time.Now(),
- },
- }
- s3Server.Start()
- defer s3Server.Stop()
-
- g.Expect(s3Server.HTTPAddress()).ToNot(BeEmpty())
- u, err := url.Parse(s3Server.HTTPAddress())
- g.Expect(err).NotTo(HaveOccurred())
-
- secret := &corev1.Secret{
- ObjectMeta: metav1.ObjectMeta{
- GenerateName: "bucket-reconcile-",
- Namespace: "default",
- },
- Data: map[string][]byte{
- "accesskey": []byte("key"),
- "secretkey": []byte("secret"),
- },
- }
- g.Expect(testEnv.Create(ctx, secret)).To(Succeed())
- defer testEnv.Delete(ctx, secret)
-
- obj := &sourcev1.Bucket{
- ObjectMeta: metav1.ObjectMeta{
- GenerateName: "bucket-reconcile-",
- Namespace: "default",
- },
- Spec: sourcev1.BucketSpec{
- Provider: "generic",
- BucketName: s3Server.BucketName,
- Endpoint: u.Host,
- Insecure: true,
- Interval: metav1.Duration{Duration: interval},
- Timeout: &metav1.Duration{Duration: timeout},
- SecretRef: &meta.LocalObjectReference{
- Name: secret.Name,
- },
- },
- }
- g.Expect(testEnv.Create(ctx, obj)).To(Succeed())
-
- key := client.ObjectKey{Name: obj.Name, Namespace: obj.Namespace}
-
- // Wait for finalizer to be set
- g.Eventually(func() bool {
- if err := testEnv.Get(ctx, key, obj); err != nil {
- return false
- }
- return len(obj.Finalizers) > 0
- }, timeout).Should(BeTrue())
-
- // Wait for Bucket to be Ready
- g.Eventually(func() bool {
- if err := testEnv.Get(ctx, key, obj); err != nil {
- return false
- }
- if !conditions.IsReady(obj) || obj.Status.Artifact == nil {
- return false
- }
- readyCondition := conditions.Get(obj, meta.ReadyCondition)
- return obj.Generation == readyCondition.ObservedGeneration &&
- obj.Generation == obj.Status.ObservedGeneration
- }, timeout).Should(BeTrue())
-
- // Check if the object status is valid.
- condns := &status.Conditions{NegativePolarity: bucketReadyCondition.NegativePolarity}
- checker := status.NewChecker(testEnv.Client, condns)
- checker.CheckErr(ctx, obj)
-
- // kstatus client conformance check.
- uo, err := patch.ToUnstructured(obj)
- g.Expect(err).ToNot(HaveOccurred())
- res, err := kstatus.Compute(uo)
- g.Expect(err).ToNot(HaveOccurred())
- g.Expect(res.Status).To(Equal(kstatus.CurrentStatus))
-
- // Patch the object with reconcile request annotation.
- patchHelper, err := patch.NewHelper(obj, testEnv.Client)
- g.Expect(err).ToNot(HaveOccurred())
- annotations := map[string]string{
- meta.ReconcileRequestAnnotation: "now",
- }
- obj.SetAnnotations(annotations)
- g.Expect(patchHelper.Patch(ctx, obj)).ToNot(HaveOccurred())
- g.Eventually(func() bool {
- if err := testEnv.Get(ctx, key, obj); err != nil {
- return false
- }
- return obj.Status.LastHandledReconcileAt == "now"
- }, timeout).Should(BeTrue())
-
- g.Expect(testEnv.Delete(ctx, obj)).To(Succeed())
-
- // Wait for Bucket to be deleted
- g.Eventually(func() bool {
- if err := testEnv.Get(ctx, key, obj); err != nil {
- return apierrors.IsNotFound(err)
- }
- return false
- }, timeout).Should(BeTrue())
-}
-
-func TestBucketReconciler_reconcileStorage(t *testing.T) {
- tests := []struct {
- name string
- beforeFunc func(obj *sourcev1.Bucket, storage *Storage) error
- want sreconcile.Result
- wantErr bool
- assertArtifact *sourcev1.Artifact
- assertConditions []metav1.Condition
- assertPaths []string
- }{
- {
- name: "garbage collects",
- beforeFunc: func(obj *sourcev1.Bucket, storage *Storage) error {
- revisions := []string{"a", "b", "c", "d"}
- for n := range revisions {
- v := revisions[n]
- obj.Status.Artifact = &sourcev1.Artifact{
- Path: fmt.Sprintf("/reconcile-storage/%s.txt", v),
- Revision: v,
- }
- if err := testStorage.MkdirAll(*obj.Status.Artifact); err != nil {
- return err
- }
- if err := testStorage.AtomicWriteFile(obj.Status.Artifact, strings.NewReader(v), 0o640); err != nil {
- return err
- }
- if n != len(revisions)-1 {
- time.Sleep(time.Second * 1)
- }
- }
- testStorage.SetArtifactURL(obj.Status.Artifact)
- return nil
- },
- assertArtifact: &sourcev1.Artifact{
- Path: "/reconcile-storage/d.txt",
- Revision: "d",
- Checksum: "18ac3e7343f016890c510e93f935261169d9e3f565436429830faf0934f4f8e4",
- URL: testStorage.Hostname + "/reconcile-storage/d.txt",
- Size: int64p(int64(len("d"))),
- },
- assertPaths: []string{
- "/reconcile-storage/d.txt",
- "/reconcile-storage/c.txt",
- "!/reconcile-storage/b.txt",
- "!/reconcile-storage/a.txt",
- },
- want: sreconcile.ResultSuccess,
- },
- {
- name: "notices missing artifact in storage",
- beforeFunc: func(obj *sourcev1.Bucket, storage *Storage) error {
- obj.Status.Artifact = &sourcev1.Artifact{
- Path: fmt.Sprintf("/reconcile-storage/invalid.txt"),
- Revision: "d",
- }
- testStorage.SetArtifactURL(obj.Status.Artifact)
- return nil
- },
- want: sreconcile.ResultSuccess,
- assertPaths: []string{
- "!/reconcile-storage/invalid.txt",
- },
- assertConditions: []metav1.Condition{
- *conditions.TrueCondition(meta.ReconcilingCondition, "NoArtifact", "no artifact for resource in storage"),
- },
- },
- {
- name: "updates hostname on diff from current",
- beforeFunc: func(obj *sourcev1.Bucket, storage *Storage) error {
- obj.Status.Artifact = &sourcev1.Artifact{
- Path: fmt.Sprintf("/reconcile-storage/hostname.txt"),
- Revision: "f",
- Checksum: "3b9c358f36f0a31b6ad3e14f309c7cf198ac9246e8316f9ce543d5b19ac02b80",
- URL: "http://outdated.com/reconcile-storage/hostname.txt",
- }
- if err := testStorage.MkdirAll(*obj.Status.Artifact); err != nil {
- return err
- }
- if err := testStorage.AtomicWriteFile(obj.Status.Artifact, strings.NewReader("file"), 0o640); err != nil {
- return err
- }
- return nil
- },
- want: sreconcile.ResultSuccess,
- assertPaths: []string{
- "/reconcile-storage/hostname.txt",
- },
- assertArtifact: &sourcev1.Artifact{
- Path: "/reconcile-storage/hostname.txt",
- Revision: "f",
- Checksum: "3b9c358f36f0a31b6ad3e14f309c7cf198ac9246e8316f9ce543d5b19ac02b80",
- URL: testStorage.Hostname + "/reconcile-storage/hostname.txt",
- Size: int64p(int64(len("file"))),
- },
- },
- }
- for _, tt := range tests {
- t.Run(tt.name, func(t *testing.T) {
- g := NewWithT(t)
-
- defer func() {
- g.Expect(os.RemoveAll(filepath.Join(testStorage.BasePath, "/reconcile-storage"))).To(Succeed())
- }()
-
- r := &BucketReconciler{
- EventRecorder: record.NewFakeRecorder(32),
- Storage: testStorage,
- }
-
- obj := &sourcev1.Bucket{
- ObjectMeta: metav1.ObjectMeta{
- GenerateName: "test-",
- },
- }
- if tt.beforeFunc != nil {
- g.Expect(tt.beforeFunc(obj, testStorage)).To(Succeed())
- }
-
- index := newEtagIndex()
-
- got, err := r.reconcileStorage(context.TODO(), obj, index, "")
- g.Expect(err != nil).To(Equal(tt.wantErr))
- g.Expect(got).To(Equal(tt.want))
-
- g.Expect(obj.Status.Artifact).To(MatchArtifact(tt.assertArtifact))
- if tt.assertArtifact != nil && tt.assertArtifact.URL != "" {
- g.Expect(obj.Status.Artifact.URL).To(Equal(tt.assertArtifact.URL))
- }
- g.Expect(obj.Status.Conditions).To(conditions.MatchConditions(tt.assertConditions))
-
- for _, p := range tt.assertPaths {
- absoluteP := filepath.Join(testStorage.BasePath, p)
- if !strings.HasPrefix(p, "!") {
- g.Expect(absoluteP).To(BeAnExistingFile())
- continue
- }
- g.Expect(absoluteP).NotTo(BeAnExistingFile())
- }
- })
- }
-}
-
-func TestBucketReconciler_reconcileSource_generic(t *testing.T) {
- tests := []struct {
- name string
- bucketName string
- bucketObjects []*s3mock.Object
- middleware http.Handler
- secret *corev1.Secret
- beforeFunc func(obj *sourcev1.Bucket)
- want sreconcile.Result
- wantErr bool
- assertIndex *etagIndex
- assertConditions []metav1.Condition
- }{
- {
- name: "Reconciles GCS source",
- bucketName: "dummy",
- bucketObjects: []*s3mock.Object{
- {
- Key: "test.txt",
- Content: []byte("test"),
- ContentType: "text/plain",
- LastModified: time.Now(),
- },
- },
- want: sreconcile.ResultSuccess,
- assertIndex: &etagIndex{
- index: map[string]string{
- "test.txt": "098f6bcd4621d373cade4e832627b4f6",
- },
- },
- assertConditions: []metav1.Condition{
- *conditions.TrueCondition(sourcev1.ArtifactOutdatedCondition, "NewRevision", "new upstream revision 'b4c2a60ce44b67f5b659a95ce4e4cc9e2a86baf13afb72bd397c5384cbc0e479'"),
- *conditions.TrueCondition(meta.ReconcilingCondition, "NewRevision", "new upstream revision 'b4c2a60ce44b67f5b659a95ce4e4cc9e2a86baf13afb72bd397c5384cbc0e479'"),
- },
- },
- // TODO(hidde): middleware for mock server
- //{
- // name: "authenticates using secretRef",
- // bucketName: "dummy",
- //},
- {
- name: "Observes non-existing secretRef",
- bucketName: "dummy",
- beforeFunc: func(obj *sourcev1.Bucket) {
- obj.Spec.SecretRef = &meta.LocalObjectReference{
- Name: "dummy",
- }
- },
- wantErr: true,
- assertIndex: newEtagIndex(),
- assertConditions: []metav1.Condition{
- *conditions.TrueCondition(sourcev1.FetchFailedCondition, sourcev1.AuthenticationFailedReason, "failed to get secret '/dummy': secrets \"dummy\" not found"),
- },
- },
- {
- name: "Observes invalid secretRef",
- bucketName: "dummy",
- secret: &corev1.Secret{
- ObjectMeta: metav1.ObjectMeta{
- Name: "dummy",
- },
- },
- beforeFunc: func(obj *sourcev1.Bucket) {
- obj.Spec.SecretRef = &meta.LocalObjectReference{
- Name: "dummy",
- }
- },
- wantErr: true,
- assertIndex: newEtagIndex(),
- assertConditions: []metav1.Condition{
- *conditions.TrueCondition(sourcev1.FetchFailedCondition, sourcev1.AuthenticationFailedReason, "invalid 'dummy' secret data: required fields 'accesskey' and 'secretkey'"),
- },
- },
- {
- name: "Observes non-existing bucket name",
- bucketName: "dummy",
- beforeFunc: func(obj *sourcev1.Bucket) {
- obj.Spec.BucketName = "invalid"
- },
- wantErr: true,
- assertIndex: newEtagIndex(),
- assertConditions: []metav1.Condition{
- *conditions.TrueCondition(sourcev1.FetchFailedCondition, sourcev1.BucketOperationFailedReason, "bucket 'invalid' not found"),
- },
- },
- {
- name: "Transient bucket name API failure",
- beforeFunc: func(obj *sourcev1.Bucket) {
- obj.Spec.Endpoint = "transient.example.com"
- obj.Spec.BucketName = "unavailable"
- },
- wantErr: true,
- assertIndex: newEtagIndex(),
- assertConditions: []metav1.Condition{
- *conditions.TrueCondition(sourcev1.FetchFailedCondition, sourcev1.BucketOperationFailedReason, "failed to confirm existence of 'unavailable' bucket"),
- },
- },
- {
- name: ".sourceignore",
- bucketName: "dummy",
- bucketObjects: []*s3mock.Object{
- {
- Key: ".sourceignore",
- Content: []byte("ignored/file.txt"),
- ContentType: "text/plain",
- LastModified: time.Now(),
- },
- {
- Key: "ignored/file.txt",
- Content: []byte("ignored/file.txt"),
- ContentType: "text/plain",
- LastModified: time.Now(),
- },
- {
- Key: "included/file.txt",
- Content: []byte("included/file.txt"),
- ContentType: "text/plain",
- LastModified: time.Now(),
- },
- },
- want: sreconcile.ResultSuccess,
- assertIndex: &etagIndex{
- index: map[string]string{
- "included/file.txt": "5a4bc7048b3301f677fe15b8678be2f8",
- },
- },
- assertConditions: []metav1.Condition{
- *conditions.TrueCondition(sourcev1.ArtifactOutdatedCondition, "NewRevision", "new upstream revision '9fc2ddfc4a6f44e6c3efee40af36578b9e76d4d930eaf384b8435a0aa0bf7a0f'"),
- *conditions.TrueCondition(meta.ReconcilingCondition, "NewRevision", "new upstream revision '9fc2ddfc4a6f44e6c3efee40af36578b9e76d4d930eaf384b8435a0aa0bf7a0f'"),
- },
- },
- {
- name: "spec.ignore overrides .sourceignore",
- bucketName: "dummy",
- beforeFunc: func(obj *sourcev1.Bucket) {
- ignore := "!ignored/file.txt"
- obj.Spec.Ignore = &ignore
- },
- bucketObjects: []*s3mock.Object{
- {
- Key: ".sourceignore",
- Content: []byte("ignored/file.txt"),
- ContentType: "text/plain",
- LastModified: time.Now(),
- },
- {
- Key: "ignored/file.txt",
- Content: []byte("ignored/file.txt"),
- ContentType: "text/plain",
- LastModified: time.Now(),
- },
- {
- Key: "included/file.txt",
- Content: []byte("included/file.txt"),
- ContentType: "text/plain",
- LastModified: time.Now(),
- },
- },
- want: sreconcile.ResultSuccess,
- assertIndex: &etagIndex{
- index: map[string]string{
- "ignored/file.txt": "f08907038338288420ae7dc2d30c0497",
- "included/file.txt": "5a4bc7048b3301f677fe15b8678be2f8",
- },
- },
- assertConditions: []metav1.Condition{
- *conditions.TrueCondition(sourcev1.ArtifactOutdatedCondition, "NewRevision", "new upstream revision '117f586dc64cfc559329e21d286edcbb94cb6b1581517eaddc0ab5292b470cd5'"),
- *conditions.TrueCondition(meta.ReconcilingCondition, "NewRevision", "new upstream revision '117f586dc64cfc559329e21d286edcbb94cb6b1581517eaddc0ab5292b470cd5'"),
- },
- },
- {
- name: "Up-to-date artifact",
- bucketName: "dummy",
- beforeFunc: func(obj *sourcev1.Bucket) {
- obj.Status.Artifact = &sourcev1.Artifact{
- Revision: "b4c2a60ce44b67f5b659a95ce4e4cc9e2a86baf13afb72bd397c5384cbc0e479",
- }
- },
- bucketObjects: []*s3mock.Object{
- {
- Key: "test.txt",
- Content: []byte("test"),
- ContentType: "text/plain",
- LastModified: time.Now(),
- },
- },
- want: sreconcile.ResultSuccess,
- assertIndex: &etagIndex{
- index: map[string]string{
- "test.txt": "098f6bcd4621d373cade4e832627b4f6",
- },
- },
- assertConditions: []metav1.Condition{},
- },
- {
- name: "Removes FetchFailedCondition after reconciling source",
- bucketName: "dummy",
- beforeFunc: func(obj *sourcev1.Bucket) {
- conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, sourcev1.BucketOperationFailedReason, "failed to read test file")
- },
- bucketObjects: []*s3mock.Object{
- {
- Key: "test.txt",
- Content: []byte("test"),
- ContentType: "text/plain",
- LastModified: time.Now(),
- },
- },
- want: sreconcile.ResultSuccess,
- assertIndex: &etagIndex{
- index: map[string]string{
- "test.txt": "098f6bcd4621d373cade4e832627b4f6",
- },
- },
- assertConditions: []metav1.Condition{
- *conditions.TrueCondition(sourcev1.ArtifactOutdatedCondition, "NewRevision", "new upstream revision 'b4c2a60ce44b67f5b659a95ce4e4cc9e2a86baf13afb72bd397c5384cbc0e479'"),
- *conditions.TrueCondition(meta.ReconcilingCondition, "NewRevision", "new upstream revision 'b4c2a60ce44b67f5b659a95ce4e4cc9e2a86baf13afb72bd397c5384cbc0e479'"),
- },
- },
- }
- for _, tt := range tests {
- t.Run(tt.name, func(t *testing.T) {
- g := NewWithT(t)
-
- builder := fakeclient.NewClientBuilder().WithScheme(testEnv.Scheme())
- if tt.secret != nil {
- builder.WithObjects(tt.secret)
- }
- r := &BucketReconciler{
- EventRecorder: record.NewFakeRecorder(32),
- Client: builder.Build(),
- Storage: testStorage,
- }
- tmpDir := t.TempDir()
-
- obj := &sourcev1.Bucket{
- TypeMeta: metav1.TypeMeta{
- Kind: sourcev1.BucketKind,
- },
- ObjectMeta: metav1.ObjectMeta{
- Name: "test-bucket",
- },
- Spec: sourcev1.BucketSpec{
- Timeout: &metav1.Duration{Duration: timeout},
- },
- }
-
- var server *s3mock.Server
- if tt.bucketName != "" {
- server = s3mock.NewServer(tt.bucketName)
- server.Objects = tt.bucketObjects
- server.Start()
- defer server.Stop()
-
- g.Expect(server.HTTPAddress()).ToNot(BeEmpty())
- u, err := url.Parse(server.HTTPAddress())
- g.Expect(err).NotTo(HaveOccurred())
-
- obj.Spec.BucketName = tt.bucketName
- obj.Spec.Endpoint = u.Host
- // TODO(hidde): also test TLS
- obj.Spec.Insecure = true
- }
- if tt.beforeFunc != nil {
- tt.beforeFunc(obj)
- }
-
- index := newEtagIndex()
-
- got, err := r.reconcileSource(context.TODO(), obj, index, tmpDir)
- g.Expect(err != nil).To(Equal(tt.wantErr))
- g.Expect(got).To(Equal(tt.want))
-
- g.Expect(index.Index()).To(Equal(tt.assertIndex.Index()))
- g.Expect(obj.Status.Conditions).To(conditions.MatchConditions(tt.assertConditions))
- })
- }
-}
-
-func TestBucketReconciler_reconcileSource_gcs(t *testing.T) {
- tests := []struct {
- name string
- bucketName string
- bucketObjects []*gcsmock.Object
- secret *corev1.Secret
- beforeFunc func(obj *sourcev1.Bucket)
- want sreconcile.Result
- wantErr bool
- assertIndex *etagIndex
- assertConditions []metav1.Condition
- }{
- {
- name: "Reconciles GCS source",
- bucketName: "dummy",
- bucketObjects: []*gcsmock.Object{
- {
- Key: "test.txt",
- ContentType: "text/plain",
- Content: []byte("test"),
- Generation: 3,
- },
- },
- secret: &corev1.Secret{
- ObjectMeta: metav1.ObjectMeta{
- Name: "dummy",
- },
- Data: map[string][]byte{
- "accesskey": []byte("key"),
- "secretkey": []byte("secret"),
- "serviceaccount": []byte("testsa"),
- },
- },
- beforeFunc: func(obj *sourcev1.Bucket) {
- obj.Spec.SecretRef = &meta.LocalObjectReference{
- Name: "dummy",
- }
- },
- want: sreconcile.ResultSuccess,
- assertIndex: &etagIndex{
- index: map[string]string{
- "test.txt": "098f6bcd4621d373cade4e832627b4f6",
- },
- },
- assertConditions: []metav1.Condition{
- *conditions.TrueCondition(sourcev1.ArtifactOutdatedCondition, "NewRevision", "new upstream revision 'b4c2a60ce44b67f5b659a95ce4e4cc9e2a86baf13afb72bd397c5384cbc0e479'"),
- *conditions.TrueCondition(meta.ReconcilingCondition, "NewRevision", "new upstream revision 'b4c2a60ce44b67f5b659a95ce4e4cc9e2a86baf13afb72bd397c5384cbc0e479'"),
- },
- },
- {
- name: "Observes non-existing secretRef",
- bucketName: "dummy",
- beforeFunc: func(obj *sourcev1.Bucket) {
- obj.Spec.SecretRef = &meta.LocalObjectReference{
- Name: "dummy",
- }
- },
- want: sreconcile.ResultEmpty,
- wantErr: true,
- assertIndex: newEtagIndex(),
- assertConditions: []metav1.Condition{
- *conditions.TrueCondition(sourcev1.FetchFailedCondition, sourcev1.AuthenticationFailedReason, "failed to get secret '/dummy': secrets \"dummy\" not found"),
- },
- },
- {
- name: "Observes invalid secretRef",
- bucketName: "dummy",
- secret: &corev1.Secret{
- ObjectMeta: metav1.ObjectMeta{
- Name: "dummy",
- },
- },
- beforeFunc: func(obj *sourcev1.Bucket) {
- obj.Spec.SecretRef = &meta.LocalObjectReference{
- Name: "dummy",
- }
- },
- want: sreconcile.ResultEmpty,
- wantErr: true,
- assertIndex: newEtagIndex(),
- assertConditions: []metav1.Condition{
- *conditions.TrueCondition(sourcev1.FetchFailedCondition, sourcev1.AuthenticationFailedReason, "invalid 'dummy' secret data: required fields"),
- },
- },
- {
- name: "Observes non-existing bucket name",
- bucketName: "dummy",
- beforeFunc: func(obj *sourcev1.Bucket) {
- obj.Spec.BucketName = "invalid"
- },
- want: sreconcile.ResultEmpty,
- wantErr: true,
- assertIndex: newEtagIndex(),
- assertConditions: []metav1.Condition{
- *conditions.TrueCondition(sourcev1.FetchFailedCondition, sourcev1.BucketOperationFailedReason, "bucket 'invalid' not found"),
- },
- },
- {
- name: "Transient bucket name API failure",
- beforeFunc: func(obj *sourcev1.Bucket) {
- obj.Spec.Endpoint = "transient.example.com"
- obj.Spec.BucketName = "unavailable"
- },
- want: sreconcile.ResultEmpty,
- wantErr: true,
- assertIndex: newEtagIndex(),
- assertConditions: []metav1.Condition{
- *conditions.TrueCondition(sourcev1.FetchFailedCondition, sourcev1.BucketOperationFailedReason, "failed to confirm existence of 'unavailable' bucket"),
- },
- },
- {
- name: ".sourceignore",
- bucketName: "dummy",
- bucketObjects: []*gcsmock.Object{
- {
- Key: ".sourceignore",
- Content: []byte("ignored/file.txt"),
- ContentType: "text/plain",
- Generation: 1,
- },
- {
- Key: "ignored/file.txt",
- Content: []byte("ignored/file.txt"),
- ContentType: "text/plain",
- Generation: 4,
- },
- {
- Key: "included/file.txt",
- Content: []byte("included/file.txt"),
- ContentType: "text/plain",
- Generation: 3,
- },
- },
- want: sreconcile.ResultSuccess,
- assertIndex: &etagIndex{
- index: map[string]string{
- "included/file.txt": "5a4bc7048b3301f677fe15b8678be2f8",
- },
- },
- assertConditions: []metav1.Condition{
- *conditions.TrueCondition(sourcev1.ArtifactOutdatedCondition, "NewRevision", "new upstream revision '9fc2ddfc4a6f44e6c3efee40af36578b9e76d4d930eaf384b8435a0aa0bf7a0f'"),
- *conditions.TrueCondition(meta.ReconcilingCondition, "NewRevision", "new upstream revision '9fc2ddfc4a6f44e6c3efee40af36578b9e76d4d930eaf384b8435a0aa0bf7a0f'"),
- },
- },
- {
- name: "spec.ignore overrides .sourceignore",
- bucketName: "dummy",
- beforeFunc: func(obj *sourcev1.Bucket) {
- ignore := "!ignored/file.txt"
- obj.Spec.Ignore = &ignore
- },
- bucketObjects: []*gcsmock.Object{
- {
- Key: ".sourceignore",
- Content: []byte("ignored/file.txt"),
- ContentType: "text/plain",
- Generation: 1,
- },
- {
- Key: "ignored/file.txt",
- Content: []byte("ignored/file.txt"),
- ContentType: "text/plain",
- Generation: 2,
- },
- {
- Key: "included/file.txt",
- Content: []byte("included/file.txt"),
- ContentType: "text/plain",
- Generation: 4,
- },
- },
- want: sreconcile.ResultSuccess,
- assertIndex: &etagIndex{
- index: map[string]string{
- "ignored/file.txt": "f08907038338288420ae7dc2d30c0497",
- "included/file.txt": "5a4bc7048b3301f677fe15b8678be2f8",
- },
- },
- assertConditions: []metav1.Condition{
- *conditions.TrueCondition(sourcev1.ArtifactOutdatedCondition, "NewRevision", "new upstream revision '117f586dc64cfc559329e21d286edcbb94cb6b1581517eaddc0ab5292b470cd5'"),
- *conditions.TrueCondition(meta.ReconcilingCondition, "NewRevision", "new upstream revision '117f586dc64cfc559329e21d286edcbb94cb6b1581517eaddc0ab5292b470cd5'"),
- },
- },
- {
- name: "Up-to-date artifact",
- bucketName: "dummy",
- beforeFunc: func(obj *sourcev1.Bucket) {
- obj.Status.Artifact = &sourcev1.Artifact{
- Revision: "b4c2a60ce44b67f5b659a95ce4e4cc9e2a86baf13afb72bd397c5384cbc0e479",
- }
- },
- bucketObjects: []*gcsmock.Object{
- {
- Key: "test.txt",
- Content: []byte("test"),
- ContentType: "text/plain",
- Generation: 2,
- },
- },
- want: sreconcile.ResultSuccess,
- assertIndex: &etagIndex{
- index: map[string]string{
- "test.txt": "098f6bcd4621d373cade4e832627b4f6",
- },
- },
- assertConditions: []metav1.Condition{},
- },
- {
- name: "Removes FetchFailedCondition after reconciling source",
- bucketName: "dummy",
- beforeFunc: func(obj *sourcev1.Bucket) {
- conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, sourcev1.BucketOperationFailedReason, "failed to read test file")
- },
- bucketObjects: []*gcsmock.Object{
- {
- Key: "test.txt",
- Content: []byte("test"),
- ContentType: "text/plain",
- Generation: 2,
- },
- },
- want: sreconcile.ResultSuccess,
- assertIndex: &etagIndex{
- index: map[string]string{
- "test.txt": "098f6bcd4621d373cade4e832627b4f6",
- },
- },
- assertConditions: []metav1.Condition{
- *conditions.TrueCondition(sourcev1.ArtifactOutdatedCondition, "NewRevision", "new upstream revision 'b4c2a60ce44b67f5b659a95ce4e4cc9e2a86baf13afb72bd397c5384cbc0e479'"),
- *conditions.TrueCondition(meta.ReconcilingCondition, "NewRevision", "new upstream revision 'b4c2a60ce44b67f5b659a95ce4e4cc9e2a86baf13afb72bd397c5384cbc0e479'"),
- },
- },
- // TODO: Middleware for mock server to test authentication using secret.
- }
- for _, tt := range tests {
- t.Run(tt.name, func(t *testing.T) {
- g := NewWithT(t)
-
- builder := fakeclient.NewClientBuilder().WithScheme(testEnv.Scheme())
- if tt.secret != nil {
- builder.WithObjects(tt.secret)
- }
- r := &BucketReconciler{
- EventRecorder: record.NewFakeRecorder(32),
- Client: builder.Build(),
- Storage: testStorage,
- }
- tmpDir := t.TempDir()
-
- // Test bucket object.
- obj := &sourcev1.Bucket{
- TypeMeta: metav1.TypeMeta{
- Kind: sourcev1.BucketKind,
- },
- ObjectMeta: metav1.ObjectMeta{
- Name: "test-bucket",
- },
- Spec: sourcev1.BucketSpec{
- BucketName: tt.bucketName,
- Timeout: &metav1.Duration{Duration: timeout},
- Provider: sourcev1.GoogleBucketProvider,
- },
- }
-
- // Set up the mock GCP bucket server.
- server := gcsmock.NewServer(tt.bucketName)
- server.Objects = tt.bucketObjects
- server.Start()
- defer server.Stop()
-
- g.Expect(server.HTTPAddress()).ToNot(BeEmpty())
-
- obj.Spec.Endpoint = server.HTTPAddress()
- obj.Spec.Insecure = true
-
- if tt.beforeFunc != nil {
- tt.beforeFunc(obj)
- }
-
- // Set the GCP storage host to be used by the GCP client.
- g.Expect(os.Setenv(EnvGcpStorageHost, obj.Spec.Endpoint)).ToNot(HaveOccurred())
- defer func() {
- g.Expect(os.Unsetenv(EnvGcpStorageHost)).ToNot(HaveOccurred())
- }()
-
- index := newEtagIndex()
-
- got, err := r.reconcileSource(context.TODO(), obj, index, tmpDir)
- t.Log(err)
- g.Expect(err != nil).To(Equal(tt.wantErr))
- g.Expect(got).To(Equal(tt.want))
-
- g.Expect(index.Index()).To(Equal(tt.assertIndex.Index()))
- g.Expect(obj.Status.Conditions).To(conditions.MatchConditions(tt.assertConditions))
- })
- }
-}
-
-func TestBucketReconciler_reconcileArtifact(t *testing.T) {
- tests := []struct {
- name string
- beforeFunc func(t *WithT, obj *sourcev1.Bucket, index *etagIndex, dir string)
- afterFunc func(t *WithT, obj *sourcev1.Bucket, dir string)
- want sreconcile.Result
- wantErr bool
- assertConditions []metav1.Condition
- }{
- {
- name: "Archiving artifact to storage makes ArtifactInStorage=True",
- beforeFunc: func(t *WithT, obj *sourcev1.Bucket, index *etagIndex, dir string) {
- obj.Spec.Interval = metav1.Duration{Duration: interval}
- },
- want: sreconcile.ResultSuccess,
- assertConditions: []metav1.Condition{
- *conditions.TrueCondition(sourcev1.ArtifactInStorageCondition, meta.SucceededReason, "stored artifact for revision 'e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855'"),
- },
- },
- {
- name: "Up-to-date artifact should not persist and update status",
- beforeFunc: func(t *WithT, obj *sourcev1.Bucket, index *etagIndex, dir string) {
- revision, _ := index.Revision()
- obj.Spec.Interval = metav1.Duration{Duration: interval}
- // Incomplete artifact
- obj.Status.Artifact = &sourcev1.Artifact{Revision: revision}
- },
- afterFunc: func(t *WithT, obj *sourcev1.Bucket, dir string) {
- // Still incomplete
- t.Expect(obj.Status.URL).To(BeEmpty())
- },
- want: sreconcile.ResultSuccess,
- assertConditions: []metav1.Condition{
- *conditions.TrueCondition(sourcev1.ArtifactInStorageCondition, meta.SucceededReason, "stored artifact for revision 'e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855'"),
- },
- },
- {
- name: "Removes ArtifactOutdatedCondition after creating a new artifact",
- beforeFunc: func(t *WithT, obj *sourcev1.Bucket, index *etagIndex, dir string) {
- obj.Spec.Interval = metav1.Duration{Duration: interval}
- conditions.MarkTrue(obj, sourcev1.ArtifactOutdatedCondition, "Foo", "")
- },
- want: sreconcile.ResultSuccess,
- assertConditions: []metav1.Condition{
- *conditions.TrueCondition(sourcev1.ArtifactInStorageCondition, meta.SucceededReason, "stored artifact for revision 'e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855'"),
- },
- },
- {
- name: "Creates latest symlink to the created artifact",
- beforeFunc: func(t *WithT, obj *sourcev1.Bucket, index *etagIndex, dir string) {
- obj.Spec.Interval = metav1.Duration{Duration: interval}
- },
- afterFunc: func(t *WithT, obj *sourcev1.Bucket, dir string) {
- localPath := testStorage.LocalPath(*obj.GetArtifact())
- symlinkPath := filepath.Join(filepath.Dir(localPath), "latest.tar.gz")
- targetFile, err := os.Readlink(symlinkPath)
- t.Expect(err).NotTo(HaveOccurred())
- t.Expect(localPath).To(Equal(targetFile))
- },
- want: sreconcile.ResultSuccess,
- assertConditions: []metav1.Condition{
- *conditions.TrueCondition(sourcev1.ArtifactInStorageCondition, meta.SucceededReason, "stored artifact for revision 'e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855'"),
- },
- },
- {
- name: "Dir path deleted",
- beforeFunc: func(t *WithT, obj *sourcev1.Bucket, index *etagIndex, dir string) {
- t.Expect(os.RemoveAll(dir)).ToNot(HaveOccurred())
- },
- want: sreconcile.ResultEmpty,
- wantErr: true,
- assertConditions: []metav1.Condition{
- *conditions.TrueCondition(sourcev1.StorageOperationFailedCondition, sourcev1.StatOperationFailedReason, "failed to stat source path"),
- },
- },
- {
- name: "Dir path is not a directory",
- beforeFunc: func(t *WithT, obj *sourcev1.Bucket, index *etagIndex, dir string) {
- // Remove the given directory and create a file for the same
- // path.
- t.Expect(os.RemoveAll(dir)).ToNot(HaveOccurred())
- f, err := os.Create(dir)
- defer f.Close()
- t.Expect(err).ToNot(HaveOccurred())
- },
- afterFunc: func(t *WithT, obj *sourcev1.Bucket, dir string) {
- t.Expect(os.RemoveAll(dir)).ToNot(HaveOccurred())
- },
- want: sreconcile.ResultEmpty,
- wantErr: true,
- assertConditions: []metav1.Condition{
- *conditions.TrueCondition(sourcev1.StorageOperationFailedCondition, sourcev1.InvalidPathReason, "is not a directory"),
- },
- },
- }
-
- for _, tt := range tests {
- t.Run(tt.name, func(t *testing.T) {
- g := NewWithT(t)
-
- r := &BucketReconciler{
- EventRecorder: record.NewFakeRecorder(32),
- Storage: testStorage,
- }
-
- tmpDir := t.TempDir()
-
- obj := &sourcev1.Bucket{
- TypeMeta: metav1.TypeMeta{
- Kind: sourcev1.BucketKind,
- },
- ObjectMeta: metav1.ObjectMeta{
- GenerateName: "test-bucket-",
- Generation: 1,
- Namespace: "default",
- },
- Spec: sourcev1.BucketSpec{
- Timeout: &metav1.Duration{Duration: timeout},
- },
- }
-
- index := newEtagIndex()
-
- if tt.beforeFunc != nil {
- tt.beforeFunc(g, obj, index, tmpDir)
- }
-
- got, err := r.reconcileArtifact(context.TODO(), obj, index, tmpDir)
- g.Expect(err != nil).To(Equal(tt.wantErr))
- g.Expect(got).To(Equal(tt.want))
-
- // On error, artifact is empty. Check artifacts only on successful
- // reconcile.
- g.Expect(obj.Status.Conditions).To(conditions.MatchConditions(tt.assertConditions))
-
- if tt.afterFunc != nil {
- tt.afterFunc(g, obj, tmpDir)
- }
- })
- }
-}
-
-func Test_etagIndex_Revision(t *testing.T) {
- tests := []struct {
- name string
- list map[string]string
- want string
- wantErr bool
- }{
- {
- name: "index with items",
- list: map[string]string{
- "one": "one",
- "two": "two",
- "three": "three",
- },
- want: "c0837b3f32bb67c5275858fdb96595f87801cf3c2f622c049918a051d29b2c7f",
- },
- {
- name: "index with items in different order",
- list: map[string]string{
- "three": "three",
- "one": "one",
- "two": "two",
- },
- want: "c0837b3f32bb67c5275858fdb96595f87801cf3c2f622c049918a051d29b2c7f",
- },
- {
- name: "empty index",
- list: map[string]string{},
- want: "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
- },
- {
- name: "nil index",
- list: nil,
- want: "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
- },
- }
- for _, tt := range tests {
- t.Run(tt.name, func(t *testing.T) {
- index := &etagIndex{index: tt.list}
- got, err := index.Revision()
- if (err != nil) != tt.wantErr {
- t.Errorf("revision() error = %v, wantErr %v", err, tt.wantErr)
- return
- }
- if got != tt.want {
- t.Errorf("revision() got = %v, want %v", got, tt.want)
- }
- })
- }
-}
-
-func TestBucketReconciler_statusConditions(t *testing.T) {
- tests := []struct {
- name string
- beforeFunc func(obj *sourcev1.Bucket)
- assertConditions []metav1.Condition
- }{
- {
- name: "positive conditions only",
- beforeFunc: func(obj *sourcev1.Bucket) {
- conditions.MarkTrue(obj, sourcev1.ArtifactInStorageCondition, meta.SucceededReason, "stored artifact for revision")
- },
- assertConditions: []metav1.Condition{
- *conditions.TrueCondition(meta.ReadyCondition, meta.SucceededReason, "stored artifact for revision"),
- *conditions.TrueCondition(sourcev1.ArtifactInStorageCondition, meta.SucceededReason, "stored artifact for revision"),
- },
- },
- {
- name: "multiple failures",
- beforeFunc: func(obj *sourcev1.Bucket) {
- conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, sourcev1.AuthenticationFailedReason, "failed to get secret")
- conditions.MarkTrue(obj, sourcev1.StorageOperationFailedCondition, sourcev1.DirCreationFailedReason, "failed to create directory")
- conditions.MarkTrue(obj, sourcev1.ArtifactOutdatedCondition, "NewRevision", "some error")
- },
- assertConditions: []metav1.Condition{
- *conditions.FalseCondition(meta.ReadyCondition, sourcev1.DirCreationFailedReason, "failed to create directory"),
- *conditions.TrueCondition(sourcev1.FetchFailedCondition, sourcev1.AuthenticationFailedReason, "failed to get secret"),
- *conditions.TrueCondition(sourcev1.StorageOperationFailedCondition, sourcev1.DirCreationFailedReason, "failed to create directory"),
- *conditions.TrueCondition(sourcev1.ArtifactOutdatedCondition, "NewRevision", "some error"),
- },
- },
- {
- name: "mixed positive and negative conditions",
- beforeFunc: func(obj *sourcev1.Bucket) {
- conditions.MarkTrue(obj, sourcev1.ArtifactInStorageCondition, meta.SucceededReason, "stored artifact for revision")
- conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, sourcev1.AuthenticationFailedReason, "failed to get secret")
- },
- assertConditions: []metav1.Condition{
- *conditions.FalseCondition(meta.ReadyCondition, sourcev1.AuthenticationFailedReason, "failed to get secret"),
- *conditions.TrueCondition(sourcev1.FetchFailedCondition, sourcev1.AuthenticationFailedReason, "failed to get secret"),
- *conditions.TrueCondition(sourcev1.ArtifactInStorageCondition, meta.SucceededReason, "stored artifact for revision"),
- },
- },
- }
-
- for _, tt := range tests {
- t.Run(tt.name, func(t *testing.T) {
- g := NewWithT(t)
-
- obj := &sourcev1.Bucket{
- TypeMeta: metav1.TypeMeta{
- Kind: sourcev1.BucketKind,
- APIVersion: "source.toolkit.fluxcd.io/v1beta2",
- },
- ObjectMeta: metav1.ObjectMeta{
- Name: "bucket",
- Namespace: "foo",
- },
- }
- clientBuilder := fake.NewClientBuilder()
- clientBuilder.WithObjects(obj)
- c := clientBuilder.Build()
-
- patchHelper, err := patch.NewHelper(obj, c)
- g.Expect(err).ToNot(HaveOccurred())
-
- if tt.beforeFunc != nil {
- tt.beforeFunc(obj)
- }
-
- ctx := context.TODO()
- recResult := sreconcile.ResultSuccess
- var retErr error
-
- summarizeHelper := summarize.NewHelper(record.NewFakeRecorder(32), patchHelper)
- summarizeOpts := []summarize.Option{
- summarize.WithConditions(bucketReadyCondition),
- summarize.WithReconcileResult(recResult),
- summarize.WithReconcileError(retErr),
- summarize.WithIgnoreNotFound(),
- summarize.WithResultBuilder(sreconcile.AlwaysRequeueResultBuilder{RequeueAfter: obj.GetRequeueAfter()}),
- summarize.WithPatchFieldOwner("source-controller"),
- }
- _, retErr = summarizeHelper.SummarizeAndPatch(ctx, obj, summarizeOpts...)
-
- key := client.ObjectKeyFromObject(obj)
- g.Expect(c.Get(ctx, key, obj)).ToNot(HaveOccurred())
- g.Expect(obj.GetConditions()).To(conditions.MatchConditions(tt.assertConditions))
- })
- }
-}
-
-func TestBucketReconciler_notify(t *testing.T) {
- tests := []struct {
- name string
- res sreconcile.Result
- resErr error
- oldObjBeforeFunc func(obj *sourcev1.Bucket)
- newObjBeforeFunc func(obj *sourcev1.Bucket)
- wantEvent string
- }{
- {
- name: "error - no event",
- res: sreconcile.ResultEmpty,
- resErr: errors.New("some error"),
- },
- {
- name: "new artifact",
- res: sreconcile.ResultSuccess,
- resErr: nil,
- newObjBeforeFunc: func(obj *sourcev1.Bucket) {
- obj.Status.Artifact = &sourcev1.Artifact{Revision: "xxx", Checksum: "yyy"}
- },
- wantEvent: "Normal NewArtifact stored artifact with 2 fetched files from",
- },
- {
- name: "recovery from failure",
- res: sreconcile.ResultSuccess,
- resErr: nil,
- oldObjBeforeFunc: func(obj *sourcev1.Bucket) {
- obj.Status.Artifact = &sourcev1.Artifact{Revision: "xxx", Checksum: "yyy"}
- conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, sourcev1.GitOperationFailedReason, "fail")
- conditions.MarkFalse(obj, meta.ReadyCondition, meta.FailedReason, "foo")
- },
- newObjBeforeFunc: func(obj *sourcev1.Bucket) {
- obj.Status.Artifact = &sourcev1.Artifact{Revision: "xxx", Checksum: "yyy"}
- conditions.MarkTrue(obj, meta.ReadyCondition, meta.SucceededReason, "ready")
- },
- wantEvent: "Normal Succeeded stored artifact with 2 fetched files from",
- },
- {
- name: "recovery and new artifact",
- res: sreconcile.ResultSuccess,
- resErr: nil,
- oldObjBeforeFunc: func(obj *sourcev1.Bucket) {
- obj.Status.Artifact = &sourcev1.Artifact{Revision: "xxx", Checksum: "yyy"}
- conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, sourcev1.GitOperationFailedReason, "fail")
- conditions.MarkFalse(obj, meta.ReadyCondition, meta.FailedReason, "foo")
- },
- newObjBeforeFunc: func(obj *sourcev1.Bucket) {
- obj.Status.Artifact = &sourcev1.Artifact{Revision: "aaa", Checksum: "bbb"}
- conditions.MarkTrue(obj, meta.ReadyCondition, meta.SucceededReason, "ready")
- },
- wantEvent: "Normal NewArtifact stored artifact with 2 fetched files from",
- },
- {
- name: "no updates",
- res: sreconcile.ResultSuccess,
- resErr: nil,
- oldObjBeforeFunc: func(obj *sourcev1.Bucket) {
- obj.Status.Artifact = &sourcev1.Artifact{Revision: "xxx", Checksum: "yyy"}
- conditions.MarkTrue(obj, meta.ReadyCondition, meta.SucceededReason, "ready")
- },
- newObjBeforeFunc: func(obj *sourcev1.Bucket) {
- obj.Status.Artifact = &sourcev1.Artifact{Revision: "xxx", Checksum: "yyy"}
- conditions.MarkTrue(obj, meta.ReadyCondition, meta.SucceededReason, "ready")
- },
- },
- }
-
- for _, tt := range tests {
- t.Run(tt.name, func(t *testing.T) {
- g := NewWithT(t)
-
- recorder := record.NewFakeRecorder(32)
-
- oldObj := &sourcev1.Bucket{
- Spec: sourcev1.BucketSpec{
- BucketName: "test-bucket",
- },
- }
- newObj := oldObj.DeepCopy()
-
- if tt.oldObjBeforeFunc != nil {
- tt.oldObjBeforeFunc(oldObj)
- }
- if tt.newObjBeforeFunc != nil {
- tt.newObjBeforeFunc(newObj)
- }
-
- reconciler := &BucketReconciler{
- EventRecorder: recorder,
- }
- index := &etagIndex{
- index: map[string]string{
- "zzz": "qqq",
- "bbb": "ddd",
- },
- }
- reconciler.notify(ctx, oldObj, newObj, index, tt.res, tt.resErr)
-
- select {
- case x, ok := <-recorder.Events:
- g.Expect(ok).To(Equal(tt.wantEvent != ""), "unexpected event received")
- if tt.wantEvent != "" {
- g.Expect(x).To(ContainSubstring(tt.wantEvent))
- }
- default:
- if tt.wantEvent != "" {
- t.Errorf("expected some event to be emitted")
- }
- }
- })
- }
-}
diff --git a/controllers/gitrepository_controller.go b/controllers/gitrepository_controller.go
deleted file mode 100644
index 64f651efa..000000000
--- a/controllers/gitrepository_controller.go
+++ /dev/null
@@ -1,977 +0,0 @@
-/*
-Copyright 2020 The Flux authors
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package controllers
-
-import (
- "context"
- "crypto/sha256"
- "errors"
- "fmt"
- "os"
- "path/filepath"
- "strconv"
- "strings"
- "time"
-
- securejoin "github.com/cyphar/filepath-securejoin"
- "github.com/fluxcd/pkg/runtime/logger"
- corev1 "k8s.io/api/core/v1"
- "k8s.io/apimachinery/pkg/runtime"
- "k8s.io/apimachinery/pkg/types"
- "k8s.io/apimachinery/pkg/util/uuid"
- kuberecorder "k8s.io/client-go/tools/record"
- ctrl "sigs.k8s.io/controller-runtime"
- "sigs.k8s.io/controller-runtime/pkg/builder"
- "sigs.k8s.io/controller-runtime/pkg/client"
- "sigs.k8s.io/controller-runtime/pkg/controller"
- "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
- "sigs.k8s.io/controller-runtime/pkg/predicate"
- "sigs.k8s.io/controller-runtime/pkg/ratelimiter"
-
- "github.com/fluxcd/pkg/apis/meta"
- "github.com/fluxcd/pkg/runtime/conditions"
- helper "github.com/fluxcd/pkg/runtime/controller"
- "github.com/fluxcd/pkg/runtime/events"
- "github.com/fluxcd/pkg/runtime/patch"
- "github.com/fluxcd/pkg/runtime/predicates"
-
- sourcev1 "github.com/fluxcd/source-controller/api/v1beta2"
- serror "github.com/fluxcd/source-controller/internal/error"
- "github.com/fluxcd/source-controller/internal/features"
- sreconcile "github.com/fluxcd/source-controller/internal/reconcile"
- "github.com/fluxcd/source-controller/internal/reconcile/summarize"
- "github.com/fluxcd/source-controller/internal/util"
- "github.com/fluxcd/source-controller/pkg/git"
- "github.com/fluxcd/source-controller/pkg/git/strategy"
- "github.com/fluxcd/source-controller/pkg/sourceignore"
-)
-
-// gitRepositoryReadyCondition contains the information required to summarize a
-// v1beta2.GitRepository Ready Condition.
-var gitRepositoryReadyCondition = summarize.Conditions{
- Target: meta.ReadyCondition,
- Owned: []string{
- sourcev1.StorageOperationFailedCondition,
- sourcev1.FetchFailedCondition,
- sourcev1.IncludeUnavailableCondition,
- sourcev1.ArtifactOutdatedCondition,
- sourcev1.ArtifactInStorageCondition,
- sourcev1.SourceVerifiedCondition,
- meta.ReadyCondition,
- meta.ReconcilingCondition,
- meta.StalledCondition,
- },
- Summarize: []string{
- sourcev1.StorageOperationFailedCondition,
- sourcev1.FetchFailedCondition,
- sourcev1.IncludeUnavailableCondition,
- sourcev1.ArtifactOutdatedCondition,
- sourcev1.ArtifactInStorageCondition,
- sourcev1.SourceVerifiedCondition,
- meta.StalledCondition,
- meta.ReconcilingCondition,
- },
- NegativePolarity: []string{
- sourcev1.StorageOperationFailedCondition,
- sourcev1.FetchFailedCondition,
- sourcev1.IncludeUnavailableCondition,
- sourcev1.ArtifactOutdatedCondition,
- meta.StalledCondition,
- meta.ReconcilingCondition,
- },
-}
-
-// gitRepositoryFailConditions contains the conditions that represent a failure.
-var gitRepositoryFailConditions = []string{
- sourcev1.FetchFailedCondition,
- sourcev1.IncludeUnavailableCondition,
- sourcev1.StorageOperationFailedCondition,
-}
-
-// +kubebuilder:rbac:groups=source.toolkit.fluxcd.io,resources=gitrepositories,verbs=get;list;watch;create;update;patch;delete
-// +kubebuilder:rbac:groups=source.toolkit.fluxcd.io,resources=gitrepositories/status,verbs=get;update;patch
-// +kubebuilder:rbac:groups=source.toolkit.fluxcd.io,resources=gitrepositories/finalizers,verbs=get;create;update;patch;delete
-// +kubebuilder:rbac:groups="",resources=events,verbs=create;patch
-
-// GitRepositoryReconciler reconciles a v1beta2.GitRepository object.
-type GitRepositoryReconciler struct {
- client.Client
- kuberecorder.EventRecorder
- helper.Metrics
-
- Storage *Storage
- ControllerName string
- // Libgit2TransportInitialized lets the reconciler know whether
- // libgit2 transport was intialized successfully.
- Libgit2TransportInitialized func() bool
-
- requeueDependency time.Duration
- features map[string]bool
-}
-
-type GitRepositoryReconcilerOptions struct {
- MaxConcurrentReconciles int
- DependencyRequeueInterval time.Duration
- RateLimiter ratelimiter.RateLimiter
-}
-
-// gitRepositoryReconcileFunc is the function type for all the
-// v1beta2.GitRepository (sub)reconcile functions.
-type gitRepositoryReconcileFunc func(ctx context.Context, obj *sourcev1.GitRepository, commit *git.Commit, includes *artifactSet, dir string) (sreconcile.Result, error)
-
-func (r *GitRepositoryReconciler) SetupWithManager(mgr ctrl.Manager) error {
- return r.SetupWithManagerAndOptions(mgr, GitRepositoryReconcilerOptions{})
-}
-
-func (r *GitRepositoryReconciler) SetupWithManagerAndOptions(mgr ctrl.Manager, opts GitRepositoryReconcilerOptions) error {
- r.requeueDependency = opts.DependencyRequeueInterval
-
- if r.features == nil {
- r.features = map[string]bool{}
- }
-
- // Check and enable gated features.
- if oc, _ := features.Enabled(features.OptimizedGitClones); oc {
- r.features[features.OptimizedGitClones] = true
- }
-
- return ctrl.NewControllerManagedBy(mgr).
- For(&sourcev1.GitRepository{}, builder.WithPredicates(
- predicate.Or(predicate.GenerationChangedPredicate{}, predicates.ReconcileRequestedPredicate{}),
- )).
- WithOptions(controller.Options{
- MaxConcurrentReconciles: opts.MaxConcurrentReconciles,
- RateLimiter: opts.RateLimiter,
- RecoverPanic: true,
- }).
- Complete(r)
-}
-
-func (r *GitRepositoryReconciler) Reconcile(ctx context.Context, req ctrl.Request) (result ctrl.Result, retErr error) {
- start := time.Now()
- log := ctrl.LoggerFrom(ctx).
- // Sets a reconcile ID to correlate logs from all suboperations.
- WithValues("reconcileID", uuid.NewUUID())
-
- // logger will be associated to the new context that is
- // returned from ctrl.LoggerInto.
- ctx = ctrl.LoggerInto(ctx, log)
-
- // Fetch the GitRepository
- obj := &sourcev1.GitRepository{}
- if err := r.Get(ctx, req.NamespacedName, obj); err != nil {
- return ctrl.Result{}, client.IgnoreNotFound(err)
- }
-
- // Record suspended status metric
- r.RecordSuspend(ctx, obj, obj.Spec.Suspend)
-
- // Return early if the object is suspended
- if obj.Spec.Suspend {
- log.Info("reconciliation is suspended for this object")
- return ctrl.Result{}, nil
- }
-
- // Initialize the patch helper with the current version of the object.
- patchHelper, err := patch.NewHelper(obj, r.Client)
- if err != nil {
- return ctrl.Result{}, err
- }
-
- // recResult stores the abstracted reconcile result.
- var recResult sreconcile.Result
-
- // Always attempt to patch the object and status after each reconciliation
- // NOTE: The final runtime result and error are set in this block.
- defer func() {
- summarizeHelper := summarize.NewHelper(r.EventRecorder, patchHelper)
- summarizeOpts := []summarize.Option{
- summarize.WithConditions(gitRepositoryReadyCondition),
- summarize.WithReconcileResult(recResult),
- summarize.WithReconcileError(retErr),
- summarize.WithIgnoreNotFound(),
- summarize.WithProcessors(
- summarize.ErrorActionHandler,
- summarize.RecordReconcileReq,
- ),
- summarize.WithResultBuilder(sreconcile.AlwaysRequeueResultBuilder{RequeueAfter: obj.GetRequeueAfter()}),
- summarize.WithPatchFieldOwner(r.ControllerName),
- }
- result, retErr = summarizeHelper.SummarizeAndPatch(ctx, obj, summarizeOpts...)
-
- // Always record readiness and duration metrics
- r.Metrics.RecordReadiness(ctx, obj)
- r.Metrics.RecordDuration(ctx, obj, start)
- }()
-
- // Add finalizer first if not exist to avoid the race condition
- // between init and delete
- if !controllerutil.ContainsFinalizer(obj, sourcev1.SourceFinalizer) {
- controllerutil.AddFinalizer(obj, sourcev1.SourceFinalizer)
- recResult = sreconcile.ResultRequeue
- return
- }
-
- // Examine if the object is under deletion
- if !obj.ObjectMeta.DeletionTimestamp.IsZero() {
- recResult, retErr = r.reconcileDelete(ctx, obj)
- return
- }
-
- // Reconcile actual object
- reconcilers := []gitRepositoryReconcileFunc{
- r.reconcileStorage,
- r.reconcileSource,
- r.reconcileInclude,
- r.reconcileArtifact,
- }
- recResult, retErr = r.reconcile(ctx, obj, reconcilers)
- return
-}
-
-// reconcile iterates through the gitRepositoryReconcileFunc tasks for the
-// object. It returns early on the first call that returns
-// reconcile.ResultRequeue, or produces an error.
-func (r *GitRepositoryReconciler) reconcile(ctx context.Context, obj *sourcev1.GitRepository, reconcilers []gitRepositoryReconcileFunc) (sreconcile.Result, error) {
- oldObj := obj.DeepCopy()
-
- // Mark as reconciling if generation differs
- if obj.Generation != obj.Status.ObservedGeneration {
- conditions.MarkReconciling(obj, "NewGeneration", "reconciling new object generation (%d)", obj.Generation)
- }
-
- // Create temp dir for Git clone
- tmpDir, err := util.TempDirForObj("", obj)
- if err != nil {
- e := serror.NewGeneric(
- fmt.Errorf("failed to create temporary working directory: %w", err),
- sourcev1.DirCreationFailedReason,
- )
- conditions.MarkTrue(obj, sourcev1.StorageOperationFailedCondition, e.Reason, e.Err.Error())
- return sreconcile.ResultEmpty, e
- }
- defer func() {
- if err = os.RemoveAll(tmpDir); err != nil {
- ctrl.LoggerFrom(ctx).Error(err, "failed to remove temporary working directory")
- }
- }()
- conditions.Delete(obj, sourcev1.StorageOperationFailedCondition)
-
- // Run the sub-reconcilers and build the result of reconciliation.
- var (
- commit git.Commit
- includes artifactSet
-
- res sreconcile.Result
- resErr error
- )
- for _, rec := range reconcilers {
- recResult, err := rec(ctx, obj, &commit, &includes, tmpDir)
- // Exit immediately on ResultRequeue.
- if recResult == sreconcile.ResultRequeue {
- return sreconcile.ResultRequeue, nil
- }
- // If an error is received, prioritize the returned results because an
- // error also means immediate requeue.
- if err != nil {
- resErr = err
- res = recResult
- break
- }
- // Prioritize requeue request in the result.
- res = sreconcile.LowestRequeuingResult(res, recResult)
- }
-
- r.notify(ctx, oldObj, obj, commit, res, resErr)
-
- return res, resErr
-}
-
-// notify emits notification related to the result of reconciliation.
-func (r *GitRepositoryReconciler) notify(ctx context.Context, oldObj, newObj *sourcev1.GitRepository, commit git.Commit, res sreconcile.Result, resErr error) {
- // Notify successful reconciliation for new artifact, no-op reconciliation
- // and recovery from any failure.
- if r.shouldNotify(oldObj, newObj, res, resErr) {
- annotations := map[string]string{
- sourcev1.GroupVersion.Group + "/revision": newObj.Status.Artifact.Revision,
- sourcev1.GroupVersion.Group + "/checksum": newObj.Status.Artifact.Checksum,
- }
-
- var oldChecksum string
- if oldObj.GetArtifact() != nil {
- oldChecksum = oldObj.GetArtifact().Checksum
- }
-
- // A partial commit due to no-op clone doesn't contain the commit
- // message information. Have separate message for it.
- var message string
- if git.IsConcreteCommit(commit) {
- message = fmt.Sprintf("stored artifact for commit '%s'", commit.ShortMessage())
- } else {
- message = fmt.Sprintf("stored artifact for commit '%s'", commit.String())
- }
-
- // Notify on new artifact and failure recovery.
- if oldChecksum != newObj.GetArtifact().Checksum {
- r.AnnotatedEventf(newObj, annotations, corev1.EventTypeNormal,
- "NewArtifact", message)
- ctrl.LoggerFrom(ctx).Info(message)
- } else {
- if sreconcile.FailureRecovery(oldObj, newObj, gitRepositoryFailConditions) {
- r.AnnotatedEventf(newObj, annotations, corev1.EventTypeNormal,
- meta.SucceededReason, message)
- ctrl.LoggerFrom(ctx).Info(message)
- }
- }
- }
-}
-
-// shouldNotify analyzes the result of subreconcilers and determines if a
-// notification should be sent. It decides about the final informational
-// notifications after the reconciliation. Failure notification and in-line
-// notifications are not handled here.
-func (r *GitRepositoryReconciler) shouldNotify(oldObj, newObj *sourcev1.GitRepository, res sreconcile.Result, resErr error) bool {
- // Notify for successful reconciliation.
- if resErr == nil && res == sreconcile.ResultSuccess && newObj.Status.Artifact != nil {
- return true
- }
- // Notify for no-op reconciliation with ignore error.
- if resErr != nil && res == sreconcile.ResultEmpty && newObj.Status.Artifact != nil {
- // Convert to Generic error and check for ignore.
- if ge, ok := resErr.(*serror.Generic); ok {
- return ge.Ignore == true
- }
- }
- return false
-}
-
-// reconcileStorage ensures the current state of the storage matches the
-// desired and previously observed state.
-//
-// The garbage collection is executed based on the flag configured settings and
-// may remove files that are beyond their TTL or the maximum number of files
-// to survive a collection cycle.
-// If the Artifact in the Status of the object disappeared from the Storage,
-// it is removed from the object.
-// If the object does not have an Artifact in its Status, a Reconciling
-// condition is added.
-// The hostname of any URL in the Status of the object are updated, to ensure
-// they match the Storage server hostname of current runtime.
-func (r *GitRepositoryReconciler) reconcileStorage(ctx context.Context,
- obj *sourcev1.GitRepository, _ *git.Commit, _ *artifactSet, _ string) (sreconcile.Result, error) {
- // Garbage collect previous advertised artifact(s) from storage
- _ = r.garbageCollect(ctx, obj)
-
- // Determine if the advertised artifact is still in storage
- if artifact := obj.GetArtifact(); artifact != nil && !r.Storage.ArtifactExist(*artifact) {
- obj.Status.Artifact = nil
- obj.Status.URL = ""
- // Remove the condition as the artifact doesn't exist.
- conditions.Delete(obj, sourcev1.ArtifactInStorageCondition)
- }
-
- // Record that we do not have an artifact
- if obj.GetArtifact() == nil {
- conditions.MarkReconciling(obj, "NoArtifact", "no artifact for resource in storage")
- conditions.Delete(obj, sourcev1.ArtifactInStorageCondition)
- return sreconcile.ResultSuccess, nil
- }
-
- // Always update URLs to ensure hostname is up-to-date
- // TODO(hidde): we may want to send out an event only if we notice the URL has changed
- r.Storage.SetArtifactURL(obj.GetArtifact())
- obj.Status.URL = r.Storage.SetHostname(obj.Status.URL)
-
- return sreconcile.ResultSuccess, nil
-}
-
-// reconcileSource ensures the upstream Git repository and reference can be
-// cloned and checked out using the specified configuration, and observes its
-// state. It also checks if the included repositories are available for use.
-//
-// The included repositories are fetched and their metadata are stored. In case
-// one of the included repositories isn't ready, it records
-// v1beta2.IncludeUnavailableCondition=True and returns early. When all the
-// included repositories are ready, it removes
-// v1beta2.IncludeUnavailableCondition from the object.
-// When the included artifactSet differs from the current set in the Status of
-// the object, it marks the object with v1beta2.ArtifactOutdatedCondition=True.
-// The repository is cloned to the given dir, using the specified configuration
-// to check out the reference. In case of an error during this process
-// (including transient errors), it records v1beta2.FetchFailedCondition=True
-// and returns early.
-// On a successful checkout, it removes v1beta2.FetchFailedCondition and
-// compares the current revision of HEAD to the revision of the Artifact in the
-// Status of the object. It records v1beta2.ArtifactOutdatedCondition=True when
-// they differ.
-// If specified, the signature of the Git commit is verified. If the signature
-// can not be verified or the verification fails, it records
-// v1beta2.SourceVerifiedCondition=False and returns early. When successful,
-// it records v1beta2.SourceVerifiedCondition=True.
-// When all the above is successful, the given Commit pointer is set to the
-// commit of the checked out Git repository.
-//
-// If the optimized git clone feature is enabled, it checks if the remote repo
-// and the local artifact are on the same revision, and no other source content
-// related configurations have changed since last reconciliation. If there's a
-// change, it short-circuits the whole reconciliation with an early return.
-func (r *GitRepositoryReconciler) reconcileSource(ctx context.Context,
- obj *sourcev1.GitRepository, commit *git.Commit, includes *artifactSet, dir string) (sreconcile.Result, error) {
- // Exit early, if we need to use libgit2 AND managed transport hasn't been intialized.
- if !r.Libgit2TransportInitialized() && obj.Spec.GitImplementation == sourcev1.LibGit2Implementation {
- return sreconcile.ResultEmpty, serror.NewStalling(
- errors.New("libgit2 managed transport not initialized"), "Libgit2TransportNotEnabled",
- )
- }
- // Configure authentication strategy to access the source
- var authOpts *git.AuthOptions
- var err error
- if obj.Spec.SecretRef != nil {
- // Attempt to retrieve secret
- name := types.NamespacedName{
- Namespace: obj.GetNamespace(),
- Name: obj.Spec.SecretRef.Name,
- }
- var secret corev1.Secret
- if err := r.Client.Get(ctx, name, &secret); err != nil {
- e := serror.NewGeneric(
- fmt.Errorf("failed to get secret '%s': %w", name.String(), err),
- sourcev1.AuthenticationFailedReason,
- )
- conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, e.Reason, e.Err.Error())
- // Return error as the world as observed may change
- return sreconcile.ResultEmpty, e
- }
-
- // Configure strategy with secret
- authOpts, err = git.AuthOptionsFromSecret(obj.Spec.URL, &secret)
- } else {
- // Set the minimal auth options for valid transport.
- authOpts, err = git.AuthOptionsWithoutSecret(obj.Spec.URL)
- }
- if err != nil {
- e := serror.NewGeneric(
- fmt.Errorf("failed to configure auth strategy for Git implementation '%s': %w", obj.Spec.GitImplementation, err),
- sourcev1.AuthenticationFailedReason,
- )
- conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, e.Reason, e.Err.Error())
- // Return error as the contents of the secret may change
- return sreconcile.ResultEmpty, e
- }
-
- // Fetch the included artifact metadata.
- artifacts, err := r.fetchIncludes(ctx, obj)
- if err != nil {
- return sreconcile.ResultEmpty, err
- }
-
- // Observe if the artifacts still match the previous included ones
- if artifacts.Diff(obj.Status.IncludedArtifacts) {
- message := fmt.Sprintf("included artifacts differ from last observed includes")
- conditions.MarkTrue(obj, sourcev1.ArtifactOutdatedCondition, "IncludeChange", message)
- conditions.MarkReconciling(obj, "IncludeChange", message)
- }
-
- // Persist the ArtifactSet.
- *includes = *artifacts
-
- var optimizedClone bool
- if val, ok := r.features[features.OptimizedGitClones]; ok && val {
- optimizedClone = true
- }
-
- c, err := r.gitCheckout(ctx, obj, authOpts, dir, optimizedClone)
- if err != nil {
- return sreconcile.ResultEmpty, err
- }
- // Assign the commit to the shared commit reference.
- *commit = *c
-
- // If it's a partial commit obtained from an existing artifact, check if the
- // reconciliation can be skipped if other configurations have not changed.
- if !git.IsConcreteCommit(*commit) {
- // Calculate content configuration checksum.
- if r.calculateContentConfigChecksum(obj, includes) == obj.Status.ContentConfigChecksum {
- ge := serror.NewGeneric(
- fmt.Errorf("no changes since last reconcilation: observed revision '%s'",
- commit.String()), sourcev1.GitOperationSucceedReason,
- )
- ge.Notification = false
- ge.Ignore = true
- ge.Event = corev1.EventTypeNormal
- // Remove any stale fetch failed condition.
- conditions.Delete(obj, sourcev1.FetchFailedCondition)
- // IMPORTANT: This must be set to ensure that the observed
- // generation of this condition is updated. In case of full
- // reconciliation reconcileArtifact() ensures that it's set at the
- // very end.
- conditions.MarkTrue(obj, sourcev1.ArtifactInStorageCondition, meta.SucceededReason,
- "stored artifact for revision '%s'", commit.String())
- // TODO: Find out if such condition setting is needed when commit
- // signature verification is enabled.
- return sreconcile.ResultEmpty, ge
- }
-
- // If we can't skip the reconciliation, checkout again without any
- // optimization.
- c, err := r.gitCheckout(ctx, obj, authOpts, dir, false)
- if err != nil {
- return sreconcile.ResultEmpty, err
- }
- *commit = *c
- }
- ctrl.LoggerFrom(ctx).V(logger.DebugLevel).Info("git repository checked out", "url", obj.Spec.URL, "revision", commit.String())
- conditions.Delete(obj, sourcev1.FetchFailedCondition)
-
- // Verify commit signature
- if result, err := r.verifyCommitSignature(ctx, obj, *commit); err != nil || result == sreconcile.ResultEmpty {
- return result, err
- }
-
- // Mark observations about the revision on the object
- if !obj.GetArtifact().HasRevision(commit.String()) {
- message := fmt.Sprintf("new upstream revision '%s'", commit.String())
- conditions.MarkTrue(obj, sourcev1.ArtifactOutdatedCondition, "NewRevision", message)
- conditions.MarkReconciling(obj, "NewRevision", message)
- }
- return sreconcile.ResultSuccess, nil
-}
-
-// reconcileArtifact archives a new Artifact to the Storage, if the current
-// (Status) data on the object does not match the given.
-//
-// The inspection of the given data to the object is differed, ensuring any
-// stale observations like v1beta2.ArtifactOutdatedCondition are removed.
-// If the given Artifact and/or artifactSet (includes) and the content config
-// checksum do not differ from the object's current, it returns early.
-// Source ignore patterns are loaded, and the given directory is archived while
-// taking these patterns into account.
-// On a successful archive, the Artifact, Includes and new content config
-// checksum in the Status of the object are set, and the symlink in the Storage
-// is updated to its path.
-func (r *GitRepositoryReconciler) reconcileArtifact(ctx context.Context,
- obj *sourcev1.GitRepository, commit *git.Commit, includes *artifactSet, dir string) (sreconcile.Result, error) {
-
- // Create potential new artifact with current available metadata
- artifact := r.Storage.NewArtifactFor(obj.Kind, obj.GetObjectMeta(), commit.String(), fmt.Sprintf("%s.tar.gz", commit.Hash.String()))
-
- // Calculate the content config checksum.
- ccc := r.calculateContentConfigChecksum(obj, includes)
-
- // Set the ArtifactInStorageCondition if there's no drift.
- defer func() {
- if obj.GetArtifact().HasRevision(artifact.Revision) &&
- !includes.Diff(obj.Status.IncludedArtifacts) &&
- obj.Status.ContentConfigChecksum == ccc {
- conditions.Delete(obj, sourcev1.ArtifactOutdatedCondition)
- conditions.MarkTrue(obj, sourcev1.ArtifactInStorageCondition, meta.SucceededReason,
- "stored artifact for revision '%s'", artifact.Revision)
- }
- }()
-
- // The artifact is up-to-date
- if obj.GetArtifact().HasRevision(artifact.Revision) &&
- !includes.Diff(obj.Status.IncludedArtifacts) &&
- obj.Status.ContentConfigChecksum == ccc {
- r.eventLogf(ctx, obj, events.EventTypeTrace, sourcev1.ArtifactUpToDateReason, "artifact up-to-date with remote revision: '%s'", artifact.Revision)
- return sreconcile.ResultSuccess, nil
- }
-
- // Ensure target path exists and is a directory
- if f, err := os.Stat(dir); err != nil {
- e := serror.NewGeneric(
- fmt.Errorf("failed to stat target artifact path: %w", err),
- sourcev1.StatOperationFailedReason,
- )
- conditions.MarkTrue(obj, sourcev1.StorageOperationFailedCondition, e.Reason, e.Err.Error())
- return sreconcile.ResultEmpty, e
- } else if !f.IsDir() {
- e := serror.NewGeneric(
- fmt.Errorf("invalid target path: '%s' is not a directory", dir),
- sourcev1.InvalidPathReason,
- )
- conditions.MarkTrue(obj, sourcev1.StorageOperationFailedCondition, e.Reason, e.Err.Error())
- return sreconcile.ResultEmpty, e
- }
-
- // Ensure artifact directory exists and acquire lock
- if err := r.Storage.MkdirAll(artifact); err != nil {
- e := serror.NewGeneric(
- fmt.Errorf("failed to create artifact directory: %w", err),
- sourcev1.DirCreationFailedReason,
- )
- conditions.MarkTrue(obj, sourcev1.StorageOperationFailedCondition, e.Reason, e.Err.Error())
- return sreconcile.ResultEmpty, e
- }
- unlock, err := r.Storage.Lock(artifact)
- if err != nil {
- return sreconcile.ResultEmpty, serror.NewGeneric(
- fmt.Errorf("failed to acquire lock for artifact: %w", err),
- meta.FailedReason,
- )
- }
- defer unlock()
-
- // Load ignore rules for archiving
- ignoreDomain := strings.Split(dir, string(filepath.Separator))
- ps, err := sourceignore.LoadIgnorePatterns(dir, ignoreDomain)
- if err != nil {
- return sreconcile.ResultEmpty, serror.NewGeneric(
- fmt.Errorf("failed to load source ignore patterns from repository: %w", err),
- "SourceIgnoreError",
- )
- }
- if obj.Spec.Ignore != nil {
- ps = append(ps, sourceignore.ReadPatterns(strings.NewReader(*obj.Spec.Ignore), ignoreDomain)...)
- }
-
- // Archive directory to storage
- if err := r.Storage.Archive(&artifact, dir, SourceIgnoreFilter(ps, ignoreDomain)); err != nil {
- e := serror.NewGeneric(
- fmt.Errorf("unable to archive artifact to storage: %w", err),
- sourcev1.ArchiveOperationFailedReason,
- )
- conditions.MarkTrue(obj, sourcev1.StorageOperationFailedCondition, e.Reason, e.Err.Error())
- return sreconcile.ResultEmpty, e
- }
-
- // Record it on the object
- obj.Status.Artifact = artifact.DeepCopy()
- obj.Status.IncludedArtifacts = *includes
- obj.Status.ContentConfigChecksum = ccc
-
- // Update symlink on a "best effort" basis
- url, err := r.Storage.Symlink(artifact, "latest.tar.gz")
- if err != nil {
- r.eventLogf(ctx, obj, events.EventTypeTrace, sourcev1.SymlinkUpdateFailedReason,
- "failed to update status URL symlink: %s", err)
- }
- if url != "" {
- obj.Status.URL = url
- }
- conditions.Delete(obj, sourcev1.StorageOperationFailedCondition)
- return sreconcile.ResultSuccess, nil
-}
-
-// reconcileInclude reconciles the on the object specified
-// v1beta2.GitRepositoryInclude list by copying their Artifact (sub)contents to
-// the specified paths in the given directory.
-//
-// When one of the includes is unavailable, it marks the object with
-// v1beta2.IncludeUnavailableCondition=True and returns early.
-// When the copy operations are successful, it removes the
-// v1beta2.IncludeUnavailableCondition from the object.
-// When the composed artifactSet differs from the current set in the Status of
-// the object, it marks the object with v1beta2.ArtifactOutdatedCondition=True.
-func (r *GitRepositoryReconciler) reconcileInclude(ctx context.Context,
- obj *sourcev1.GitRepository, _ *git.Commit, includes *artifactSet, dir string) (sreconcile.Result, error) {
-
- for i, incl := range obj.Spec.Include {
- // Do this first as it is much cheaper than copy operations
- toPath, err := securejoin.SecureJoin(dir, incl.GetToPath())
- if err != nil {
- e := serror.NewGeneric(
- fmt.Errorf("path calculation for include '%s' failed: %w", incl.GitRepositoryRef.Name, err),
- "IllegalPath",
- )
- conditions.MarkTrue(obj, sourcev1.StorageOperationFailedCondition, e.Reason, e.Err.Error())
- return sreconcile.ResultEmpty, e
- }
-
- // Get artifact at the same include index. The artifactSet is created
- // such that the index of artifactSet matches with the index of Include.
- // Hence, index is used here to pick the associated artifact from
- // includes.
- var artifact *sourcev1.Artifact
- for j, art := range *includes {
- if i == j {
- artifact = art
- }
- }
-
- // Copy artifact (sub)contents to configured directory.
- if err := r.Storage.CopyToPath(artifact, incl.GetFromPath(), toPath); err != nil {
- e := &serror.Event{
- Err: fmt.Errorf("failed to copy '%s' include from %s to %s: %w", incl.GitRepositoryRef.Name, incl.GetFromPath(), incl.GetToPath(), err),
- Reason: "CopyFailure",
- }
- conditions.MarkTrue(obj, sourcev1.StorageOperationFailedCondition, e.Reason, e.Err.Error())
- return sreconcile.ResultEmpty, e
- }
- }
- conditions.Delete(obj, sourcev1.IncludeUnavailableCondition)
- return sreconcile.ResultSuccess, nil
-}
-
-// gitCheckout builds checkout options with the given configurations and
-// performs a git checkout.
-func (r *GitRepositoryReconciler) gitCheckout(ctx context.Context,
- obj *sourcev1.GitRepository, authOpts *git.AuthOptions, dir string, optimized bool) (*git.Commit, error) {
- // Configure checkout strategy.
- checkoutOpts := git.CheckoutOptions{RecurseSubmodules: obj.Spec.RecurseSubmodules}
- if ref := obj.Spec.Reference; ref != nil {
- checkoutOpts.Branch = ref.Branch
- checkoutOpts.Commit = ref.Commit
- checkoutOpts.Tag = ref.Tag
- checkoutOpts.SemVer = ref.SemVer
- }
-
- // Only if the object has an existing artifact in storage, attempt to
- // short-circuit clone operation. reconcileStorage has already verified
- // that the artifact exists.
- if optimized && conditions.IsTrue(obj, sourcev1.ArtifactInStorageCondition) {
- if artifact := obj.GetArtifact(); artifact != nil {
- checkoutOpts.LastRevision = artifact.Revision
- }
- }
-
- gitCtx, cancel := context.WithTimeout(ctx, obj.Spec.Timeout.Duration)
- defer cancel()
-
- checkoutStrategy, err := strategy.CheckoutStrategyForImplementation(gitCtx,
- git.Implementation(obj.Spec.GitImplementation), checkoutOpts)
- if err != nil {
- // Do not return err as recovery without changes is impossible.
- e := &serror.Stalling{
- Err: fmt.Errorf("failed to configure checkout strategy for Git implementation '%s': %w", obj.Spec.GitImplementation, err),
- Reason: sourcev1.GitOperationFailedReason,
- }
- conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, e.Reason, e.Err.Error())
- return nil, e
- }
-
- // this is needed only for libgit2, due to managed transport.
- if obj.Spec.GitImplementation == sourcev1.LibGit2Implementation {
- // We set the TransportOptionsURL of this set of authentication options here by constructing
- // a unique URL that won't clash in a multi tenant environment. This unique URL is used by
- // libgit2 managed transports. This enables us to bypass the inbuilt credentials callback in
- // libgit2, which is inflexible and unstable.
- if strings.HasPrefix(obj.Spec.URL, "http") {
- authOpts.TransportOptionsURL = fmt.Sprintf("http://%s/%s/%d", obj.Name, obj.UID, obj.Generation)
- } else if strings.HasPrefix(obj.Spec.URL, "ssh") {
- authOpts.TransportOptionsURL = fmt.Sprintf("ssh://%s/%s/%d", obj.Name, obj.UID, obj.Generation)
- } else {
- e := &serror.Stalling{
- Err: fmt.Errorf("git repository URL '%s' has invalid transport type, supported types are: http, https, ssh", obj.Spec.URL),
- Reason: sourcev1.URLInvalidReason,
- }
- conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, e.Reason, e.Err.Error())
- return nil, e
- }
- }
-
- commit, err := checkoutStrategy.Checkout(gitCtx, dir, obj.Spec.URL, authOpts)
- if err != nil {
- e := serror.NewGeneric(
- fmt.Errorf("failed to checkout and determine revision: %w", err),
- sourcev1.GitOperationFailedReason,
- )
- conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, e.Reason, e.Err.Error())
- return nil, e
- }
- return commit, nil
-}
-
-// fetchIncludes fetches artifact metadata of all the included repos.
-func (r *GitRepositoryReconciler) fetchIncludes(ctx context.Context, obj *sourcev1.GitRepository) (*artifactSet, error) {
- artifacts := make(artifactSet, len(obj.Spec.Include))
- for i, incl := range obj.Spec.Include {
- // Retrieve the included GitRepository.
- dep := &sourcev1.GitRepository{}
- if err := r.Get(ctx, types.NamespacedName{Namespace: obj.Namespace, Name: incl.GitRepositoryRef.Name}, dep); err != nil {
- e := serror.NewWaiting(
- fmt.Errorf("could not get resource for include '%s': %w", incl.GitRepositoryRef.Name, err),
- "NotFound",
- )
- e.RequeueAfter = r.requeueDependency
- conditions.MarkTrue(obj, sourcev1.IncludeUnavailableCondition, e.Reason, e.Err.Error())
- return nil, e
- }
-
- // Confirm include has an artifact
- if dep.GetArtifact() == nil {
- e := serror.NewWaiting(
- fmt.Errorf("no artifact available for include '%s'", incl.GitRepositoryRef.Name),
- "NoArtifact",
- )
- e.RequeueAfter = r.requeueDependency
- conditions.MarkTrue(obj, sourcev1.IncludeUnavailableCondition, e.Reason, e.Err.Error())
- return nil, e
- }
-
- artifacts[i] = dep.GetArtifact().DeepCopy()
- }
-
- // We now know all the includes are available.
- conditions.Delete(obj, sourcev1.IncludeUnavailableCondition)
-
- return &artifacts, nil
-}
-
-// calculateContentConfigChecksum calculates a checksum of all the
-// configurations that result in a change in the source artifact. It can be used
-// to decide if further reconciliation is needed when an artifact already exists
-// for a set of configurations.
-func (r *GitRepositoryReconciler) calculateContentConfigChecksum(obj *sourcev1.GitRepository, includes *artifactSet) string {
- c := []byte{}
- // Consider the ignore rules and recurse submodules.
- if obj.Spec.Ignore != nil {
- c = append(c, []byte(*obj.Spec.Ignore)...)
- }
- c = append(c, []byte(strconv.FormatBool(obj.Spec.RecurseSubmodules))...)
-
- // Consider the included repository attributes.
- for _, incl := range obj.Spec.Include {
- c = append(c, []byte(incl.GitRepositoryRef.Name+incl.FromPath+incl.ToPath)...)
- }
-
- // Consider the checksum and revision of all the included remote artifact.
- // This ensures that if the included repos get updated, this checksum changes.
- // NOTE: The content of an artifact may change at the same revision if the
- // ignore rules change. Hence, consider both checksum and revision to
- // capture changes in artifact checksum as well.
- // TODO: Fix artifactSet.Diff() to consider checksum as well.
- if includes != nil {
- for _, incl := range *includes {
- c = append(c, []byte(incl.Checksum)...)
- c = append(c, []byte(incl.Revision)...)
- }
- }
-
- return fmt.Sprintf("sha256:%x", sha256.Sum256(c))
-}
-
-// verifyCommitSignature verifies the signature of the given Git commit, if a
-// verification mode is specified on the object.
-// If the signature can not be verified or the verification fails, it records
-// v1beta2.SourceVerifiedCondition=False and returns.
-// When successful, it records v1beta2.SourceVerifiedCondition=True.
-// If no verification mode is specified on the object, the
-// v1beta2.SourceVerifiedCondition Condition is removed.
-func (r *GitRepositoryReconciler) verifyCommitSignature(ctx context.Context, obj *sourcev1.GitRepository, commit git.Commit) (sreconcile.Result, error) {
- // Check if there is a commit verification is configured and remove any old
- // observations if there is none
- if obj.Spec.Verification == nil || obj.Spec.Verification.Mode == "" {
- conditions.Delete(obj, sourcev1.SourceVerifiedCondition)
- return sreconcile.ResultSuccess, nil
- }
-
- // Get secret with GPG data
- publicKeySecret := types.NamespacedName{
- Namespace: obj.Namespace,
- Name: obj.Spec.Verification.SecretRef.Name,
- }
- secret := &corev1.Secret{}
- if err := r.Client.Get(ctx, publicKeySecret, secret); err != nil {
- e := serror.NewGeneric(
- fmt.Errorf("PGP public keys secret error: %w", err),
- "VerificationError",
- )
- conditions.MarkFalse(obj, sourcev1.SourceVerifiedCondition, e.Reason, e.Err.Error())
- return sreconcile.ResultEmpty, e
- }
-
- var keyRings []string
- for _, v := range secret.Data {
- keyRings = append(keyRings, string(v))
- }
- // Verify commit with GPG data from secret
- if _, err := commit.Verify(keyRings...); err != nil {
- e := serror.NewGeneric(
- fmt.Errorf("signature verification of commit '%s' failed: %w", commit.Hash.String(), err),
- "InvalidCommitSignature",
- )
- conditions.MarkFalse(obj, sourcev1.SourceVerifiedCondition, e.Reason, e.Err.Error())
- // Return error in the hope the secret changes
- return sreconcile.ResultEmpty, e
- }
-
- conditions.MarkTrue(obj, sourcev1.SourceVerifiedCondition, meta.SucceededReason,
- "verified signature of commit '%s'", commit.Hash.String())
- r.eventLogf(ctx, obj, events.EventTypeTrace, "VerifiedCommit",
- "verified signature of commit '%s'", commit.Hash.String())
- return sreconcile.ResultSuccess, nil
-}
-
-// reconcileDelete handles the deletion of the object.
-// It first garbage collects all Artifacts for the object from the Storage.
-// Removing the finalizer from the object if successful.
-func (r *GitRepositoryReconciler) reconcileDelete(ctx context.Context, obj *sourcev1.GitRepository) (sreconcile.Result, error) {
- // Garbage collect the resource's artifacts
- if err := r.garbageCollect(ctx, obj); err != nil {
- // Return the error so we retry the failed garbage collection
- return sreconcile.ResultEmpty, err
- }
-
- // Remove our finalizer from the list
- controllerutil.RemoveFinalizer(obj, sourcev1.SourceFinalizer)
-
- // Stop reconciliation as the object is being deleted
- return sreconcile.ResultEmpty, nil
-}
-
-// garbageCollect performs a garbage collection for the given object.
-//
-// It removes all but the current Artifact from the Storage, unless the
-// deletion timestamp on the object is set. Which will result in the
-// removal of all Artifacts for the objects.
-func (r *GitRepositoryReconciler) garbageCollect(ctx context.Context, obj *sourcev1.GitRepository) error {
- if !obj.DeletionTimestamp.IsZero() {
- if deleted, err := r.Storage.RemoveAll(r.Storage.NewArtifactFor(obj.Kind, obj.GetObjectMeta(), "", "*")); err != nil {
- return serror.NewGeneric(
- fmt.Errorf("garbage collection for deleted resource failed: %w", err),
- "GarbageCollectionFailed",
- )
- } else if deleted != "" {
- r.eventLogf(ctx, obj, events.EventTypeTrace, "GarbageCollectionSucceeded",
- "garbage collected artifacts for deleted resource")
- }
- obj.Status.Artifact = nil
- return nil
- }
- if obj.GetArtifact() != nil {
- delFiles, err := r.Storage.GarbageCollect(ctx, *obj.GetArtifact(), time.Second*5)
- if err != nil {
- return serror.NewGeneric(
- fmt.Errorf("garbage collection of artifacts failed: %w", err),
- "GarbageCollectionFailed",
- )
- }
- if len(delFiles) > 0 {
- r.eventLogf(ctx, obj, events.EventTypeTrace, "GarbageCollectionSucceeded",
- fmt.Sprintf("garbage collected %d artifacts", len(delFiles)))
- return nil
- }
- }
- return nil
-}
-
-// eventLogf records events, and logs at the same time.
-//
-// This log is different from the debug log in the EventRecorder, in the sense
-// that this is a simple log. While the debug log contains complete details
-// about the event.
-func (r *GitRepositoryReconciler) eventLogf(ctx context.Context, obj runtime.Object, eventType string, reason string, messageFmt string, args ...interface{}) {
- msg := fmt.Sprintf(messageFmt, args...)
- // Log and emit event.
- if eventType == corev1.EventTypeWarning {
- ctrl.LoggerFrom(ctx).Error(errors.New(reason), msg)
- } else {
- ctrl.LoggerFrom(ctx).Info(msg)
- }
- r.Eventf(obj, eventType, reason, msg)
-}
diff --git a/controllers/gitrepository_controller_test.go b/controllers/gitrepository_controller_test.go
deleted file mode 100644
index bfb857df0..000000000
--- a/controllers/gitrepository_controller_test.go
+++ /dev/null
@@ -1,2214 +0,0 @@
-/*
-Copyright 2020 The Flux authors
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package controllers
-
-import (
- "context"
- "errors"
- "fmt"
- "net/http"
- "net/url"
- "os"
- "path/filepath"
- "strings"
- "testing"
- "time"
-
- "github.com/darkowlzz/controller-check/status"
- "github.com/fluxcd/pkg/apis/meta"
- "github.com/fluxcd/pkg/gittestserver"
- "github.com/fluxcd/pkg/runtime/conditions"
- "github.com/fluxcd/pkg/runtime/patch"
- "github.com/fluxcd/pkg/ssh"
- "github.com/fluxcd/pkg/testserver"
- "github.com/go-git/go-billy/v5/memfs"
- gogit "github.com/go-git/go-git/v5"
- "github.com/go-git/go-git/v5/config"
- "github.com/go-git/go-git/v5/plumbing"
- "github.com/go-git/go-git/v5/plumbing/object"
- "github.com/go-git/go-git/v5/storage/memory"
- . "github.com/onsi/gomega"
- sshtestdata "golang.org/x/crypto/ssh/testdata"
- corev1 "k8s.io/api/core/v1"
- apierrors "k8s.io/apimachinery/pkg/api/errors"
- metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
- "k8s.io/apimachinery/pkg/runtime"
- "k8s.io/client-go/tools/record"
- "k8s.io/utils/pointer"
- kstatus "sigs.k8s.io/cli-utils/pkg/kstatus/status"
- ctrl "sigs.k8s.io/controller-runtime"
- "sigs.k8s.io/controller-runtime/pkg/client"
- "sigs.k8s.io/controller-runtime/pkg/client/fake"
- fakeclient "sigs.k8s.io/controller-runtime/pkg/client/fake"
- "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
-
- sourcev1 "github.com/fluxcd/source-controller/api/v1beta2"
- serror "github.com/fluxcd/source-controller/internal/error"
- "github.com/fluxcd/source-controller/internal/features"
- sreconcile "github.com/fluxcd/source-controller/internal/reconcile"
- "github.com/fluxcd/source-controller/internal/reconcile/summarize"
- "github.com/fluxcd/source-controller/pkg/git"
- "github.com/fluxcd/source-controller/pkg/git/libgit2/managed"
-)
-
-const (
- encodedCommitFixture = `tree f0c522d8cc4c90b73e2bc719305a896e7e3c108a
-parent eb167bc68d0a11530923b1f24b4978535d10b879
-author Stefan Prodan 1633681364 +0300
-committer Stefan Prodan 1633681364 +0300
-
-Update containerd and runc to fix CVEs
-
-Signed-off-by: Stefan Prodan
-`
- malformedEncodedCommitFixture = `parent eb167bc68d0a11530923b1f24b4978535d10b879
-author Stefan Prodan 1633681364 +0300
-committer Stefan Prodan 1633681364 +0300
-
-Update containerd and runc to fix CVEs
-
-Signed-off-by: Stefan Prodan
-`
- signatureCommitFixture = `-----BEGIN PGP SIGNATURE-----
-
-iHUEABEIAB0WIQQHgExUr4FrLdKzpNYyma6w5AhbrwUCYV//1AAKCRAyma6w5Ahb
-r7nJAQCQU4zEJu04/Q0ac/UaL6htjhq/wTDNMeUM+aWG/LcBogEAqFUea1oR2BJQ
-JCJmEtERFh39zNWSazQmxPAFhEE0kbc=
-=+Wlj
------END PGP SIGNATURE-----`
- armoredKeyRingFixture = `-----BEGIN PGP PUBLIC KEY BLOCK-----
-
-mQSuBF9+HgMRDADKT8UBcSzpTi4JXt/ohhVW3x81AGFPrQvs6MYrcnNJfIkPTJD8
-mY5T7j1fkaN5wcf1wnxM9qTcW8BodkWNGEoEYOtVuigLSxPFqIncxK0PHvdU8ths
-TEInBrgZv9t6xIVa4QngOEUd2D/aYni7M+75z7ntgj6eU1xLZ60upRFn05862OvJ
-rZFUvzjsZXMAO3enCu2VhG/2axCY/5uI8PgWjyiKV2TH4LBJgzlb0v6SyI+fYf5K
-Bg2WzDuLKvQBi9tFSwnUbQoFFlOeiGW8G/bdkoJDWeS1oYgSD3nkmvXvrVESCrbT
-C05OtQOiDXjSpkLim81vNVPtI2XEug+9fEA+jeJakyGwwB+K8xqV3QILKCoWHKGx
-yWcMHSR6cP9tdXCk2JHZBm1PLSJ8hIgMH/YwBJLYg90u8lLAs9WtpVBKkLplzzgm
-B4Z4VxCC+xI1kt+3ZgYvYC+oUXJXrjyAzy+J1f+aWl2+S/79glWgl/xz2VibWMz6
-nZUE+wLMxOQqyOsBALsoE6z81y/7gfn4R/BziBASi1jq/r/wdboFYowmqd39DACX
-+i+V0OplP2TN/F5JajzRgkrlq5cwZHinnw+IFwj9RTfOkdGb3YwhBt/h2PP38969
-ZG+y8muNtaIqih1pXj1fz9HRtsiCABN0j+JYpvV2D2xuLL7P1O0dt5BpJ3KqNCRw
-mGgO2GLxbwvlulsLidCPxdK/M8g9Eeb/xwA5LVwvjVchHkzHuUT7durn7AT0RWiK
-BT8iDfeBB9RKienAbWyybEqRaR6/Tv+mghFIalsDiBPbfm4rsNzsq3ohfByqECiy
-yUvs2O3NDwkoaBDkA3GFyKv8/SVpcuL5OkVxAHNCIMhNzSgotQ3KLcQc0IREfFCa
-3CsBAC7CsE2bJZ9IA9sbBa3jimVhWUQVudRWiLFeYHUF/hjhqS8IHyFwprjEOLaV
-EG0kBO6ELypD/bOsmN9XZLPYyI3y9DM6Vo0KMomE+yK/By/ZMxVfex8/TZreUdhP
-VdCLL95Rc4w9io8qFb2qGtYBij2wm0RWLcM0IhXWAtjI3B17IN+6hmv+JpiZccsM
-AMNR5/RVdXIl0hzr8LROD0Xe4sTyZ+fm3mvpczoDPQNRrWpmI/9OT58itnVmZ5jM
-7djV5y/NjBk63mlqYYfkfWto97wkhg0MnTnOhzdtzSiZQRzj+vf+ilLfIlLnuRr1
-JRV9Skv6xQltcFArx4JyfZCo7JB1ZXcbdFAvIXXS11RTErO0XVrXNm2RenpW/yZA
-9f+ESQ/uUB6XNuyqVUnJDAFJFLdzx8sO3DXo7dhIlgpFqgQobUl+APpbU5LT95sm
-89UrV0Lt9vh7k6zQtKOjEUhm+dErmuBnJo8MvchAuXLagHjvb58vYBCUxVxzt1KG
-2IePwJ/oXIfawNEGad9Lmdo1FYG1u53AKWZmpYOTouu92O50FG2+7dBh0V2vO253
-aIGFRT1r14B1pkCIun7z7B/JELqOkmwmlRrUnxlADZEcQT3z/S8/4+2P7P6kXO7X
-/TAX5xBhSqUbKe3DhJSOvf05/RVL5ULc2U2JFGLAtmBOFmnD/u0qoo5UvWliI+v/
-47QnU3RlZmFuIFByb2RhbiA8c3RlZmFuLnByb2RhbkBnbWFpbC5jb20+iJAEExEI
-ADgWIQQHgExUr4FrLdKzpNYyma6w5AhbrwUCX34eAwIbAwULCQgHAgYVCgkICwIE
-FgIDAQIeAQIXgAAKCRAyma6w5Ahbrzu/AP9l2YpRaWZr6wSQuEn0gMN8DRzsWJPx
-pn0akdY7SRP3ngD9GoKgu41FAItnHAJ2KiHv/fHFyHMndNP3kPGPNW4BF+65Aw0E
-X34eAxAMAMdYFCHmVA8TZxSTMBDpKYave8RiDCMMMjk26Gl0EPN9f2Y+s5++DhiQ
-hojNH9VmJkFwZX1xppxe1y1aLa/U6fBAqMP/IdNH8270iv+A9YIxdsWLmpm99BDO
-3suRfsHcOe9T0x/CwRfDNdGM/enGMhYGTgF4VD58DRDE6WntaBhl4JJa300NG6X0
-GM4Gh59DKWDnez/Shulj8demlWmakP5imCVoY+omOEc2k3nH02U+foqaGG5WxZZ+
-GwEPswm2sBxvn8nwjy9gbQwEtzNI7lWYiz36wCj2VS56Udqt+0eNg8WzocUT0XyI
-moe1qm8YJQ6fxIzaC431DYi/mCDzgx4EV9ww33SXX3Yp2NL6PsdWJWw2QnoqSMpM
-z5otw2KlMgUHkkXEKs0apmK4Hu2b6KD7/ydoQRFUqR38Gb0IZL1tOL6PnbCRUcig
-Aypy016W/WMCjBfQ8qxIGTaj5agX2t28hbiURbxZkCkz+Z3OWkO0Rq3Y2hNAYM5s
-eTn94JIGGwADBgv/dbSZ9LrBvdMwg8pAtdlLtQdjPiT1i9w5NZuQd7OuKhOxYTEB
-NRDTgy4/DgeNThCeOkMB/UQQPtJ3Et45S2YRtnnuvfxgnlz7xlUn765/grtnRk4t
-ONjMmb6tZos1FjIJecB/6h4RsvUd2egvtlpD/Z3YKr6MpNjWg4ji7m27e9pcJfP6
-YpTDrq9GamiHy9FS2F2pZlQxriPpVhjCLVn9tFGBIsXNxxn7SP4so6rJBmyHEAlq
-iym9wl933e0FIgAw5C1vvprYu2amk+jmVBsJjjCmInW5q/kWAFnFaHBvk+v+/7tX
-hywWUI7BqseikgUlkgJ6eU7E9z1DEyuS08x/cViDoNh2ntVUhpnluDu48pdqBvvY
-a4uL/D+KI84THUAJ/vZy+q6G3BEb4hI9pFjgrdJpUKubxyZolmkCFZHjV34uOcTc
-LQr28P8xW8vQbg5DpIsivxYLqDGXt3OyiItxvLMtw/ypt6PkoeP9A4KDST4StITE
-1hrOrPtJ/VRmS2o0iHgEGBEIACAWIQQHgExUr4FrLdKzpNYyma6w5AhbrwUCX34e
-AwIbDAAKCRAyma6w5Ahbr6QWAP9/pl2R6r1nuCnXzewSbnH1OLsXf32hFQAjaQ5o
-Oomb3gD/TRf/nAdVED+k81GdLzciYdUGtI71/qI47G0nMBluLRE=
-=/4e+
------END PGP PUBLIC KEY BLOCK-----
-`
- emptyContentConfigChecksum = "sha256:fcbcf165908dd18a9e49f7ff27810176db8e9f63b4352213741664245224f8aa"
-)
-
-var (
- testGitImplementations = []string{sourcev1.GoGitImplementation, sourcev1.LibGit2Implementation}
-)
-
-func mockTransportNotInitialized() bool {
- return false
-}
-
-func TestGitRepositoryReconciler_Reconcile(t *testing.T) {
- g := NewWithT(t)
-
- server, err := gittestserver.NewTempGitServer()
- g.Expect(err).NotTo(HaveOccurred())
- defer os.RemoveAll(server.Root())
- server.AutoCreate()
- g.Expect(server.StartHTTP()).To(Succeed())
- defer server.StopHTTP()
-
- repoPath := "/test.git"
- _, err = initGitRepo(server, "testdata/git/repository", git.DefaultBranch, repoPath)
- g.Expect(err).NotTo(HaveOccurred())
-
- obj := &sourcev1.GitRepository{
- ObjectMeta: metav1.ObjectMeta{
- GenerateName: "gitrepository-reconcile-",
- Namespace: "default",
- },
- Spec: sourcev1.GitRepositorySpec{
- Interval: metav1.Duration{Duration: interval},
- URL: server.HTTPAddress() + repoPath,
- },
- }
- g.Expect(testEnv.Create(ctx, obj)).To(Succeed())
-
- key := client.ObjectKey{Name: obj.Name, Namespace: obj.Namespace}
-
- // Wait for finalizer to be set
- g.Eventually(func() bool {
- if err := testEnv.Get(ctx, key, obj); err != nil {
- return false
- }
- return len(obj.Finalizers) > 0
- }, timeout).Should(BeTrue())
-
- // Wait for GitRepository to be Ready
- g.Eventually(func() bool {
- if err := testEnv.Get(ctx, key, obj); err != nil {
- return false
- }
- if !conditions.IsReady(obj) || obj.Status.Artifact == nil {
- return false
- }
- readyCondition := conditions.Get(obj, meta.ReadyCondition)
- return obj.Generation == readyCondition.ObservedGeneration &&
- obj.Generation == obj.Status.ObservedGeneration
- }, timeout).Should(BeTrue())
-
- // Check if the object status is valid.
- condns := &status.Conditions{NegativePolarity: gitRepositoryReadyCondition.NegativePolarity}
- checker := status.NewChecker(testEnv.Client, condns)
- checker.CheckErr(ctx, obj)
-
- // kstatus client conformance check.
- u, err := patch.ToUnstructured(obj)
- g.Expect(err).ToNot(HaveOccurred())
- res, err := kstatus.Compute(u)
- g.Expect(err).ToNot(HaveOccurred())
- g.Expect(res.Status).To(Equal(kstatus.CurrentStatus))
-
- // Patch the object with reconcile request annotation.
- patchHelper, err := patch.NewHelper(obj, testEnv.Client)
- g.Expect(err).ToNot(HaveOccurred())
- annotations := map[string]string{
- meta.ReconcileRequestAnnotation: "now",
- }
- obj.SetAnnotations(annotations)
- g.Expect(patchHelper.Patch(ctx, obj)).ToNot(HaveOccurred())
- g.Eventually(func() bool {
- if err := testEnv.Get(ctx, key, obj); err != nil {
- return false
- }
- return obj.Status.LastHandledReconcileAt == "now"
- }, timeout).Should(BeTrue())
-
- g.Expect(testEnv.Delete(ctx, obj)).To(Succeed())
-
- // Wait for GitRepository to be deleted
- g.Eventually(func() bool {
- if err := testEnv.Get(ctx, key, obj); err != nil {
- return apierrors.IsNotFound(err)
- }
- return false
- }, timeout).Should(BeTrue())
-}
-
-func TestGitRepositoryReconciler_reconcileSource_authStrategy(t *testing.T) {
- type options struct {
- username string
- password string
- publicKey []byte
- privateKey []byte
- ca []byte
- }
-
- tests := []struct {
- name string
- skipForImplementation string
- protocol string
- server options
- secret *corev1.Secret
- beforeFunc func(obj *sourcev1.GitRepository)
- want sreconcile.Result
- wantErr bool
- assertConditions []metav1.Condition
- }{
- {
- name: "HTTP without secretRef makes ArtifactOutdated=True",
- protocol: "http",
- want: sreconcile.ResultSuccess,
- assertConditions: []metav1.Condition{
- *conditions.TrueCondition(sourcev1.ArtifactOutdatedCondition, "NewRevision", "new upstream revision 'master/'"),
- *conditions.TrueCondition(meta.ReconcilingCondition, "NewRevision", "new upstream revision 'master/'"),
- },
- },
- {
- name: "HTTP with Basic Auth secret makes ArtifactOutdated=True",
- protocol: "http",
- server: options{
- username: "git",
- password: "1234",
- },
- secret: &corev1.Secret{
- ObjectMeta: metav1.ObjectMeta{
- Name: "basic-auth",
- },
- Data: map[string][]byte{
- "username": []byte("git"),
- "password": []byte("1234"),
- },
- },
- beforeFunc: func(obj *sourcev1.GitRepository) {
- obj.Spec.SecretRef = &meta.LocalObjectReference{Name: "basic-auth"}
- },
- want: sreconcile.ResultSuccess,
- assertConditions: []metav1.Condition{
- *conditions.TrueCondition(sourcev1.ArtifactOutdatedCondition, "NewRevision", "new upstream revision 'master/'"),
- *conditions.TrueCondition(meta.ReconcilingCondition, "NewRevision", "new upstream revision 'master/'"),
- },
- },
- {
- name: "HTTPS with CAFile secret makes ArtifactOutdated=True",
- protocol: "https",
- server: options{
- publicKey: tlsPublicKey,
- privateKey: tlsPrivateKey,
- ca: tlsCA,
- },
- secret: &corev1.Secret{
- ObjectMeta: metav1.ObjectMeta{
- Name: "ca-file",
- },
- Data: map[string][]byte{
- "caFile": tlsCA,
- },
- },
- beforeFunc: func(obj *sourcev1.GitRepository) {
- obj.Spec.SecretRef = &meta.LocalObjectReference{Name: "ca-file"}
- },
- want: sreconcile.ResultSuccess,
- assertConditions: []metav1.Condition{
- *conditions.TrueCondition(sourcev1.ArtifactOutdatedCondition, "NewRevision", "new upstream revision 'master/'"),
- *conditions.TrueCondition(meta.ReconcilingCondition, "NewRevision", "new upstream revision 'master/'"),
- },
- },
- {
- name: "HTTPS with invalid CAFile secret makes CheckoutFailed=True and returns error",
- skipForImplementation: sourcev1.LibGit2Implementation,
- protocol: "https",
- server: options{
- publicKey: tlsPublicKey,
- privateKey: tlsPrivateKey,
- ca: tlsCA,
- },
- secret: &corev1.Secret{
- ObjectMeta: metav1.ObjectMeta{
- Name: "invalid-ca",
- },
- Data: map[string][]byte{
- "caFile": []byte("invalid"),
- },
- },
- beforeFunc: func(obj *sourcev1.GitRepository) {
- obj.Spec.SecretRef = &meta.LocalObjectReference{Name: "invalid-ca"}
- },
- wantErr: true,
- assertConditions: []metav1.Condition{
- // The expected error messages may differ when in darwin. In some cases it will match the
- // error message expected in linux: "x509: certificate signed by unknown authority". In
- // other cases it may get "x509: “example.com” certificate is not standards compliant" instead.
- //
- // Trimming the expected error message for consistent results.
- *conditions.TrueCondition(sourcev1.FetchFailedCondition, sourcev1.GitOperationFailedReason, "x509: "),
- },
- },
- {
- name: "HTTPS with invalid CAFile secret makes CheckoutFailed=True and returns error",
- skipForImplementation: sourcev1.GoGitImplementation,
- protocol: "https",
- server: options{
- publicKey: tlsPublicKey,
- privateKey: tlsPrivateKey,
- ca: tlsCA,
- },
- secret: &corev1.Secret{
- ObjectMeta: metav1.ObjectMeta{
- Name: "invalid-ca",
- },
- Data: map[string][]byte{
- "caFile": []byte("invalid"),
- },
- },
- beforeFunc: func(obj *sourcev1.GitRepository) {
- obj.Spec.SecretRef = &meta.LocalObjectReference{Name: "invalid-ca"}
- },
- wantErr: true,
- assertConditions: []metav1.Condition{
- *conditions.TrueCondition(sourcev1.FetchFailedCondition, sourcev1.GitOperationFailedReason, "failed to checkout and determine revision: unable to fetch-connect to remote '': PEM CA bundle could not be appended to x509 certificate pool"),
- },
- },
- {
- name: "SSH with private key secret makes ArtifactOutdated=True",
- protocol: "ssh",
- server: options{
- username: "git",
- },
- secret: &corev1.Secret{
- ObjectMeta: metav1.ObjectMeta{
- Name: "private-key",
- },
- Data: map[string][]byte{
- "username": []byte("git"),
- "identity": sshtestdata.PEMBytes["rsa"],
- },
- },
- beforeFunc: func(obj *sourcev1.GitRepository) {
- obj.Spec.SecretRef = &meta.LocalObjectReference{Name: "private-key"}
- },
- want: sreconcile.ResultSuccess,
- assertConditions: []metav1.Condition{
- *conditions.TrueCondition(sourcev1.ArtifactOutdatedCondition, "NewRevision", "new upstream revision 'master/'"),
- *conditions.TrueCondition(meta.ReconcilingCondition, "NewRevision", "new upstream revision 'master/'"),
- },
- },
- {
- name: "SSH with password protected private key secret makes ArtifactOutdated=True",
- protocol: "ssh",
- server: options{
- username: "git",
- },
- secret: &corev1.Secret{
- ObjectMeta: metav1.ObjectMeta{
- Name: "private-key",
- },
- Data: map[string][]byte{
- "username": []byte("git"),
- "identity": sshtestdata.PEMEncryptedKeys[2].PEMBytes,
- "password": []byte("password"),
- },
- },
- beforeFunc: func(obj *sourcev1.GitRepository) {
- obj.Spec.SecretRef = &meta.LocalObjectReference{Name: "private-key"}
- },
- want: sreconcile.ResultSuccess,
- assertConditions: []metav1.Condition{
- *conditions.TrueCondition(sourcev1.ArtifactOutdatedCondition, "NewRevision", "new upstream revision 'master/'"),
- *conditions.TrueCondition(meta.ReconcilingCondition, "NewRevision", "new upstream revision 'master/'"),
- },
- },
- {
- name: "Include get failure makes CheckoutFailed=True and returns error",
- protocol: "http",
- server: options{
- username: "git",
- },
- beforeFunc: func(obj *sourcev1.GitRepository) {
- obj.Spec.SecretRef = &meta.LocalObjectReference{Name: "non-existing"}
- },
- wantErr: true,
- assertConditions: []metav1.Condition{
- *conditions.TrueCondition(sourcev1.FetchFailedCondition, sourcev1.AuthenticationFailedReason, "failed to get secret '/non-existing': secrets \"non-existing\" not found"),
- },
- },
- }
-
- for _, tt := range tests {
- t.Run(tt.name, func(t *testing.T) {
- g := NewWithT(t)
-
- obj := &sourcev1.GitRepository{
- ObjectMeta: metav1.ObjectMeta{
- GenerateName: "auth-strategy-",
- },
- Spec: sourcev1.GitRepositorySpec{
- Interval: metav1.Duration{Duration: interval},
- Timeout: &metav1.Duration{Duration: timeout},
- },
- }
-
- server, err := gittestserver.NewTempGitServer()
- g.Expect(err).NotTo(HaveOccurred())
- defer os.RemoveAll(server.Root())
- server.AutoCreate()
-
- repoPath := "/test.git"
- localRepo, err := initGitRepo(server, "testdata/git/repository", git.DefaultBranch, repoPath)
- g.Expect(err).NotTo(HaveOccurred())
-
- if len(tt.server.username+tt.server.password) > 0 {
- server.Auth(tt.server.username, tt.server.password)
- }
-
- secret := tt.secret.DeepCopy()
- switch tt.protocol {
- case "http":
- g.Expect(server.StartHTTP()).To(Succeed())
- defer server.StopHTTP()
- obj.Spec.URL = server.HTTPAddress() + repoPath
- case "https":
- g.Expect(server.StartHTTPS(tt.server.publicKey, tt.server.privateKey, tt.server.ca, "example.com")).To(Succeed())
- obj.Spec.URL = server.HTTPAddress() + repoPath
- case "ssh":
- server.KeyDir(filepath.Join(server.Root(), "keys"))
-
- g.Expect(server.ListenSSH()).To(Succeed())
- obj.Spec.URL = server.SSHAddress() + repoPath
-
- go func() {
- server.StartSSH()
- }()
- defer server.StopSSH()
-
- if secret != nil && len(secret.Data["known_hosts"]) == 0 {
- u, err := url.Parse(obj.Spec.URL)
- g.Expect(err).NotTo(HaveOccurred())
- g.Expect(u.Host).ToNot(BeEmpty())
- knownHosts, err := ssh.ScanHostKey(u.Host, timeout, git.HostKeyAlgos, false)
- g.Expect(err).NotTo(HaveOccurred())
- secret.Data["known_hosts"] = knownHosts
- }
- default:
- t.Fatalf("unsupported protocol %q", tt.protocol)
- }
-
- if tt.beforeFunc != nil {
- tt.beforeFunc(obj)
- }
-
- builder := fakeclient.NewClientBuilder().WithScheme(testEnv.GetScheme())
- if secret != nil {
- builder.WithObjects(secret.DeepCopy())
- }
-
- r := &GitRepositoryReconciler{
- Client: builder.Build(),
- EventRecorder: record.NewFakeRecorder(32),
- Storage: testStorage,
- features: features.FeatureGates(),
- Libgit2TransportInitialized: managed.Enabled,
- }
-
- for _, i := range testGitImplementations {
- t.Run(i, func(t *testing.T) {
- g := NewWithT(t)
-
- if tt.skipForImplementation == i {
- t.Skipf("Skipped for Git implementation %q", i)
- }
-
- tmpDir := t.TempDir()
-
- obj := obj.DeepCopy()
- obj.Spec.GitImplementation = i
-
- head, _ := localRepo.Head()
- assertConditions := tt.assertConditions
- for k := range assertConditions {
- assertConditions[k].Message = strings.ReplaceAll(assertConditions[k].Message, "", head.Hash().String())
- assertConditions[k].Message = strings.ReplaceAll(assertConditions[k].Message, "", obj.Spec.URL)
- }
-
- var commit git.Commit
- var includes artifactSet
-
- got, err := r.reconcileSource(context.TODO(), obj, &commit, &includes, tmpDir)
- g.Expect(obj.Status.Conditions).To(conditions.MatchConditions(tt.assertConditions))
- g.Expect(err != nil).To(Equal(tt.wantErr))
- g.Expect(got).To(Equal(tt.want))
- g.Expect(commit).ToNot(BeNil())
- })
- }
- })
- }
-}
-
-func TestGitRepositoryReconciler_reconcileSource_libgit2TransportUninitialized(t *testing.T) {
- g := NewWithT(t)
-
- r := &GitRepositoryReconciler{
- Client: fakeclient.NewClientBuilder().WithScheme(runtime.NewScheme()).Build(),
- EventRecorder: record.NewFakeRecorder(32),
- Storage: testStorage,
- features: features.FeatureGates(),
- Libgit2TransportInitialized: mockTransportNotInitialized,
- }
-
- obj := &sourcev1.GitRepository{
- ObjectMeta: metav1.ObjectMeta{
- GenerateName: "libgit2-transport",
- },
- Spec: sourcev1.GitRepositorySpec{
- Interval: metav1.Duration{Duration: interval},
- Timeout: &metav1.Duration{Duration: timeout},
- Reference: &sourcev1.GitRepositoryRef{
- Branch: git.DefaultBranch,
- },
- GitImplementation: sourcev1.LibGit2Implementation,
- },
- }
-
- tmpDir := t.TempDir()
- var commit git.Commit
- var includes artifactSet
- _, err := r.reconcileSource(ctx, obj, &commit, &includes, tmpDir)
- g.Expect(err).To(HaveOccurred())
- g.Expect(err).To(BeAssignableToTypeOf(&serror.Stalling{}))
- g.Expect(err.Error()).To(Equal("libgit2 managed transport not initialized"))
-}
-
-func TestGitRepositoryReconciler_reconcileSource_checkoutStrategy(t *testing.T) {
- g := NewWithT(t)
-
- branches := []string{"staging"}
- tags := []string{"non-semver-tag", "v0.1.0", "0.2.0", "v0.2.1", "v1.0.0-alpha", "v1.1.0", "v2.0.0"}
-
- tests := []struct {
- name string
- skipForImplementation string
- reference *sourcev1.GitRepositoryRef
- beforeFunc func(obj *sourcev1.GitRepository, latestRev string)
- want sreconcile.Result
- wantErr bool
- wantRevision string
- wantArtifactOutdated bool
- }{
- {
- name: "Nil reference (default branch)",
- want: sreconcile.ResultSuccess,
- wantRevision: "master/",
- wantArtifactOutdated: true,
- },
- {
- name: "Branch",
- reference: &sourcev1.GitRepositoryRef{
- Branch: "staging",
- },
- want: sreconcile.ResultSuccess,
- wantRevision: "staging/",
- wantArtifactOutdated: true,
- },
- {
- name: "Tag",
- reference: &sourcev1.GitRepositoryRef{
- Tag: "v0.1.0",
- },
- want: sreconcile.ResultSuccess,
- wantRevision: "v0.1.0/",
- wantArtifactOutdated: true,
- },
- {
- name: "Branch commit",
- skipForImplementation: sourcev1.LibGit2Implementation,
- reference: &sourcev1.GitRepositoryRef{
- Branch: "staging",
- Commit: "",
- },
- want: sreconcile.ResultSuccess,
- wantRevision: "staging/",
- wantArtifactOutdated: true,
- },
- {
- name: "Branch commit",
- skipForImplementation: sourcev1.GoGitImplementation,
- reference: &sourcev1.GitRepositoryRef{
- Branch: "staging",
- Commit: "",
- },
- want: sreconcile.ResultSuccess,
- wantRevision: "HEAD/",
- wantArtifactOutdated: true,
- },
- {
- name: "SemVer",
- reference: &sourcev1.GitRepositoryRef{
- SemVer: "*",
- },
- want: sreconcile.ResultSuccess,
- wantRevision: "v2.0.0/",
- wantArtifactOutdated: true,
- },
- {
- name: "SemVer range",
- reference: &sourcev1.GitRepositoryRef{
- SemVer: "",
- wantArtifactOutdated: true,
- },
- {
- name: "SemVer prerelease",
- reference: &sourcev1.GitRepositoryRef{
- SemVer: ">=1.0.0-0 <1.1.0-0",
- },
- wantRevision: "v1.0.0-alpha/",
- want: sreconcile.ResultSuccess,
- wantArtifactOutdated: true,
- },
- {
- name: "Optimized clone",
- reference: &sourcev1.GitRepositoryRef{
- Branch: "staging",
- },
- beforeFunc: func(obj *sourcev1.GitRepository, latestRev string) {
- // Add existing artifact on the object and storage.
- obj.Status = sourcev1.GitRepositoryStatus{
- Artifact: &sourcev1.Artifact{
- Revision: "staging/" + latestRev,
- Path: randStringRunes(10),
- },
- // Checksum with all the relevant fields unset.
- ContentConfigChecksum: emptyContentConfigChecksum,
- }
- conditions.MarkTrue(obj, sourcev1.ArtifactInStorageCondition, meta.SucceededReason, "foo")
- },
- want: sreconcile.ResultEmpty,
- wantErr: true,
- wantRevision: "staging/",
- wantArtifactOutdated: false,
- },
- {
- name: "Optimized clone different ignore",
- reference: &sourcev1.GitRepositoryRef{
- Branch: "staging",
- },
- beforeFunc: func(obj *sourcev1.GitRepository, latestRev string) {
- // Set new ignore value.
- obj.Spec.Ignore = pointer.StringPtr("foo")
- // Add existing artifact on the object and storage.
- obj.Status = sourcev1.GitRepositoryStatus{
- Artifact: &sourcev1.Artifact{
- Revision: "staging/" + latestRev,
- Path: randStringRunes(10),
- },
- // Checksum with all the relevant fields unset.
- ContentConfigChecksum: emptyContentConfigChecksum,
- }
- conditions.MarkTrue(obj, sourcev1.ArtifactInStorageCondition, meta.SucceededReason, "foo")
- },
- want: sreconcile.ResultSuccess,
- wantRevision: "staging/",
- wantArtifactOutdated: false,
- },
- }
-
- server, err := gittestserver.NewTempGitServer()
- g.Expect(err).To(BeNil())
- defer os.RemoveAll(server.Root())
- server.AutoCreate()
- g.Expect(server.StartHTTP()).To(Succeed())
- defer server.StopHTTP()
-
- repoPath := "/test.git"
- localRepo, err := initGitRepo(server, "testdata/git/repository", git.DefaultBranch, repoPath)
- g.Expect(err).NotTo(HaveOccurred())
-
- headRef, err := localRepo.Head()
- g.Expect(err).NotTo(HaveOccurred())
-
- for _, branch := range branches {
- g.Expect(remoteBranchForHead(localRepo, headRef, branch)).To(Succeed())
- }
- for _, tag := range tags {
- g.Expect(remoteTagForHead(localRepo, headRef, tag)).To(Succeed())
- }
-
- r := &GitRepositoryReconciler{
- Client: fakeclient.NewClientBuilder().WithScheme(runtime.NewScheme()).Build(),
- EventRecorder: record.NewFakeRecorder(32),
- Storage: testStorage,
- features: features.FeatureGates(),
- Libgit2TransportInitialized: managed.Enabled,
- }
-
- for _, tt := range tests {
- t.Run(tt.name, func(t *testing.T) {
- obj := &sourcev1.GitRepository{
- ObjectMeta: metav1.ObjectMeta{
- GenerateName: "checkout-strategy-",
- },
- Spec: sourcev1.GitRepositorySpec{
- Interval: metav1.Duration{Duration: interval},
- Timeout: &metav1.Duration{Duration: timeout},
- URL: server.HTTPAddress() + repoPath,
- Reference: tt.reference,
- },
- }
-
- if obj.Spec.Reference != nil && obj.Spec.Reference.Commit == "" {
- obj.Spec.Reference.Commit = headRef.Hash().String()
- }
-
- for _, i := range testGitImplementations {
- t.Run(i, func(t *testing.T) {
- g := NewWithT(t)
-
- if tt.skipForImplementation == i {
- t.Skipf("Skipped for Git implementation %q", i)
- }
-
- tmpDir := t.TempDir()
-
- obj := obj.DeepCopy()
- obj.Spec.GitImplementation = i
-
- if tt.beforeFunc != nil {
- tt.beforeFunc(obj, headRef.Hash().String())
- }
-
- var commit git.Commit
- var includes artifactSet
- got, err := r.reconcileSource(ctx, obj, &commit, &includes, tmpDir)
- if err != nil {
- println(err.Error())
- }
- g.Expect(err != nil).To(Equal(tt.wantErr))
- g.Expect(got).To(Equal(tt.want))
- if tt.wantRevision != "" && !tt.wantErr {
- revision := strings.ReplaceAll(tt.wantRevision, "", headRef.Hash().String())
- g.Expect(commit.String()).To(Equal(revision))
- g.Expect(conditions.IsTrue(obj, sourcev1.ArtifactOutdatedCondition)).To(Equal(tt.wantArtifactOutdated))
- }
- })
- }
- })
- }
-}
-
-func TestGitRepositoryReconciler_reconcileArtifact(t *testing.T) {
- tests := []struct {
- name string
- dir string
- includes artifactSet
- beforeFunc func(obj *sourcev1.GitRepository)
- afterFunc func(t *WithT, obj *sourcev1.GitRepository)
- want sreconcile.Result
- wantErr bool
- assertConditions []metav1.Condition
- }{
- {
- name: "Archiving artifact to storage makes ArtifactInStorage=True",
- dir: "testdata/git/repository",
- beforeFunc: func(obj *sourcev1.GitRepository) {
- obj.Spec.Interval = metav1.Duration{Duration: interval}
- },
- afterFunc: func(t *WithT, obj *sourcev1.GitRepository) {
- t.Expect(obj.GetArtifact()).ToNot(BeNil())
- t.Expect(obj.Status.URL).ToNot(BeEmpty())
- },
- want: sreconcile.ResultSuccess,
- assertConditions: []metav1.Condition{
- *conditions.TrueCondition(sourcev1.ArtifactInStorageCondition, meta.SucceededReason, "stored artifact for revision 'main/revision'"),
- },
- },
- {
- name: "Archiving artifact to storage with includes makes ArtifactInStorage=True",
- dir: "testdata/git/repository",
- includes: artifactSet{&sourcev1.Artifact{Revision: "main/revision"}},
- beforeFunc: func(obj *sourcev1.GitRepository) {
- obj.Spec.Interval = metav1.Duration{Duration: interval}
- },
- afterFunc: func(t *WithT, obj *sourcev1.GitRepository) {
- t.Expect(obj.GetArtifact()).ToNot(BeNil())
- t.Expect(obj.GetArtifact().Checksum).To(Equal("60a3bf69f337cb5ec9ebd00abefbb6e7f2a2cf27158ecf438d52b2035b184172"))
- t.Expect(obj.Status.IncludedArtifacts).ToNot(BeEmpty())
- t.Expect(obj.Status.URL).ToNot(BeEmpty())
- },
- want: sreconcile.ResultSuccess,
- assertConditions: []metav1.Condition{
- *conditions.TrueCondition(sourcev1.ArtifactInStorageCondition, meta.SucceededReason, "stored artifact for revision 'main/revision'"),
- },
- },
- {
- name: "Up-to-date artifact should not update status",
- dir: "testdata/git/repository",
- includes: artifactSet{&sourcev1.Artifact{Revision: "main/revision"}},
- beforeFunc: func(obj *sourcev1.GitRepository) {
- obj.Spec.Interval = metav1.Duration{Duration: interval}
- obj.Status.Artifact = &sourcev1.Artifact{Revision: "main/revision"}
- obj.Status.IncludedArtifacts = []*sourcev1.Artifact{{Revision: "main/revision", Checksum: "some-checksum"}}
- obj.Status.ContentConfigChecksum = "sha256:f825d11a1c5987e033d2cb36449a3b0435a6abc9b2bfdbcdcc7c49bf40e9285d"
- },
- afterFunc: func(t *WithT, obj *sourcev1.GitRepository) {
- t.Expect(obj.Status.URL).To(BeEmpty())
- },
- want: sreconcile.ResultSuccess,
- assertConditions: []metav1.Condition{
- *conditions.TrueCondition(sourcev1.ArtifactInStorageCondition, meta.SucceededReason, "stored artifact for revision 'main/revision'"),
- },
- },
- {
- name: "Spec ignore overwrite is taken into account",
- dir: "testdata/git/repository",
- beforeFunc: func(obj *sourcev1.GitRepository) {
- obj.Spec.Interval = metav1.Duration{Duration: interval}
- obj.Spec.Ignore = pointer.StringPtr("!**.txt\n")
- },
- afterFunc: func(t *WithT, obj *sourcev1.GitRepository) {
- t.Expect(obj.GetArtifact()).ToNot(BeNil())
- t.Expect(obj.GetArtifact().Checksum).To(Equal("11f7f007dce5619bd79e6c57688261058d09f5271e802463ac39f2b9ead7cabd"))
- },
- want: sreconcile.ResultSuccess,
- assertConditions: []metav1.Condition{
- *conditions.TrueCondition(sourcev1.ArtifactInStorageCondition, meta.SucceededReason, "stored artifact for revision 'main/revision'"),
- },
- },
- {
- name: "source ignore for subdir ignore patterns",
- dir: "testdata/git/repowithsubdirs",
- beforeFunc: func(obj *sourcev1.GitRepository) {
- obj.Spec.Interval = metav1.Duration{Duration: interval}
- },
- afterFunc: func(t *WithT, obj *sourcev1.GitRepository) {
- t.Expect(obj.GetArtifact()).ToNot(BeNil())
- t.Expect(obj.GetArtifact().Checksum).To(Equal("29186e024dde5a414cfc990829c6b2e85f6b3bd2d950f50ca9f418f5d2261d79"))
- },
- want: sreconcile.ResultSuccess,
- assertConditions: []metav1.Condition{
- *conditions.TrueCondition(sourcev1.ArtifactInStorageCondition, meta.SucceededReason, "stored artifact for revision 'main/revision'"),
- },
- },
- {
- name: "Removes ArtifactOutdatedCondition after creating new artifact",
- dir: "testdata/git/repository",
- beforeFunc: func(obj *sourcev1.GitRepository) {
- obj.Spec.Interval = metav1.Duration{Duration: interval}
- conditions.MarkTrue(obj, sourcev1.ArtifactOutdatedCondition, "Foo", "")
- },
- afterFunc: func(t *WithT, obj *sourcev1.GitRepository) {
- t.Expect(obj.GetArtifact()).ToNot(BeNil())
- t.Expect(obj.GetArtifact().Checksum).To(Equal("60a3bf69f337cb5ec9ebd00abefbb6e7f2a2cf27158ecf438d52b2035b184172"))
- t.Expect(obj.Status.URL).ToNot(BeEmpty())
- },
- want: sreconcile.ResultSuccess,
- assertConditions: []metav1.Condition{
- *conditions.TrueCondition(sourcev1.ArtifactInStorageCondition, meta.SucceededReason, "stored artifact for revision 'main/revision'"),
- },
- },
- {
- name: "Creates latest symlink to the created artifact",
- dir: "testdata/git/repository",
- beforeFunc: func(obj *sourcev1.GitRepository) {
- obj.Spec.Interval = metav1.Duration{Duration: interval}
- },
- afterFunc: func(t *WithT, obj *sourcev1.GitRepository) {
- t.Expect(obj.GetArtifact()).ToNot(BeNil())
-
- localPath := testStorage.LocalPath(*obj.GetArtifact())
- symlinkPath := filepath.Join(filepath.Dir(localPath), "latest.tar.gz")
- targetFile, err := os.Readlink(symlinkPath)
- t.Expect(err).NotTo(HaveOccurred())
- t.Expect(localPath).To(Equal(targetFile))
- },
- want: sreconcile.ResultSuccess,
- assertConditions: []metav1.Condition{
- *conditions.TrueCondition(sourcev1.ArtifactInStorageCondition, meta.SucceededReason, "stored artifact for revision 'main/revision'"),
- },
- },
- {
- name: "Target path does not exists",
- dir: "testdata/git/foo",
- wantErr: true,
- assertConditions: []metav1.Condition{
- *conditions.TrueCondition(sourcev1.StorageOperationFailedCondition, sourcev1.StatOperationFailedReason, "failed to stat target artifact path"),
- },
- },
- {
- name: "Target path is not a directory",
- dir: "testdata/git/repository/foo.txt",
- wantErr: true,
- assertConditions: []metav1.Condition{
- *conditions.TrueCondition(sourcev1.StorageOperationFailedCondition, sourcev1.InvalidPathReason, "invalid target path"),
- },
- },
- }
- artifactSize := func(g *WithT, artifactURL string) *int64 {
- if artifactURL == "" {
- return nil
- }
- res, err := http.Get(artifactURL)
- g.Expect(err).NotTo(HaveOccurred())
- g.Expect(res.StatusCode).To(Equal(http.StatusOK))
- defer res.Body.Close()
- return &res.ContentLength
- }
-
- for _, tt := range tests {
- t.Run(tt.name, func(t *testing.T) {
- g := NewWithT(t)
-
- resetChmod(tt.dir, 0o755, 0o644)
-
- r := &GitRepositoryReconciler{
- EventRecorder: record.NewFakeRecorder(32),
- Storage: testStorage,
- features: features.FeatureGates(),
- }
-
- obj := &sourcev1.GitRepository{
- ObjectMeta: metav1.ObjectMeta{
- GenerateName: "reconcile-artifact-",
- Generation: 1,
- },
- Status: sourcev1.GitRepositoryStatus{},
- }
-
- if tt.beforeFunc != nil {
- tt.beforeFunc(obj)
- }
-
- commit := git.Commit{
- Hash: []byte("revision"),
- Reference: "refs/heads/main",
- }
-
- got, err := r.reconcileArtifact(ctx, obj, &commit, &tt.includes, tt.dir)
- g.Expect(obj.Status.Conditions).To(conditions.MatchConditions(tt.assertConditions))
- g.Expect(err != nil).To(Equal(tt.wantErr))
- g.Expect(got).To(Equal(tt.want))
-
- if obj.Status.Artifact != nil {
- g.Expect(obj.Status.Artifact.Size).To(Equal(artifactSize(g, obj.Status.Artifact.URL)))
- }
-
- if tt.afterFunc != nil {
- tt.afterFunc(g, obj)
- }
- })
- }
-}
-
-func TestGitRepositoryReconciler_reconcileInclude(t *testing.T) {
- g := NewWithT(t)
-
- server, err := testserver.NewTempArtifactServer()
- g.Expect(err).NotTo(HaveOccurred())
- storage, err := newTestStorage(server.HTTPServer)
- g.Expect(err).NotTo(HaveOccurred())
- defer os.RemoveAll(storage.BasePath)
-
- dependencyInterval := 5 * time.Second
-
- type dependency struct {
- name string
- withArtifact bool
- conditions []metav1.Condition
- }
-
- type include struct {
- name string
- fromPath string
- toPath string
- shouldExist bool
- }
-
- tests := []struct {
- name string
- dependencies []dependency
- includes []include
- beforeFunc func(obj *sourcev1.GitRepository)
- want sreconcile.Result
- wantErr bool
- assertConditions []metav1.Condition
- }{
- {
- name: "New includes make ArtifactOutdated=True",
- dependencies: []dependency{
- {
- name: "a",
- withArtifact: true,
- conditions: []metav1.Condition{
- *conditions.TrueCondition(meta.ReadyCondition, "Foo", "foo ready"),
- },
- },
- {
- name: "b",
- withArtifact: true,
- conditions: []metav1.Condition{
- *conditions.TrueCondition(meta.ReadyCondition, "Bar", "bar ready"),
- },
- },
- },
- includes: []include{
- {name: "a", toPath: "a/", shouldExist: true},
- {name: "b", toPath: "b/", shouldExist: true},
- },
- want: sreconcile.ResultSuccess,
- },
- {
- name: "Invalid FromPath makes IncludeUnavailable=True and returns error",
- dependencies: []dependency{
- {
- name: "a",
- withArtifact: true,
- },
- },
- includes: []include{
- {name: "a", fromPath: "../../../path", shouldExist: false},
- },
- wantErr: true,
- assertConditions: []metav1.Condition{
- *conditions.TrueCondition(sourcev1.StorageOperationFailedCondition, "CopyFailure", "unpack/path: no such file or directory"),
- },
- },
- }
- for _, tt := range tests {
- t.Run(tt.name, func(t *testing.T) {
- g := NewWithT(t)
-
- var depObjs []client.Object
- for _, d := range tt.dependencies {
- obj := &sourcev1.GitRepository{
- ObjectMeta: metav1.ObjectMeta{
- Name: d.name,
- },
- Status: sourcev1.GitRepositoryStatus{
- Conditions: d.conditions,
- },
- }
- if d.withArtifact {
- obj.Status.Artifact = &sourcev1.Artifact{
- Path: d.name + ".tar.gz",
- Revision: d.name,
- LastUpdateTime: metav1.Now(),
- }
- g.Expect(storage.Archive(obj.GetArtifact(), "testdata/git/repository", nil)).To(Succeed())
- }
- depObjs = append(depObjs, obj)
- }
-
- builder := fakeclient.NewClientBuilder().WithScheme(testEnv.GetScheme())
- if len(tt.dependencies) > 0 {
- builder.WithObjects(depObjs...)
- }
-
- r := &GitRepositoryReconciler{
- Client: builder.Build(),
- EventRecorder: record.NewFakeRecorder(32),
- Storage: storage,
- requeueDependency: dependencyInterval,
- features: features.FeatureGates(),
- }
-
- obj := &sourcev1.GitRepository{
- ObjectMeta: metav1.ObjectMeta{
- Name: "reconcile-include",
- },
- Spec: sourcev1.GitRepositorySpec{
- Interval: metav1.Duration{Duration: interval},
- },
- }
-
- for i, incl := range tt.includes {
- incl := sourcev1.GitRepositoryInclude{
- GitRepositoryRef: meta.LocalObjectReference{Name: incl.name},
- FromPath: incl.fromPath,
- ToPath: incl.toPath,
- }
- tt.includes[i].fromPath = incl.GetFromPath()
- tt.includes[i].toPath = incl.GetToPath()
- obj.Spec.Include = append(obj.Spec.Include, incl)
- }
-
- if tt.beforeFunc != nil {
- tt.beforeFunc(obj)
- }
-
- tmpDir := t.TempDir()
-
- var commit git.Commit
- var includes artifactSet
-
- // Build includes artifactSet.
- artifactSet, err := r.fetchIncludes(ctx, obj)
- g.Expect(err).ToNot(HaveOccurred())
- includes = *artifactSet
-
- got, err := r.reconcileInclude(ctx, obj, &commit, &includes, tmpDir)
- g.Expect(obj.GetConditions()).To(conditions.MatchConditions(tt.assertConditions))
- g.Expect(err != nil).To(Equal(tt.wantErr))
- if err == nil {
- g.Expect(len(includes)).To(Equal(len(tt.includes)))
- }
- g.Expect(got).To(Equal(tt.want))
- for _, i := range tt.includes {
- if i.toPath != "" {
- expect := g.Expect(filepath.Join(tmpDir, i.toPath))
- if i.shouldExist {
- expect.To(BeADirectory())
- } else {
- expect.NotTo(BeADirectory())
- }
- }
- if i.shouldExist {
- g.Expect(filepath.Join(tmpDir, i.toPath)).Should(BeADirectory())
- } else {
- g.Expect(filepath.Join(tmpDir, i.toPath)).ShouldNot(BeADirectory())
- }
- }
- })
- }
-}
-
-func TestGitRepositoryReconciler_reconcileStorage(t *testing.T) {
- tests := []struct {
- name string
- beforeFunc func(obj *sourcev1.GitRepository, storage *Storage) error
- want sreconcile.Result
- wantErr bool
- assertArtifact *sourcev1.Artifact
- assertConditions []metav1.Condition
- assertPaths []string
- }{
- {
- name: "garbage collects",
- beforeFunc: func(obj *sourcev1.GitRepository, storage *Storage) error {
- revisions := []string{"a", "b", "c", "d"}
- for n := range revisions {
- v := revisions[n]
- obj.Status.Artifact = &sourcev1.Artifact{
- Path: fmt.Sprintf("/reconcile-storage/%s.txt", v),
- Revision: v,
- }
- if err := testStorage.MkdirAll(*obj.Status.Artifact); err != nil {
- return err
- }
- if err := testStorage.AtomicWriteFile(obj.Status.Artifact, strings.NewReader(v), 0o640); err != nil {
- return err
- }
- if n != len(revisions)-1 {
- time.Sleep(time.Second * 1)
- }
- }
- testStorage.SetArtifactURL(obj.Status.Artifact)
- return nil
- },
- assertArtifact: &sourcev1.Artifact{
- Path: "/reconcile-storage/d.txt",
- Revision: "d",
- Checksum: "18ac3e7343f016890c510e93f935261169d9e3f565436429830faf0934f4f8e4",
- URL: testStorage.Hostname + "/reconcile-storage/d.txt",
- Size: int64p(int64(len("d"))),
- },
- assertPaths: []string{
- "/reconcile-storage/d.txt",
- "/reconcile-storage/c.txt",
- "!/reconcile-storage/b.txt",
- "!/reconcile-storage/a.txt",
- },
- want: sreconcile.ResultSuccess,
- },
- {
- name: "notices missing artifact in storage",
- beforeFunc: func(obj *sourcev1.GitRepository, storage *Storage) error {
- obj.Status.Artifact = &sourcev1.Artifact{
- Path: "/reconcile-storage/invalid.txt",
- Revision: "e",
- }
- testStorage.SetArtifactURL(obj.Status.Artifact)
- return nil
- },
- want: sreconcile.ResultSuccess,
- assertPaths: []string{
- "!/reconcile-storage/invalid.txt",
- },
- assertConditions: []metav1.Condition{
- *conditions.TrueCondition(meta.ReconcilingCondition, "NoArtifact", "no artifact for resource in storage"),
- },
- },
- {
- name: "updates hostname on diff from current",
- beforeFunc: func(obj *sourcev1.GitRepository, storage *Storage) error {
- obj.Status.Artifact = &sourcev1.Artifact{
- Path: "/reconcile-storage/hostname.txt",
- Revision: "f",
- Checksum: "3b9c358f36f0a31b6ad3e14f309c7cf198ac9246e8316f9ce543d5b19ac02b80",
- URL: "http://outdated.com/reconcile-storage/hostname.txt",
- }
- if err := testStorage.MkdirAll(*obj.Status.Artifact); err != nil {
- return err
- }
- if err := testStorage.AtomicWriteFile(obj.Status.Artifact, strings.NewReader("file"), 0o640); err != nil {
- return err
- }
- return nil
- },
- want: sreconcile.ResultSuccess,
- assertPaths: []string{
- "/reconcile-storage/hostname.txt",
- },
- assertArtifact: &sourcev1.Artifact{
- Path: "/reconcile-storage/hostname.txt",
- Revision: "f",
- Checksum: "3b9c358f36f0a31b6ad3e14f309c7cf198ac9246e8316f9ce543d5b19ac02b80",
- URL: testStorage.Hostname + "/reconcile-storage/hostname.txt",
- Size: int64p(int64(len("file"))),
- },
- },
- }
- for _, tt := range tests {
- t.Run(tt.name, func(t *testing.T) {
- g := NewWithT(t)
-
- defer func() {
- g.Expect(os.RemoveAll(filepath.Join(testStorage.BasePath, "/reconcile-storage"))).To(Succeed())
- }()
-
- r := &GitRepositoryReconciler{
- EventRecorder: record.NewFakeRecorder(32),
- Storage: testStorage,
- features: features.FeatureGates(),
- }
-
- obj := &sourcev1.GitRepository{
- ObjectMeta: metav1.ObjectMeta{
- GenerateName: "test-",
- },
- }
- if tt.beforeFunc != nil {
- g.Expect(tt.beforeFunc(obj, testStorage)).To(Succeed())
- }
-
- var c *git.Commit
- var as artifactSet
- got, err := r.reconcileStorage(context.TODO(), obj, c, &as, "")
- g.Expect(err != nil).To(Equal(tt.wantErr))
- g.Expect(got).To(Equal(tt.want))
-
- g.Expect(obj.Status.Artifact).To(MatchArtifact(tt.assertArtifact))
- if tt.assertArtifact != nil && tt.assertArtifact.URL != "" {
- g.Expect(obj.Status.Artifact.URL).To(Equal(tt.assertArtifact.URL))
- }
- g.Expect(obj.Status.Conditions).To(conditions.MatchConditions(tt.assertConditions))
-
- for _, p := range tt.assertPaths {
- absoluteP := filepath.Join(testStorage.BasePath, p)
- if !strings.HasPrefix(p, "!") {
- g.Expect(absoluteP).To(BeAnExistingFile())
- continue
- }
- g.Expect(absoluteP).NotTo(BeAnExistingFile())
- }
- })
- }
-}
-
-func TestGitRepositoryReconciler_reconcileDelete(t *testing.T) {
- g := NewWithT(t)
-
- r := &GitRepositoryReconciler{
- EventRecorder: record.NewFakeRecorder(32),
- Storage: testStorage,
- features: features.FeatureGates(),
- }
-
- obj := &sourcev1.GitRepository{
- ObjectMeta: metav1.ObjectMeta{
- Name: "reconcile-delete-",
- DeletionTimestamp: &metav1.Time{Time: time.Now()},
- Finalizers: []string{
- sourcev1.SourceFinalizer,
- },
- },
- Status: sourcev1.GitRepositoryStatus{},
- }
-
- artifact := testStorage.NewArtifactFor(sourcev1.GitRepositoryKind, obj.GetObjectMeta(), "revision", "foo.txt")
- obj.Status.Artifact = &artifact
-
- got, err := r.reconcileDelete(ctx, obj)
- g.Expect(err).NotTo(HaveOccurred())
- g.Expect(got).To(Equal(sreconcile.ResultEmpty))
- g.Expect(controllerutil.ContainsFinalizer(obj, sourcev1.SourceFinalizer)).To(BeFalse())
- g.Expect(obj.Status.Artifact).To(BeNil())
-}
-
-func TestGitRepositoryReconciler_verifyCommitSignature(t *testing.T) {
- tests := []struct {
- name string
- secret *corev1.Secret
- commit git.Commit
- beforeFunc func(obj *sourcev1.GitRepository)
- want sreconcile.Result
- wantErr bool
- assertConditions []metav1.Condition
- }{
- {
- name: "Valid commit makes SourceVerifiedCondition=True",
- secret: &corev1.Secret{
- ObjectMeta: metav1.ObjectMeta{
- Name: "existing",
- },
- Data: map[string][]byte{
- "foo": []byte(armoredKeyRingFixture),
- },
- },
- commit: git.Commit{
- Hash: []byte("shasum"),
- Encoded: []byte(encodedCommitFixture),
- Signature: signatureCommitFixture,
- },
- beforeFunc: func(obj *sourcev1.GitRepository) {
- obj.Spec.Interval = metav1.Duration{Duration: interval}
- obj.Spec.Verification = &sourcev1.GitRepositoryVerification{
- Mode: "head",
- SecretRef: meta.LocalObjectReference{
- Name: "existing",
- },
- }
- },
- want: sreconcile.ResultSuccess,
- assertConditions: []metav1.Condition{
- *conditions.TrueCondition(sourcev1.SourceVerifiedCondition, meta.SucceededReason, "verified signature of commit 'shasum'"),
- },
- },
- {
- name: "Invalid commit sets no SourceVerifiedCondition and returns error",
- secret: &corev1.Secret{
- ObjectMeta: metav1.ObjectMeta{
- Name: "existing",
- },
- },
- commit: git.Commit{
- Hash: []byte("shasum"),
- Encoded: []byte(malformedEncodedCommitFixture),
- Signature: signatureCommitFixture,
- },
- beforeFunc: func(obj *sourcev1.GitRepository) {
- obj.Spec.Interval = metav1.Duration{Duration: interval}
- obj.Spec.Verification = &sourcev1.GitRepositoryVerification{
- Mode: "head",
- SecretRef: meta.LocalObjectReference{
- Name: "existing",
- },
- }
- },
- wantErr: true,
- assertConditions: []metav1.Condition{
- *conditions.FalseCondition(sourcev1.SourceVerifiedCondition, "InvalidCommitSignature", "signature verification of commit 'shasum' failed: failed to verify commit with any of the given key rings"),
- },
- },
- {
- name: "Secret get failure sets no SourceVerifiedCondition and returns error",
- beforeFunc: func(obj *sourcev1.GitRepository) {
- obj.Spec.Interval = metav1.Duration{Duration: interval}
- obj.Spec.Verification = &sourcev1.GitRepositoryVerification{
- Mode: "head",
- SecretRef: meta.LocalObjectReference{
- Name: "none-existing",
- },
- }
- },
- wantErr: true,
- assertConditions: []metav1.Condition{
- *conditions.FalseCondition(sourcev1.SourceVerifiedCondition, "VerificationError", "PGP public keys secret error: secrets \"none-existing\" not found"),
- },
- },
- {
- name: "Nil verification in spec deletes SourceVerified condition",
- beforeFunc: func(obj *sourcev1.GitRepository) {
- obj.Spec.Interval = metav1.Duration{Duration: interval}
- conditions.MarkTrue(obj, sourcev1.SourceVerifiedCondition, "Foo", "")
- },
- want: sreconcile.ResultSuccess,
- assertConditions: []metav1.Condition{},
- },
- {
- name: "Empty verification mode in spec deletes SourceVerified condition",
- beforeFunc: func(obj *sourcev1.GitRepository) {
- obj.Spec.Interval = metav1.Duration{Duration: interval}
- obj.Spec.Verification = &sourcev1.GitRepositoryVerification{}
- conditions.MarkTrue(obj, sourcev1.SourceVerifiedCondition, "Foo", "")
- },
- want: sreconcile.ResultSuccess,
- assertConditions: []metav1.Condition{},
- },
- }
-
- for _, tt := range tests {
- t.Run(tt.name, func(t *testing.T) {
- g := NewWithT(t)
-
- builder := fakeclient.NewClientBuilder().WithScheme(testEnv.GetScheme())
- if tt.secret != nil {
- builder.WithObjects(tt.secret)
- }
-
- r := &GitRepositoryReconciler{
- EventRecorder: record.NewFakeRecorder(32),
- Client: builder.Build(),
- features: features.FeatureGates(),
- }
-
- obj := &sourcev1.GitRepository{
- ObjectMeta: metav1.ObjectMeta{
- GenerateName: "verify-commit-",
- Generation: 1,
- },
- Status: sourcev1.GitRepositoryStatus{},
- }
-
- if tt.beforeFunc != nil {
- tt.beforeFunc(obj)
- }
-
- got, err := r.verifyCommitSignature(context.TODO(), obj, tt.commit)
- g.Expect(obj.Status.Conditions).To(conditions.MatchConditions(tt.assertConditions))
- g.Expect(err != nil).To(Equal(tt.wantErr))
- g.Expect(got).To(Equal(tt.want))
- })
- }
-}
-
-func TestGitRepositoryReconciler_ConditionsUpdate(t *testing.T) {
- g := NewWithT(t)
-
- server, err := gittestserver.NewTempGitServer()
- g.Expect(err).NotTo(HaveOccurred())
- defer os.RemoveAll(server.Root())
- server.AutoCreate()
- g.Expect(server.StartHTTP()).To(Succeed())
- defer server.StopHTTP()
-
- repoPath := "/test.git"
- _, err = initGitRepo(server, "testdata/git/repository", git.DefaultBranch, repoPath)
- g.Expect(err).NotTo(HaveOccurred())
-
- tests := []struct {
- name string
- beforeFunc func(obj *sourcev1.GitRepository)
- want ctrl.Result
- wantErr bool
- assertConditions []metav1.Condition
- }{
- {
- name: "no failure condition",
- want: ctrl.Result{RequeueAfter: interval},
- assertConditions: []metav1.Condition{
- *conditions.TrueCondition(meta.ReadyCondition, "Succeeded", "stored artifact for revision"),
- *conditions.TrueCondition(sourcev1.ArtifactInStorageCondition, "Succeeded", "stored artifact for revision"),
- },
- },
- {
- name: "reconciling condition",
- beforeFunc: func(obj *sourcev1.GitRepository) {
- conditions.MarkTrue(obj, meta.ReconcilingCondition, "Foo", "")
- },
- want: ctrl.Result{RequeueAfter: interval},
- assertConditions: []metav1.Condition{
- *conditions.TrueCondition(meta.ReadyCondition, "Succeeded", "stored artifact for revision"),
- *conditions.TrueCondition(sourcev1.ArtifactInStorageCondition, "Succeeded", "stored artifact for revision"),
- },
- },
- {
- name: "stalled condition",
- beforeFunc: func(obj *sourcev1.GitRepository) {
- conditions.MarkTrue(obj, meta.StalledCondition, "Foo", "")
- },
- want: ctrl.Result{RequeueAfter: interval},
- assertConditions: []metav1.Condition{
- *conditions.TrueCondition(meta.ReadyCondition, "Succeeded", "stored artifact for revision"),
- *conditions.TrueCondition(sourcev1.ArtifactInStorageCondition, "Succeeded", "stored artifact for revision"),
- },
- },
- {
- name: "mixed failed conditions",
- beforeFunc: func(obj *sourcev1.GitRepository) {
- conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, "Foo", "")
- conditions.MarkTrue(obj, sourcev1.IncludeUnavailableCondition, "Foo", "")
- conditions.MarkTrue(obj, sourcev1.SourceVerifiedCondition, "Foo", "")
- conditions.MarkTrue(obj, sourcev1.ArtifactOutdatedCondition, "Foo", "")
- },
- want: ctrl.Result{RequeueAfter: interval},
- assertConditions: []metav1.Condition{
- *conditions.TrueCondition(meta.ReadyCondition, "Succeeded", "stored artifact for revision"),
- *conditions.TrueCondition(sourcev1.ArtifactInStorageCondition, "Succeeded", "stored artifact for revision"),
- },
- },
- {
- name: "reconciling and failed conditions",
- beforeFunc: func(obj *sourcev1.GitRepository) {
- conditions.MarkTrue(obj, meta.ReconcilingCondition, "Foo", "")
- conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, "Foo", "")
- },
- want: ctrl.Result{RequeueAfter: interval},
- assertConditions: []metav1.Condition{
- *conditions.TrueCondition(meta.ReadyCondition, "Succeeded", "stored artifact for revision"),
- *conditions.TrueCondition(sourcev1.ArtifactInStorageCondition, "Succeeded", "stored artifact for revision"),
- },
- },
- {
- name: "stalled and failed conditions",
- beforeFunc: func(obj *sourcev1.GitRepository) {
- conditions.MarkTrue(obj, meta.StalledCondition, "Foo", "")
- conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, "Foo", "")
- },
- want: ctrl.Result{RequeueAfter: interval},
- assertConditions: []metav1.Condition{
- *conditions.TrueCondition(meta.ReadyCondition, "Succeeded", "stored artifact for revision"),
- *conditions.TrueCondition(sourcev1.ArtifactInStorageCondition, "Succeeded", "stored artifact for revision"),
- },
- },
- }
-
- for _, tt := range tests {
- t.Run(tt.name, func(t *testing.T) {
- g := NewWithT(t)
-
- obj := &sourcev1.GitRepository{
- ObjectMeta: metav1.ObjectMeta{
- Name: "condition-update",
- Namespace: "default",
- Finalizers: []string{sourcev1.SourceFinalizer},
- },
- Spec: sourcev1.GitRepositorySpec{
- URL: server.HTTPAddress() + repoPath,
- GitImplementation: sourcev1.GoGitImplementation,
- Interval: metav1.Duration{Duration: interval},
- Timeout: &metav1.Duration{Duration: timeout},
- },
- }
-
- if tt.beforeFunc != nil {
- tt.beforeFunc(obj)
- }
-
- builder := fakeclient.NewClientBuilder().WithScheme(testEnv.GetScheme()).WithObjects(obj)
-
- r := &GitRepositoryReconciler{
- Client: builder.Build(),
- EventRecorder: record.NewFakeRecorder(32),
- Storage: testStorage,
- features: features.FeatureGates(),
- Libgit2TransportInitialized: managed.Enabled,
- }
-
- key := client.ObjectKeyFromObject(obj)
- res, err := r.Reconcile(context.TODO(), ctrl.Request{NamespacedName: key})
- g.Expect(err != nil).To(Equal(tt.wantErr))
- g.Expect(res).To(Equal(tt.want))
-
- updatedObj := &sourcev1.GitRepository{}
- err = r.Get(ctx, key, updatedObj)
- g.Expect(err).NotTo(HaveOccurred())
- g.Expect(updatedObj.GetConditions()).To(conditions.MatchConditions(tt.assertConditions))
- })
- }
-}
-
-// helpers
-
-func initGitRepo(server *gittestserver.GitServer, fixture, branch, repositoryPath string) (*gogit.Repository, error) {
- fs := memfs.New()
- repo, err := gogit.Init(memory.NewStorage(), fs)
- if err != nil {
- return nil, err
- }
-
- branchRef := plumbing.NewBranchReferenceName(branch)
- if err = repo.CreateBranch(&config.Branch{
- Name: branch,
- Remote: gogit.DefaultRemoteName,
- Merge: branchRef,
- }); err != nil {
- return nil, err
- }
-
- err = commitFromFixture(repo, fixture)
- if err != nil {
- return nil, err
- }
-
- if server.HTTPAddress() == "" {
- if err = server.StartHTTP(); err != nil {
- return nil, err
- }
- defer server.StopHTTP()
- }
- if _, err = repo.CreateRemote(&config.RemoteConfig{
- Name: gogit.DefaultRemoteName,
- URLs: []string{server.HTTPAddressWithCredentials() + repositoryPath},
- }); err != nil {
- return nil, err
- }
-
- if err = repo.Push(&gogit.PushOptions{
- RefSpecs: []config.RefSpec{"refs/heads/*:refs/heads/*"},
- }); err != nil {
- return nil, err
- }
-
- return repo, nil
-}
-
-func Test_commitFromFixture(t *testing.T) {
- g := NewWithT(t)
-
- repo, err := gogit.Init(memory.NewStorage(), memfs.New())
- g.Expect(err).ToNot(HaveOccurred())
-
- err = commitFromFixture(repo, "testdata/git/repository")
- g.Expect(err).ToNot(HaveOccurred())
-}
-
-func commitFromFixture(repo *gogit.Repository, fixture string) error {
- working, err := repo.Worktree()
- if err != nil {
- return err
- }
- fs := working.Filesystem
-
- if err = filepath.Walk(fixture, func(path string, info os.FileInfo, err error) error {
- if err != nil {
- return err
- }
- if info.IsDir() {
- return fs.MkdirAll(fs.Join(path[len(fixture):]), info.Mode())
- }
-
- fileBytes, err := os.ReadFile(path)
- if err != nil {
- return err
- }
-
- ff, err := fs.Create(path[len(fixture):])
- if err != nil {
- return err
- }
- defer ff.Close()
-
- _, err = ff.Write(fileBytes)
- return err
- }); err != nil {
- return err
- }
-
- _, err = working.Add(".")
- if err != nil {
- return err
- }
-
- if _, err = working.Commit("Fixtures from "+fixture, &gogit.CommitOptions{
- Author: &object.Signature{
- Name: "Jane Doe",
- Email: "jane@example.com",
- When: time.Now(),
- },
- }); err != nil {
- return err
- }
-
- return nil
-}
-
-func remoteBranchForHead(repo *gogit.Repository, head *plumbing.Reference, branch string) error {
- refSpec := fmt.Sprintf("%s:refs/heads/%s", head.Name(), branch)
- return repo.Push(&gogit.PushOptions{
- RemoteName: "origin",
- RefSpecs: []config.RefSpec{config.RefSpec(refSpec)},
- Force: true,
- })
-}
-
-func remoteTagForHead(repo *gogit.Repository, head *plumbing.Reference, tag string) error {
- if _, err := repo.CreateTag(tag, head.Hash(), &gogit.CreateTagOptions{
- // Not setting this seems to make things flaky
- // Expected success, but got an error:
- // <*errors.errorString | 0xc0000f6350>: {
- // s: "tagger field is required",
- // }
- // tagger field is required
- Tagger: &object.Signature{
- Name: "Jane Doe",
- Email: "jane@example.com",
- When: time.Now(),
- },
- Message: tag,
- }); err != nil {
- return err
- }
- refSpec := fmt.Sprintf("refs/tags/%[1]s:refs/tags/%[1]s", tag)
- return repo.Push(&gogit.PushOptions{
- RefSpecs: []config.RefSpec{config.RefSpec(refSpec)},
- })
-}
-
-func TestGitRepositoryReconciler_statusConditions(t *testing.T) {
- tests := []struct {
- name string
- beforeFunc func(obj *sourcev1.GitRepository)
- assertConditions []metav1.Condition
- }{
- {
- name: "multiple positive conditions",
- beforeFunc: func(obj *sourcev1.GitRepository) {
- conditions.MarkTrue(obj, sourcev1.ArtifactInStorageCondition, meta.SucceededReason, "stored artifact for revision")
- conditions.MarkTrue(obj, sourcev1.SourceVerifiedCondition, meta.SucceededReason, "verified signature of commit")
- },
- assertConditions: []metav1.Condition{
- *conditions.TrueCondition(meta.ReadyCondition, meta.SucceededReason, "stored artifact for revision"),
- *conditions.TrueCondition(sourcev1.ArtifactInStorageCondition, meta.SucceededReason, "stored artifact for revision"),
- *conditions.TrueCondition(sourcev1.SourceVerifiedCondition, meta.SucceededReason, "verified signature of commit"),
- },
- },
- {
- name: "multiple failures",
- beforeFunc: func(obj *sourcev1.GitRepository) {
- conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, sourcev1.AuthenticationFailedReason, "failed to get secret")
- conditions.MarkTrue(obj, sourcev1.IncludeUnavailableCondition, "IllegalPath", "some error")
- conditions.MarkTrue(obj, sourcev1.StorageOperationFailedCondition, sourcev1.DirCreationFailedReason, "failed to create directory")
- conditions.MarkTrue(obj, sourcev1.ArtifactOutdatedCondition, "NewRevision", "some error")
- },
- assertConditions: []metav1.Condition{
- *conditions.FalseCondition(meta.ReadyCondition, sourcev1.DirCreationFailedReason, "failed to create directory"),
- *conditions.TrueCondition(sourcev1.FetchFailedCondition, sourcev1.AuthenticationFailedReason, "failed to get secret"),
- *conditions.TrueCondition(sourcev1.IncludeUnavailableCondition, "IllegalPath", "some error"),
- *conditions.TrueCondition(sourcev1.StorageOperationFailedCondition, sourcev1.DirCreationFailedReason, "failed to create directory"),
- *conditions.TrueCondition(sourcev1.ArtifactOutdatedCondition, "NewRevision", "some error"),
- },
- },
- {
- name: "mixed positive and negative conditions",
- beforeFunc: func(obj *sourcev1.GitRepository) {
- conditions.MarkTrue(obj, sourcev1.ArtifactInStorageCondition, meta.SucceededReason, "stored artifact for revision")
- conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, sourcev1.AuthenticationFailedReason, "failed to get secret")
- },
- assertConditions: []metav1.Condition{
- *conditions.FalseCondition(meta.ReadyCondition, sourcev1.AuthenticationFailedReason, "failed to get secret"),
- *conditions.TrueCondition(sourcev1.FetchFailedCondition, sourcev1.AuthenticationFailedReason, "failed to get secret"),
- *conditions.TrueCondition(sourcev1.ArtifactInStorageCondition, meta.SucceededReason, "stored artifact for revision"),
- },
- },
- }
-
- for _, tt := range tests {
- t.Run(tt.name, func(t *testing.T) {
- g := NewWithT(t)
-
- obj := &sourcev1.GitRepository{
- TypeMeta: metav1.TypeMeta{
- Kind: sourcev1.GitRepositoryKind,
- APIVersion: "source.toolkit.fluxcd.io/v1beta2",
- },
- ObjectMeta: metav1.ObjectMeta{
- Name: "gitrepo",
- Namespace: "foo",
- },
- }
- clientBuilder := fake.NewClientBuilder()
- clientBuilder.WithObjects(obj)
- c := clientBuilder.Build()
-
- patchHelper, err := patch.NewHelper(obj, c)
- g.Expect(err).ToNot(HaveOccurred())
-
- if tt.beforeFunc != nil {
- tt.beforeFunc(obj)
- }
-
- ctx := context.TODO()
- recResult := sreconcile.ResultSuccess
- var retErr error
-
- summarizeHelper := summarize.NewHelper(record.NewFakeRecorder(32), patchHelper)
- summarizeOpts := []summarize.Option{
- summarize.WithConditions(gitRepositoryReadyCondition),
- summarize.WithReconcileResult(recResult),
- summarize.WithReconcileError(retErr),
- summarize.WithIgnoreNotFound(),
- summarize.WithResultBuilder(sreconcile.AlwaysRequeueResultBuilder{RequeueAfter: obj.GetRequeueAfter()}),
- summarize.WithPatchFieldOwner("source-controller"),
- }
- _, retErr = summarizeHelper.SummarizeAndPatch(ctx, obj, summarizeOpts...)
-
- key := client.ObjectKeyFromObject(obj)
- g.Expect(c.Get(ctx, key, obj)).ToNot(HaveOccurred())
- g.Expect(obj.GetConditions()).To(conditions.MatchConditions(tt.assertConditions))
- })
- }
-}
-
-func TestGitRepositoryReconciler_notify(t *testing.T) {
- concreteCommit := git.Commit{
- Hash: git.Hash("some-hash"),
- Message: "test commit",
- Encoded: []byte("content"),
- }
- partialCommit := git.Commit{
- Hash: git.Hash("some-hash"),
- }
-
- noopErr := serror.NewGeneric(fmt.Errorf("some no-op error"), "NoOpReason")
- noopErr.Ignore = true
-
- tests := []struct {
- name string
- res sreconcile.Result
- resErr error
- oldObjBeforeFunc func(obj *sourcev1.GitRepository)
- newObjBeforeFunc func(obj *sourcev1.GitRepository)
- commit git.Commit
- wantEvent string
- }{
- {
- name: "error - no event",
- res: sreconcile.ResultEmpty,
- resErr: errors.New("some error"),
- },
- {
- name: "new artifact",
- res: sreconcile.ResultSuccess,
- resErr: nil,
- newObjBeforeFunc: func(obj *sourcev1.GitRepository) {
- obj.Status.Artifact = &sourcev1.Artifact{Revision: "xxx", Checksum: "yyy"}
- },
- commit: concreteCommit,
- wantEvent: "Normal NewArtifact stored artifact for commit 'test commit'",
- },
- {
- name: "recovery from failure",
- res: sreconcile.ResultSuccess,
- resErr: nil,
- oldObjBeforeFunc: func(obj *sourcev1.GitRepository) {
- obj.Status.Artifact = &sourcev1.Artifact{Revision: "xxx", Checksum: "yyy"}
- conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, sourcev1.GitOperationFailedReason, "fail")
- conditions.MarkFalse(obj, meta.ReadyCondition, meta.FailedReason, "foo")
- },
- newObjBeforeFunc: func(obj *sourcev1.GitRepository) {
- obj.Status.Artifact = &sourcev1.Artifact{Revision: "xxx", Checksum: "yyy"}
- conditions.MarkTrue(obj, meta.ReadyCondition, meta.SucceededReason, "ready")
- },
- commit: concreteCommit,
- wantEvent: "Normal Succeeded stored artifact for commit 'test commit'",
- },
- {
- name: "recovery and new artifact",
- res: sreconcile.ResultSuccess,
- resErr: nil,
- oldObjBeforeFunc: func(obj *sourcev1.GitRepository) {
- obj.Status.Artifact = &sourcev1.Artifact{Revision: "xxx", Checksum: "yyy"}
- conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, sourcev1.GitOperationFailedReason, "fail")
- conditions.MarkFalse(obj, meta.ReadyCondition, meta.FailedReason, "foo")
- },
- newObjBeforeFunc: func(obj *sourcev1.GitRepository) {
- obj.Status.Artifact = &sourcev1.Artifact{Revision: "aaa", Checksum: "bbb"}
- conditions.MarkTrue(obj, meta.ReadyCondition, meta.SucceededReason, "ready")
- },
- commit: concreteCommit,
- wantEvent: "Normal NewArtifact stored artifact for commit 'test commit'",
- },
- {
- name: "no updates",
- res: sreconcile.ResultSuccess,
- resErr: nil,
- oldObjBeforeFunc: func(obj *sourcev1.GitRepository) {
- obj.Status.Artifact = &sourcev1.Artifact{Revision: "xxx", Checksum: "yyy"}
- conditions.MarkTrue(obj, meta.ReadyCondition, meta.SucceededReason, "ready")
- },
- newObjBeforeFunc: func(obj *sourcev1.GitRepository) {
- obj.Status.Artifact = &sourcev1.Artifact{Revision: "xxx", Checksum: "yyy"}
- conditions.MarkTrue(obj, meta.ReadyCondition, meta.SucceededReason, "ready")
- },
- },
- {
- name: "no-op error result",
- res: sreconcile.ResultEmpty,
- resErr: noopErr,
- oldObjBeforeFunc: func(obj *sourcev1.GitRepository) {
- obj.Status.Artifact = &sourcev1.Artifact{Revision: "xxx", Checksum: "yyy"}
- conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, sourcev1.GitOperationFailedReason, "fail")
- conditions.MarkFalse(obj, meta.ReadyCondition, meta.FailedReason, "foo")
- },
- newObjBeforeFunc: func(obj *sourcev1.GitRepository) {
- obj.Status.Artifact = &sourcev1.Artifact{Revision: "xxx", Checksum: "yyy"}
- conditions.MarkTrue(obj, meta.ReadyCondition, meta.SucceededReason, "ready")
- },
- commit: partialCommit, // no-op will always result in partial commit.
- wantEvent: "Normal Succeeded stored artifact for commit 'HEAD/some-hash'",
- },
- }
-
- for _, tt := range tests {
- t.Run(tt.name, func(t *testing.T) {
- g := NewWithT(t)
- recorder := record.NewFakeRecorder(32)
-
- oldObj := &sourcev1.GitRepository{}
- newObj := oldObj.DeepCopy()
-
- if tt.oldObjBeforeFunc != nil {
- tt.oldObjBeforeFunc(oldObj)
- }
- if tt.newObjBeforeFunc != nil {
- tt.newObjBeforeFunc(newObj)
- }
-
- reconciler := &GitRepositoryReconciler{
- EventRecorder: recorder,
- features: features.FeatureGates(),
- }
- reconciler.notify(ctx, oldObj, newObj, tt.commit, tt.res, tt.resErr)
-
- select {
- case x, ok := <-recorder.Events:
- g.Expect(ok).To(Equal(tt.wantEvent != ""), "unexpected event received")
- if tt.wantEvent != "" {
- g.Expect(x).To(ContainSubstring(tt.wantEvent))
- }
- default:
- if tt.wantEvent != "" {
- t.Errorf("expected some event to be emitted")
- }
- }
- })
- }
-}
-
-func TestGitRepositoryReconciler_fetchIncludes(t *testing.T) {
- type dependency struct {
- name string
- withArtifact bool
- conditions []metav1.Condition
- }
-
- type include struct {
- name string
- fromPath string
- toPath string
- shouldExist bool
- }
-
- tests := []struct {
- name string
- dependencies []dependency
- includes []include
- beforeFunc func(obj *sourcev1.GitRepository)
- wantErr bool
- wantArtifactSet artifactSet
- assertConditions []metav1.Condition
- }{
- {
- name: "Existing includes",
- dependencies: []dependency{
- {
- name: "a",
- withArtifact: true,
- conditions: []metav1.Condition{
- *conditions.TrueCondition(meta.ReadyCondition, "Foo", "foo ready"),
- },
- },
- {
- name: "b",
- withArtifact: true,
- conditions: []metav1.Condition{
- *conditions.TrueCondition(meta.ReadyCondition, "Bar", "bar ready"),
- },
- },
- },
- includes: []include{
- {name: "a", toPath: "a/", shouldExist: true},
- {name: "b", toPath: "b/", shouldExist: true},
- },
- wantErr: false,
- wantArtifactSet: []*sourcev1.Artifact{
- {Revision: "a"},
- {Revision: "b"},
- },
- },
- {
- name: "Include get failure",
- includes: []include{
- {name: "a", toPath: "a/"},
- },
- wantErr: true,
- assertConditions: []metav1.Condition{
- *conditions.TrueCondition(sourcev1.IncludeUnavailableCondition, "NotFound", "could not get resource for include 'a': gitrepositories.source.toolkit.fluxcd.io \"a\" not found"),
- },
- },
- {
- name: "Include without an artifact makes IncludeUnavailable=True",
- dependencies: []dependency{
- {
- name: "a",
- withArtifact: false,
- conditions: []metav1.Condition{
- *conditions.TrueCondition(sourcev1.IncludeUnavailableCondition, "Foo", "foo unavailable"),
- },
- },
- },
- includes: []include{
- {name: "a", toPath: "a/"},
- },
- wantErr: true,
- assertConditions: []metav1.Condition{
- *conditions.TrueCondition(sourcev1.IncludeUnavailableCondition, "NoArtifact", "no artifact available for include 'a'"),
- },
- },
- {
- name: "Outdated IncludeUnavailable is removed",
- beforeFunc: func(obj *sourcev1.GitRepository) {
- conditions.MarkTrue(obj, sourcev1.IncludeUnavailableCondition, "NoArtifact", "")
- },
- assertConditions: []metav1.Condition{},
- },
- }
-
- for _, tt := range tests {
- t.Run(tt.name, func(t *testing.T) {
- g := NewWithT(t)
-
- var depObjs []client.Object
- for _, d := range tt.dependencies {
- obj := &sourcev1.GitRepository{
- ObjectMeta: metav1.ObjectMeta{
- Name: d.name,
- },
- Status: sourcev1.GitRepositoryStatus{
- Conditions: d.conditions,
- },
- }
- if d.withArtifact {
- obj.Status.Artifact = &sourcev1.Artifact{
- Path: d.name + ".tar.gz",
- Revision: d.name,
- LastUpdateTime: metav1.Now(),
- }
- }
- depObjs = append(depObjs, obj)
- }
-
- builder := fakeclient.NewClientBuilder().WithScheme(testEnv.GetScheme())
- if len(tt.dependencies) > 0 {
- builder.WithObjects(depObjs...)
- }
-
- r := &GitRepositoryReconciler{
- Client: builder.Build(),
- EventRecorder: record.NewFakeRecorder(32),
- }
-
- obj := &sourcev1.GitRepository{
- ObjectMeta: metav1.ObjectMeta{
- Name: "reconcile-include",
- },
- Spec: sourcev1.GitRepositorySpec{
- Interval: metav1.Duration{Duration: interval},
- },
- }
-
- for i, incl := range tt.includes {
- incl := sourcev1.GitRepositoryInclude{
- GitRepositoryRef: meta.LocalObjectReference{Name: incl.name},
- FromPath: incl.fromPath,
- ToPath: incl.toPath,
- }
- tt.includes[i].fromPath = incl.GetFromPath()
- tt.includes[i].toPath = incl.GetToPath()
- obj.Spec.Include = append(obj.Spec.Include, incl)
- }
-
- gotArtifactSet, err := r.fetchIncludes(ctx, obj)
- g.Expect(err != nil).To(Equal(tt.wantErr))
- g.Expect(obj.GetConditions()).To(conditions.MatchConditions(tt.assertConditions))
- if !tt.wantErr && gotArtifactSet != nil {
- g.Expect(gotArtifactSet.Diff(tt.wantArtifactSet)).To(BeFalse())
- }
- })
- }
-}
-
-func TestGitRepositoryReconciler_calculateContentConfigChecksum(t *testing.T) {
- g := NewWithT(t)
- obj := &sourcev1.GitRepository{}
- r := &GitRepositoryReconciler{}
-
- emptyChecksum := r.calculateContentConfigChecksum(obj, nil)
- g.Expect(emptyChecksum).To(Equal(emptyContentConfigChecksum))
-
- // Ignore modified.
- obj.Spec.Ignore = pointer.String("some-rule")
- ignoreModChecksum := r.calculateContentConfigChecksum(obj, nil)
- g.Expect(emptyChecksum).ToNot(Equal(ignoreModChecksum))
-
- // Recurse submodules modified.
- obj.Spec.RecurseSubmodules = true
- submodModChecksum := r.calculateContentConfigChecksum(obj, nil)
- g.Expect(ignoreModChecksum).ToNot(Equal(submodModChecksum))
-
- // Include modified.
- obj.Spec.Include = []sourcev1.GitRepositoryInclude{
- {
- GitRepositoryRef: meta.LocalObjectReference{Name: "foo"},
- FromPath: "aaa",
- ToPath: "bbb",
- },
- }
- artifacts := &artifactSet{
- &sourcev1.Artifact{Revision: "some-revision-1", Checksum: "some-checksum-1"},
- }
- includeModChecksum := r.calculateContentConfigChecksum(obj, artifacts)
- g.Expect(submodModChecksum).ToNot(Equal(includeModChecksum))
-
- // Artifact modified revision.
- artifacts = &artifactSet{
- &sourcev1.Artifact{Revision: "some-revision-2", Checksum: "some-checksum-1"},
- }
- artifactModChecksum := r.calculateContentConfigChecksum(obj, artifacts)
- g.Expect(includeModChecksum).ToNot(Equal(artifactModChecksum))
-
- // Artifact modified checksum.
- artifacts = &artifactSet{
- &sourcev1.Artifact{Revision: "some-revision-2", Checksum: "some-checksum-2"},
- }
- artifactCsumModChecksum := r.calculateContentConfigChecksum(obj, artifacts)
- g.Expect(artifactModChecksum).ToNot(Equal(artifactCsumModChecksum))
-}
-
-func resetChmod(path string, dirMode os.FileMode, fileMode os.FileMode) error {
- err := filepath.Walk(path,
- func(path string, info os.FileInfo, err error) error {
- if err != nil {
- return err
- }
-
- if info.IsDir() && info.Mode() != dirMode {
- os.Chmod(path, dirMode)
- } else if !info.IsDir() && info.Mode() != fileMode {
- os.Chmod(path, fileMode)
- }
- return nil
- })
- if err != nil {
- return fmt.Errorf("cannot reset file permissions: %v", err)
- }
-
- return nil
-}
diff --git a/controllers/helmchart_controller_test.go b/controllers/helmchart_controller_test.go
deleted file mode 100644
index 26e771c5a..000000000
--- a/controllers/helmchart_controller_test.go
+++ /dev/null
@@ -1,2047 +0,0 @@
-/*
-Copyright 2020 The Flux authors
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package controllers
-
-import (
- "bytes"
- "context"
- "encoding/base64"
- "errors"
- "fmt"
- "io"
- "io/ioutil"
- "net/http"
- "os"
- "path/filepath"
- "reflect"
- "strings"
- "testing"
- "time"
-
- "github.com/darkowlzz/controller-check/status"
- . "github.com/onsi/gomega"
- hchart "helm.sh/helm/v3/pkg/chart"
- "helm.sh/helm/v3/pkg/chart/loader"
- helmreg "helm.sh/helm/v3/pkg/registry"
- corev1 "k8s.io/api/core/v1"
- apierrors "k8s.io/apimachinery/pkg/api/errors"
- metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
- "k8s.io/client-go/tools/record"
- kstatus "sigs.k8s.io/cli-utils/pkg/kstatus/status"
- "sigs.k8s.io/controller-runtime/pkg/client"
- "sigs.k8s.io/controller-runtime/pkg/client/fake"
- "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
-
- "github.com/fluxcd/pkg/apis/meta"
- "github.com/fluxcd/pkg/helmtestserver"
- "github.com/fluxcd/pkg/runtime/conditions"
- "github.com/fluxcd/pkg/runtime/patch"
- "github.com/fluxcd/pkg/testserver"
- sourcev1 "github.com/fluxcd/source-controller/api/v1beta2"
- serror "github.com/fluxcd/source-controller/internal/error"
- "github.com/fluxcd/source-controller/internal/helm/chart"
- "github.com/fluxcd/source-controller/internal/helm/registry"
- sreconcile "github.com/fluxcd/source-controller/internal/reconcile"
- "github.com/fluxcd/source-controller/internal/reconcile/summarize"
-)
-
-func TestHelmChartReconciler_Reconcile(t *testing.T) {
- g := NewWithT(t)
-
- const (
- chartName = "helmchart"
- chartVersion = "0.2.0"
- chartPath = "testdata/charts/helmchart"
- )
-
- serverFactory, err := helmtestserver.NewTempHelmServer()
- g.Expect(err).NotTo(HaveOccurred())
- defer os.RemoveAll(serverFactory.Root())
-
- g.Expect(serverFactory.PackageChartWithVersion(chartPath, chartVersion)).To(Succeed())
- g.Expect(serverFactory.GenerateIndex()).To(Succeed())
-
- tests := []struct {
- name string
- beforeFunc func(repository *sourcev1.HelmRepository)
- assertFunc func(g *WithT, obj *sourcev1.HelmChart, repository *sourcev1.HelmRepository)
- }{
- {
- name: "Reconciles chart build",
- assertFunc: func(g *WithT, obj *sourcev1.HelmChart, repository *sourcev1.HelmRepository) {
- key := client.ObjectKey{Name: obj.Name, Namespace: obj.Namespace}
-
- // Wait for finalizer to be set
- g.Eventually(func() bool {
- if err := testEnv.Get(ctx, key, obj); err != nil {
- return false
- }
- return len(obj.Finalizers) > 0
- }, timeout).Should(BeTrue())
-
- // Wait for HelmChart to be Ready
- g.Eventually(func() bool {
- if err := testEnv.Get(ctx, key, obj); err != nil {
- return false
- }
- if !conditions.IsReady(obj) || obj.Status.Artifact == nil {
- return false
- }
- readyCondition := conditions.Get(obj, meta.ReadyCondition)
- return obj.Generation == readyCondition.ObservedGeneration &&
- obj.Generation == obj.Status.ObservedGeneration
- }, timeout).Should(BeTrue())
-
- // Check if the object status is valid.
- condns := &status.Conditions{NegativePolarity: helmChartReadyCondition.NegativePolarity}
- checker := status.NewChecker(testEnv.Client, condns)
- checker.CheckErr(ctx, obj)
-
- // kstatus client conformance check.
- u, err := patch.ToUnstructured(obj)
- g.Expect(err).ToNot(HaveOccurred())
- res, err := kstatus.Compute(u)
- g.Expect(err).ToNot(HaveOccurred())
- g.Expect(res.Status).To(Equal(kstatus.CurrentStatus))
-
- // Patch the object with reconcile request annotation.
- patchHelper, err := patch.NewHelper(obj, testEnv.Client)
- g.Expect(err).ToNot(HaveOccurred())
- annotations := map[string]string{
- meta.ReconcileRequestAnnotation: "now",
- }
- obj.SetAnnotations(annotations)
- g.Expect(patchHelper.Patch(ctx, obj)).ToNot(HaveOccurred())
- g.Eventually(func() bool {
- if err := testEnv.Get(ctx, key, obj); err != nil {
- return false
- }
- return obj.Status.LastHandledReconcileAt == "now"
- }, timeout).Should(BeTrue())
-
- // Check if the cache contains the index.
- repoKey := client.ObjectKey{Name: repository.Name, Namespace: repository.Namespace}
- err = testEnv.Get(ctx, repoKey, repository)
- g.Expect(err).ToNot(HaveOccurred())
- localPath := testStorage.LocalPath(*repository.GetArtifact())
- _, found := testCache.Get(localPath)
- g.Expect(found).To(BeTrue())
-
- g.Expect(testEnv.Delete(ctx, obj)).To(Succeed())
-
- // Wait for HelmChart to be deleted
- g.Eventually(func() bool {
- if err := testEnv.Get(ctx, key, obj); err != nil {
- return apierrors.IsNotFound(err)
- }
- return false
- }, timeout).Should(BeTrue())
- },
- },
- {
- name: "Stalling on invalid repository URL",
- beforeFunc: func(repository *sourcev1.HelmRepository) {
- repository.Spec.URL = "://unsupported" // Invalid URL
- },
- assertFunc: func(g *WithT, obj *sourcev1.HelmChart, _ *sourcev1.HelmRepository) {
- key := client.ObjectKey{Name: obj.Name, Namespace: obj.Namespace}
- // Wait for HelmChart to be FetchFailed == true
- g.Eventually(func() bool {
- if err := testEnv.Get(ctx, key, obj); err != nil {
- return false
- }
- if !conditions.IsTrue(obj, sourcev1.FetchFailedCondition) {
- return false
- }
- // observedGeneration is -1 because we have no successful reconciliation
- return obj.Status.ObservedGeneration == -1
- }, timeout).Should(BeTrue())
-
- // Check if the object status is valid.
- condns := &status.Conditions{NegativePolarity: helmChartReadyCondition.NegativePolarity}
- checker := status.NewChecker(testEnv.Client, condns)
- checker.CheckErr(ctx, obj)
-
- g.Expect(testEnv.Delete(ctx, obj)).To(Succeed())
-
- // Wait for HelmChart to be deleted
- g.Eventually(func() bool {
- if err := testEnv.Get(ctx, key, obj); err != nil {
- return apierrors.IsNotFound(err)
- }
- return false
- }, timeout).Should(BeTrue())
- },
- },
- {
- name: "Stalling on invalid oci repository URL",
- beforeFunc: func(repository *sourcev1.HelmRepository) {
- repository.Spec.URL = strings.Replace(repository.Spec.URL, "http", "oci", 1)
- },
- assertFunc: func(g *WithT, obj *sourcev1.HelmChart, _ *sourcev1.HelmRepository) {
- key := client.ObjectKey{Name: obj.Name, Namespace: obj.Namespace}
- // Wait for HelmChart to be Ready
- g.Eventually(func() bool {
- if err := testEnv.Get(ctx, key, obj); err != nil {
- return false
- }
- if !conditions.IsTrue(obj, sourcev1.FetchFailedCondition) {
- return false
- }
- // observedGeneration is -1 because we have no successful reconciliation
- return obj.Status.ObservedGeneration == -1
- }, timeout).Should(BeTrue())
-
- // Check if the object status is valid.
- condns := &status.Conditions{NegativePolarity: helmChartReadyCondition.NegativePolarity}
- checker := status.NewChecker(testEnv.Client, condns)
- checker.CheckErr(ctx, obj)
-
- g.Expect(testEnv.Delete(ctx, obj)).To(Succeed())
-
- // Wait for HelmChart to be deleted
- g.Eventually(func() bool {
- if err := testEnv.Get(ctx, key, obj); err != nil {
- return apierrors.IsNotFound(err)
- }
- return false
- }, timeout).Should(BeTrue())
- },
- },
- }
-
- for _, tt := range tests {
- t.Run(tt.name, func(t *testing.T) {
- g := NewWithT(t)
-
- server := testserver.NewHTTPServer(serverFactory.Root())
- server.Start()
- defer server.Stop()
-
- ns, err := testEnv.CreateNamespace(ctx, "helmchart")
- g.Expect(err).ToNot(HaveOccurred())
- defer func() { g.Expect(testEnv.Delete(ctx, ns)).To(Succeed()) }()
-
- repository := sourcev1.HelmRepository{
- ObjectMeta: metav1.ObjectMeta{
- GenerateName: "helmrepository-",
- Namespace: ns.Name,
- },
- Spec: sourcev1.HelmRepositorySpec{
- URL: server.URL(),
- },
- }
-
- if tt.beforeFunc != nil {
- tt.beforeFunc(&repository)
- }
-
- g.Expect(testEnv.CreateAndWait(ctx, &repository)).To(Succeed())
-
- obj := sourcev1.HelmChart{
- ObjectMeta: metav1.ObjectMeta{
- GenerateName: "helmrepository-reconcile-",
- Namespace: ns.Name,
- },
- Spec: sourcev1.HelmChartSpec{
- Chart: chartName,
- Version: chartVersion,
- SourceRef: sourcev1.LocalHelmChartSourceReference{
- Kind: sourcev1.HelmRepositoryKind,
- Name: repository.Name,
- },
- },
- }
- g.Expect(testEnv.Create(ctx, &obj)).To(Succeed())
-
- if tt.assertFunc != nil {
- tt.assertFunc(g, &obj, &repository)
- }
- })
- }
-}
-
-func TestHelmChartReconciler_reconcileStorage(t *testing.T) {
- tests := []struct {
- name string
- beforeFunc func(obj *sourcev1.HelmChart, storage *Storage) error
- want sreconcile.Result
- wantErr bool
- assertArtifact *sourcev1.Artifact
- assertConditions []metav1.Condition
- assertPaths []string
- }{
- {
- name: "garbage collects",
- beforeFunc: func(obj *sourcev1.HelmChart, storage *Storage) error {
- revisions := []string{"a", "b", "c", "d"}
- for n := range revisions {
- v := revisions[n]
- obj.Status.Artifact = &sourcev1.Artifact{
- Path: fmt.Sprintf("/reconcile-storage/%s.txt", v),
- Revision: v,
- }
- if err := testStorage.MkdirAll(*obj.Status.Artifact); err != nil {
- return err
- }
- if err := testStorage.AtomicWriteFile(obj.Status.Artifact, strings.NewReader(v), 0o640); err != nil {
- return err
- }
- if n != len(revisions)-1 {
- time.Sleep(time.Second * 1)
- }
- }
- testStorage.SetArtifactURL(obj.Status.Artifact)
- return nil
- },
- assertArtifact: &sourcev1.Artifact{
- Path: "/reconcile-storage/d.txt",
- Revision: "d",
- Checksum: "18ac3e7343f016890c510e93f935261169d9e3f565436429830faf0934f4f8e4",
- URL: testStorage.Hostname + "/reconcile-storage/d.txt",
- Size: int64p(int64(len("d"))),
- },
- assertPaths: []string{
- "/reconcile-storage/d.txt",
- "/reconcile-storage/c.txt",
- "!/reconcile-storage/b.txt",
- "!/reconcile-storage/a.txt",
- },
- want: sreconcile.ResultSuccess,
- },
- {
- name: "notices missing artifact in storage",
- beforeFunc: func(obj *sourcev1.HelmChart, storage *Storage) error {
- obj.Status.Artifact = &sourcev1.Artifact{
- Path: "/reconcile-storage/invalid.txt",
- Revision: "d",
- }
- testStorage.SetArtifactURL(obj.Status.Artifact)
- return nil
- },
- want: sreconcile.ResultSuccess,
- assertPaths: []string{
- "!/reconcile-storage/invalid.txt",
- },
- assertConditions: []metav1.Condition{
- *conditions.TrueCondition(meta.ReconcilingCondition, "NoArtifact", "no artifact for resource in storage"),
- },
- },
- {
- name: "updates hostname on diff from current",
- beforeFunc: func(obj *sourcev1.HelmChart, storage *Storage) error {
- obj.Status.Artifact = &sourcev1.Artifact{
- Path: "/reconcile-storage/hostname.txt",
- Revision: "f",
- Checksum: "3b9c358f36f0a31b6ad3e14f309c7cf198ac9246e8316f9ce543d5b19ac02b80",
- URL: "http://outdated.com/reconcile-storage/hostname.txt",
- }
- if err := testStorage.MkdirAll(*obj.Status.Artifact); err != nil {
- return err
- }
- if err := testStorage.AtomicWriteFile(obj.Status.Artifact, strings.NewReader("file"), 0o640); err != nil {
- return err
- }
- return nil
- },
- want: sreconcile.ResultSuccess,
- assertPaths: []string{
- "/reconcile-storage/hostname.txt",
- },
- assertArtifact: &sourcev1.Artifact{
- Path: "/reconcile-storage/hostname.txt",
- Revision: "f",
- Checksum: "3b9c358f36f0a31b6ad3e14f309c7cf198ac9246e8316f9ce543d5b19ac02b80",
- URL: testStorage.Hostname + "/reconcile-storage/hostname.txt",
- Size: int64p(int64(len("file"))),
- },
- },
- }
- for _, tt := range tests {
- t.Run(tt.name, func(t *testing.T) {
- g := NewWithT(t)
-
- defer func() {
- g.Expect(os.RemoveAll(filepath.Join(testStorage.BasePath, "/reconcile-storage"))).To(Succeed())
- }()
-
- r := &HelmChartReconciler{
- EventRecorder: record.NewFakeRecorder(32),
- Storage: testStorage,
- }
-
- obj := &sourcev1.HelmChart{
- ObjectMeta: metav1.ObjectMeta{
- GenerateName: "test-",
- },
- }
- if tt.beforeFunc != nil {
- g.Expect(tt.beforeFunc(obj, testStorage)).To(Succeed())
- }
-
- got, err := r.reconcileStorage(context.TODO(), obj, nil)
- g.Expect(err != nil).To(Equal(tt.wantErr))
- g.Expect(got).To(Equal(tt.want))
-
- g.Expect(obj.Status.Artifact).To(MatchArtifact(tt.assertArtifact))
- if tt.assertArtifact != nil && tt.assertArtifact.URL != "" {
- g.Expect(obj.Status.Artifact.URL).To(Equal(tt.assertArtifact.URL))
- }
- g.Expect(obj.Status.Conditions).To(conditions.MatchConditions(tt.assertConditions))
-
- for _, p := range tt.assertPaths {
- absoluteP := filepath.Join(testStorage.BasePath, p)
- if !strings.HasPrefix(p, "!") {
- g.Expect(absoluteP).To(BeAnExistingFile())
- continue
- }
- g.Expect(absoluteP).NotTo(BeAnExistingFile())
- }
- })
- }
-}
-
-func TestHelmChartReconciler_reconcileSource(t *testing.T) {
- g := NewWithT(t)
-
- tmpDir := t.TempDir()
-
- storage, err := NewStorage(tmpDir, "example.com", retentionTTL, retentionRecords)
- g.Expect(err).ToNot(HaveOccurred())
-
- gitArtifact := &sourcev1.Artifact{
- Revision: "mock-ref/abcdefg12345678",
- Path: "mock.tgz",
- }
- g.Expect(storage.Archive(gitArtifact, "testdata/charts", nil)).To(Succeed())
-
- tests := []struct {
- name string
- source sourcev1.Source
- beforeFunc func(obj *sourcev1.HelmChart)
- want sreconcile.Result
- wantErr error
- assertFunc func(g *WithT, build chart.Build, obj sourcev1.HelmChart)
- cleanFunc func(g *WithT, build *chart.Build)
- }{
- {
- name: "Observes Artifact revision and build result",
- source: &sourcev1.GitRepository{
- ObjectMeta: metav1.ObjectMeta{
- Name: "gitrepository",
- Namespace: "default",
- },
- Status: sourcev1.GitRepositoryStatus{
- Artifact: gitArtifact,
- },
- },
- beforeFunc: func(obj *sourcev1.HelmChart) {
- obj.Spec.Chart = "testdata/charts/helmchart-0.1.0.tgz"
- obj.Spec.SourceRef = sourcev1.LocalHelmChartSourceReference{
- Name: "gitrepository",
- Kind: sourcev1.GitRepositoryKind,
- }
- },
- want: sreconcile.ResultSuccess,
- assertFunc: func(g *WithT, build chart.Build, obj sourcev1.HelmChart) {
- g.Expect(build.Complete()).To(BeTrue())
- g.Expect(build.Name).To(Equal("helmchart"))
- g.Expect(build.Version).To(Equal("0.1.0"))
- g.Expect(build.Path).To(BeARegularFile())
-
- g.Expect(obj.Status.ObservedSourceArtifactRevision).To(Equal(gitArtifact.Revision))
- g.Expect(obj.Status.Conditions).To(conditions.MatchConditions([]metav1.Condition{
- *conditions.TrueCondition(sourcev1.ArtifactOutdatedCondition, "NewChart", "pulled 'helmchart' chart with version '0.1.0'"),
- }))
- },
- cleanFunc: func(g *WithT, build *chart.Build) {
- g.Expect(os.Remove(build.Path)).To(Succeed())
- },
- },
- {
- name: "Error on unavailable source",
- beforeFunc: func(obj *sourcev1.HelmChart) {
- obj.Spec.SourceRef = sourcev1.LocalHelmChartSourceReference{
- Name: "unavailable",
- Kind: sourcev1.GitRepositoryKind,
- }
- },
- want: sreconcile.ResultEmpty,
- wantErr: &serror.Event{Err: errors.New("gitrepositories.source.toolkit.fluxcd.io \"unavailable\" not found")},
- assertFunc: func(g *WithT, build chart.Build, obj sourcev1.HelmChart) {
- g.Expect(build.Complete()).To(BeFalse())
-
- g.Expect(obj.Status.Conditions).To(conditions.MatchConditions([]metav1.Condition{
- *conditions.TrueCondition(sourcev1.FetchFailedCondition, "SourceUnavailable", "failed to get source: gitrepositories.source.toolkit.fluxcd.io \"unavailable\" not found"),
- }))
- },
- },
- {
- name: "Stalling on unsupported source kind",
- beforeFunc: func(obj *sourcev1.HelmChart) {
- obj.Spec.SourceRef = sourcev1.LocalHelmChartSourceReference{
- Name: "unavailable",
- Kind: "Unsupported",
- }
- },
- want: sreconcile.ResultEmpty,
- wantErr: &serror.Stalling{Err: errors.New("unsupported source kind 'Unsupported'")},
- assertFunc: func(g *WithT, build chart.Build, obj sourcev1.HelmChart) {
- g.Expect(build.Complete()).To(BeFalse())
-
- g.Expect(obj.Status.Conditions).To(conditions.MatchConditions([]metav1.Condition{
- *conditions.TrueCondition(sourcev1.FetchFailedCondition, "SourceUnavailable", "failed to get source: unsupported source kind"),
- }))
- },
- },
- {
- name: "Stalling on persistent build error",
- source: &sourcev1.GitRepository{
- ObjectMeta: metav1.ObjectMeta{
- Name: "gitrepository",
- Namespace: "default",
- },
- Status: sourcev1.GitRepositoryStatus{
- Artifact: gitArtifact,
- },
- },
- beforeFunc: func(obj *sourcev1.HelmChart) {
- obj.Spec.Chart = "testdata/charts/helmchart-0.1.0.tgz"
- obj.Spec.SourceRef = sourcev1.LocalHelmChartSourceReference{
- Name: "gitrepository",
- Kind: sourcev1.GitRepositoryKind,
- }
- obj.Spec.ValuesFiles = []string{"invalid.yaml"}
- },
- want: sreconcile.ResultEmpty,
- wantErr: &serror.Stalling{Err: errors.New("values files merge error: no values file found at path")},
- assertFunc: func(g *WithT, build chart.Build, obj sourcev1.HelmChart) {
- g.Expect(build.Complete()).To(BeFalse())
-
- g.Expect(obj.Status.Conditions).To(conditions.MatchConditions([]metav1.Condition{
- *conditions.TrueCondition(sourcev1.BuildFailedCondition, "ValuesFilesError", "values files merge error: no values file found at path"),
- }))
- },
- },
- {
- name: "ResultRequeue when source artifact is unavailable",
- source: &sourcev1.GitRepository{
- ObjectMeta: metav1.ObjectMeta{
- Name: "gitrepository",
- Namespace: "default",
- },
- Status: sourcev1.GitRepositoryStatus{},
- },
- beforeFunc: func(obj *sourcev1.HelmChart) {
- obj.Spec.Chart = "testdata/charts/helmchart-0.1.0.tgz"
- obj.Spec.SourceRef = sourcev1.LocalHelmChartSourceReference{
- Name: "gitrepository",
- Kind: sourcev1.GitRepositoryKind,
- }
- obj.Status.ObservedSourceArtifactRevision = "foo"
- },
- want: sreconcile.ResultRequeue,
- assertFunc: func(g *WithT, build chart.Build, obj sourcev1.HelmChart) {
- g.Expect(build.Complete()).To(BeFalse())
-
- g.Expect(obj.Status.ObservedSourceArtifactRevision).To(Equal("foo"))
- g.Expect(obj.Status.Conditions).To(conditions.MatchConditions([]metav1.Condition{
- *conditions.TrueCondition(sourcev1.FetchFailedCondition, "NoSourceArtifact", "no artifact available"),
- }))
- },
- },
- }
- for _, tt := range tests {
- t.Run(tt.name, func(t *testing.T) {
- g := NewWithT(t)
-
- clientBuilder := fake.NewClientBuilder()
- if tt.source != nil {
- clientBuilder.WithRuntimeObjects(tt.source)
- }
-
- r := &HelmChartReconciler{
- Client: clientBuilder.Build(),
- EventRecorder: record.NewFakeRecorder(32),
- Storage: storage,
- }
-
- obj := sourcev1.HelmChart{
- ObjectMeta: metav1.ObjectMeta{
- Name: "chart",
- Namespace: "default",
- },
- Spec: sourcev1.HelmChartSpec{},
- }
- if tt.beforeFunc != nil {
- tt.beforeFunc(&obj)
- }
-
- var b chart.Build
- if tt.cleanFunc != nil {
- defer tt.cleanFunc(g, &b)
- }
-
- got, err := r.reconcileSource(context.TODO(), &obj, &b)
-
- g.Expect(err != nil).To(Equal(tt.wantErr != nil))
- if tt.wantErr != nil {
- g.Expect(reflect.TypeOf(err).String()).To(Equal(reflect.TypeOf(tt.wantErr).String()))
- g.Expect(err.Error()).To(ContainSubstring(tt.wantErr.Error()))
- }
- g.Expect(got).To(Equal(tt.want))
-
- if tt.assertFunc != nil {
- tt.assertFunc(g, b, obj)
- }
- })
- }
-}
-
-func TestHelmChartReconciler_buildFromHelmRepository(t *testing.T) {
- g := NewWithT(t)
-
- const (
- chartName = "helmchart"
- chartVersion = "0.2.0"
- higherChartVersion = "0.3.0"
- chartPath = "testdata/charts/helmchart"
- )
-
- serverFactory, err := helmtestserver.NewTempHelmServer()
- g.Expect(err).NotTo(HaveOccurred())
- defer os.RemoveAll(serverFactory.Root())
-
- for _, ver := range []string{chartVersion, higherChartVersion} {
- g.Expect(serverFactory.PackageChartWithVersion(chartPath, ver)).To(Succeed())
- }
- g.Expect(serverFactory.GenerateIndex()).To(Succeed())
-
- type options struct {
- username string
- password string
- }
-
- tests := []struct {
- name string
- server options
- secret *corev1.Secret
- beforeFunc func(obj *sourcev1.HelmChart, repository *sourcev1.HelmRepository)
- want sreconcile.Result
- wantErr error
- assertFunc func(g *WithT, obj *sourcev1.HelmChart, build chart.Build)
- cleanFunc func(g *WithT, build *chart.Build)
- }{
- {
- name: "Reconciles chart build",
- beforeFunc: func(obj *sourcev1.HelmChart, repository *sourcev1.HelmRepository) {
- obj.Spec.Chart = "helmchart"
- },
- want: sreconcile.ResultSuccess,
- assertFunc: func(g *WithT, _ *sourcev1.HelmChart, build chart.Build) {
- g.Expect(build.Name).To(Equal(chartName))
- g.Expect(build.Version).To(Equal(higherChartVersion))
- g.Expect(build.Path).ToNot(BeEmpty())
- g.Expect(build.Path).To(BeARegularFile())
- },
- cleanFunc: func(g *WithT, build *chart.Build) {
- g.Expect(os.Remove(build.Path)).To(Succeed())
- },
- },
- {
- name: "Reconciles chart build with repository credentials",
- server: options{
- username: "foo",
- password: "bar",
- },
- secret: &corev1.Secret{
- ObjectMeta: metav1.ObjectMeta{
- Name: "auth",
- },
- Data: map[string][]byte{
- "username": []byte("foo"),
- "password": []byte("bar"),
- },
- },
- beforeFunc: func(obj *sourcev1.HelmChart, repository *sourcev1.HelmRepository) {
- obj.Spec.Chart = chartName
- obj.Spec.Version = chartVersion
- repository.Spec.SecretRef = &meta.LocalObjectReference{Name: "auth"}
- },
- want: sreconcile.ResultSuccess,
- assertFunc: func(g *WithT, _ *sourcev1.HelmChart, build chart.Build) {
- g.Expect(build.Name).To(Equal(chartName))
- g.Expect(build.Version).To(Equal(chartVersion))
- g.Expect(build.Path).ToNot(BeEmpty())
- g.Expect(build.Path).To(BeARegularFile())
- },
- cleanFunc: func(g *WithT, build *chart.Build) {
- g.Expect(os.Remove(build.Path)).To(Succeed())
- },
- },
- {
- name: "Uses artifact as build cache",
- beforeFunc: func(obj *sourcev1.HelmChart, repository *sourcev1.HelmRepository) {
- obj.Spec.Chart = chartName
- obj.Spec.Version = chartVersion
- obj.Status.Artifact = &sourcev1.Artifact{Path: chartName + "-" + chartVersion + ".tgz"}
- },
- want: sreconcile.ResultSuccess,
- assertFunc: func(g *WithT, obj *sourcev1.HelmChart, build chart.Build) {
- g.Expect(build.Name).To(Equal(chartName))
- g.Expect(build.Version).To(Equal(chartVersion))
- g.Expect(build.Path).To(Equal(filepath.Join(serverFactory.Root(), obj.Status.Artifact.Path)))
- g.Expect(build.Path).To(BeARegularFile())
- },
- },
- {
- name: "Sets Generation as VersionMetadata with values files",
- beforeFunc: func(obj *sourcev1.HelmChart, repository *sourcev1.HelmRepository) {
- obj.Spec.Chart = chartName
- obj.Generation = 3
- obj.Spec.ValuesFiles = []string{"values.yaml", "override.yaml"}
- },
- want: sreconcile.ResultSuccess,
- assertFunc: func(g *WithT, _ *sourcev1.HelmChart, build chart.Build) {
- g.Expect(build.Name).To(Equal(chartName))
- g.Expect(build.Version).To(Equal(higherChartVersion + "+3"))
- g.Expect(build.Path).ToNot(BeEmpty())
- g.Expect(build.Path).To(BeARegularFile())
- },
- cleanFunc: func(g *WithT, build *chart.Build) {
- g.Expect(os.Remove(build.Path)).To(Succeed())
- },
- },
- {
- name: "Forces build on generation change",
- beforeFunc: func(obj *sourcev1.HelmChart, repository *sourcev1.HelmRepository) {
- obj.Generation = 3
- obj.Spec.Chart = chartName
- obj.Spec.Version = chartVersion
-
- obj.Status.ObservedGeneration = 2
- obj.Status.Artifact = &sourcev1.Artifact{Path: chartName + "-" + chartVersion + ".tgz"}
- },
- want: sreconcile.ResultSuccess,
- assertFunc: func(g *WithT, obj *sourcev1.HelmChart, build chart.Build) {
- g.Expect(build.Name).To(Equal(chartName))
- g.Expect(build.Version).To(Equal(chartVersion))
- g.Expect(build.Path).ToNot(Equal(filepath.Join(serverFactory.Root(), obj.Status.Artifact.Path)))
- g.Expect(build.Path).To(BeARegularFile())
- },
- cleanFunc: func(g *WithT, build *chart.Build) {
- g.Expect(os.Remove(build.Path)).To(Succeed())
- },
- },
- {
- name: "Event on unsuccessful secret retrieval",
- beforeFunc: func(_ *sourcev1.HelmChart, repository *sourcev1.HelmRepository) {
- repository.Spec.SecretRef = &meta.LocalObjectReference{
- Name: "invalid",
- }
- },
- want: sreconcile.ResultEmpty,
- wantErr: &serror.Event{Err: errors.New("failed to get secret 'invalid'")},
- assertFunc: func(g *WithT, obj *sourcev1.HelmChart, build chart.Build) {
- g.Expect(build.Complete()).To(BeFalse())
-
- g.Expect(obj.Status.Conditions).To(conditions.MatchConditions([]metav1.Condition{
- *conditions.TrueCondition(sourcev1.FetchFailedCondition, sourcev1.AuthenticationFailedReason, "failed to get secret 'invalid'"),
- }))
- },
- },
- {
- name: "Stalling on invalid client options",
- beforeFunc: func(obj *sourcev1.HelmChart, repository *sourcev1.HelmRepository) {
- repository.Spec.URL = "file://unsupported" // Unsupported protocol
- },
- want: sreconcile.ResultEmpty,
- wantErr: &serror.Stalling{Err: errors.New("scheme \"file\" not supported")},
- assertFunc: func(g *WithT, obj *sourcev1.HelmChart, build chart.Build) {
- g.Expect(build.Complete()).To(BeFalse())
-
- g.Expect(obj.Status.Conditions).To(conditions.MatchConditions([]metav1.Condition{
- *conditions.TrueCondition(sourcev1.FetchFailedCondition, meta.FailedReason, "failed to construct Helm client"),
- }))
- },
- },
- {
- name: "Stalling on invalid repository URL",
- beforeFunc: func(obj *sourcev1.HelmChart, repository *sourcev1.HelmRepository) {
- repository.Spec.URL = "://unsupported" // Invalid URL
- },
- want: sreconcile.ResultEmpty,
- wantErr: &serror.Stalling{Err: errors.New("missing protocol scheme")},
- assertFunc: func(g *WithT, obj *sourcev1.HelmChart, build chart.Build) {
- g.Expect(build.Complete()).To(BeFalse())
-
- g.Expect(obj.Status.Conditions).To(conditions.MatchConditions([]metav1.Condition{
- *conditions.TrueCondition(sourcev1.FetchFailedCondition, sourcev1.URLInvalidReason, "invalid Helm repository URL"),
- }))
- },
- },
- {
- name: "BuildError on temporary build error",
- beforeFunc: func(obj *sourcev1.HelmChart, _ *sourcev1.HelmRepository) {
- obj.Spec.Chart = "invalid"
- },
- want: sreconcile.ResultEmpty,
- wantErr: &chart.BuildError{Err: errors.New("failed to get chart version for remote reference")},
- },
- }
- for _, tt := range tests {
- t.Run(tt.name, func(t *testing.T) {
- g := NewWithT(t)
-
- server := testserver.NewHTTPServer(serverFactory.Root())
- server.Start()
- defer server.Stop()
-
- if len(tt.server.username+tt.server.password) > 0 {
- server.WithMiddleware(func(handler http.Handler) http.Handler {
- return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
- u, p, ok := r.BasicAuth()
- if !ok || u != tt.server.username || p != tt.server.password {
- w.WriteHeader(401)
- return
- }
- handler.ServeHTTP(w, r)
- })
- })
- }
-
- clientBuilder := fake.NewClientBuilder()
- if tt.secret != nil {
- clientBuilder.WithObjects(tt.secret.DeepCopy())
- }
-
- storage, err := newTestStorage(server)
- g.Expect(err).ToNot(HaveOccurred())
-
- r := &HelmChartReconciler{
- Client: clientBuilder.Build(),
- EventRecorder: record.NewFakeRecorder(32),
- Getters: testGetters,
- Storage: storage,
- }
-
- repository := &sourcev1.HelmRepository{
- ObjectMeta: metav1.ObjectMeta{
- GenerateName: "helmrepository-",
- },
- Spec: sourcev1.HelmRepositorySpec{
- URL: server.URL(),
- Timeout: &metav1.Duration{Duration: timeout},
- },
- Status: sourcev1.HelmRepositoryStatus{
- Artifact: &sourcev1.Artifact{
- Path: "index.yaml",
- },
- },
- }
- obj := &sourcev1.HelmChart{
- ObjectMeta: metav1.ObjectMeta{
- GenerateName: "helmrepository-",
- },
- Spec: sourcev1.HelmChartSpec{},
- }
-
- if tt.beforeFunc != nil {
- tt.beforeFunc(obj, repository)
- }
-
- var b chart.Build
- if tt.cleanFunc != nil {
- defer tt.cleanFunc(g, &b)
- }
- got, err := r.buildFromHelmRepository(context.TODO(), obj, repository, &b)
-
- g.Expect(err != nil).To(Equal(tt.wantErr != nil))
- if tt.wantErr != nil {
- g.Expect(reflect.TypeOf(err).String()).To(Equal(reflect.TypeOf(tt.wantErr).String()))
- g.Expect(err.Error()).To(ContainSubstring(tt.wantErr.Error()))
- }
- g.Expect(got).To(Equal(tt.want))
-
- if tt.assertFunc != nil {
- tt.assertFunc(g, obj, b)
- }
- })
- }
-}
-
-func TestHelmChartReconciler_buildFromOCIHelmRepository(t *testing.T) {
- g := NewWithT(t)
-
- tmpDir := t.TempDir()
-
- const (
- chartPath = "testdata/charts/helmchart-0.1.0.tgz"
- )
-
- // Login to the registry
- err := testRegistryServer.registryClient.Login(testRegistryServer.registryHost,
- helmreg.LoginOptBasicAuth(testRegistryUsername, testRegistryPassword),
- helmreg.LoginOptInsecure(true))
- g.Expect(err).NotTo(HaveOccurred())
-
- // Load a test chart
- chartData, err := ioutil.ReadFile(chartPath)
- g.Expect(err).NotTo(HaveOccurred())
- metadata, err := extractChartMeta(chartData)
- g.Expect(err).NotTo(HaveOccurred())
-
- // Upload the test chart
- ref := fmt.Sprintf("%s/testrepo/%s:%s", testRegistryServer.registryHost, metadata.Name, metadata.Version)
- _, err = testRegistryServer.registryClient.Push(chartData, ref)
- g.Expect(err).NotTo(HaveOccurred())
-
- storage, err := NewStorage(tmpDir, "example.com", retentionTTL, retentionRecords)
- g.Expect(err).ToNot(HaveOccurred())
-
- cachedArtifact := &sourcev1.Artifact{
- Revision: "0.1.0",
- Path: metadata.Name + "-" + metadata.Version + ".tgz",
- }
- g.Expect(storage.CopyFromPath(cachedArtifact, "testdata/charts/helmchart-0.1.0.tgz")).To(Succeed())
-
- tests := []struct {
- name string
- secret *corev1.Secret
- beforeFunc func(obj *sourcev1.HelmChart, repository *sourcev1.HelmRepository)
- want sreconcile.Result
- wantErr error
- assertFunc func(g *WithT, obj *sourcev1.HelmChart, build chart.Build)
- cleanFunc func(g *WithT, build *chart.Build)
- }{
- {
- name: "Reconciles chart build with docker repository credentials",
- secret: &corev1.Secret{
- ObjectMeta: metav1.ObjectMeta{
- Name: "auth",
- },
- Type: corev1.SecretTypeDockerConfigJson,
- Data: map[string][]byte{
- ".dockerconfigjson": []byte(`{"auths":{"` +
- testRegistryServer.registryHost + `":{"` +
- `auth":"` + base64.StdEncoding.EncodeToString([]byte(testRegistryUsername+":"+testRegistryPassword)) + `"}}}`),
- },
- },
- beforeFunc: func(obj *sourcev1.HelmChart, repository *sourcev1.HelmRepository) {
- obj.Spec.Chart = metadata.Name
- obj.Spec.Version = metadata.Version
- repository.Spec.SecretRef = &meta.LocalObjectReference{Name: "auth"}
- },
- want: sreconcile.ResultSuccess,
- assertFunc: func(g *WithT, _ *sourcev1.HelmChart, build chart.Build) {
- g.Expect(build.Name).To(Equal(metadata.Name))
- g.Expect(build.Version).To(Equal(metadata.Version))
- g.Expect(build.Path).ToNot(BeEmpty())
- g.Expect(build.Path).To(BeARegularFile())
- },
- cleanFunc: func(g *WithT, build *chart.Build) {
- g.Expect(os.Remove(build.Path)).To(Succeed())
- },
- },
- {
- name: "Reconciles chart build with repository credentials",
- secret: &corev1.Secret{
- ObjectMeta: metav1.ObjectMeta{
- Name: "auth",
- },
- Data: map[string][]byte{
- "username": []byte(testRegistryUsername),
- "password": []byte(testRegistryPassword),
- },
- },
- beforeFunc: func(obj *sourcev1.HelmChart, repository *sourcev1.HelmRepository) {
- obj.Spec.Chart = metadata.Name
- obj.Spec.Version = metadata.Version
- repository.Spec.SecretRef = &meta.LocalObjectReference{Name: "auth"}
- },
- want: sreconcile.ResultSuccess,
- assertFunc: func(g *WithT, _ *sourcev1.HelmChart, build chart.Build) {
- g.Expect(build.Name).To(Equal(metadata.Name))
- g.Expect(build.Version).To(Equal(metadata.Version))
- g.Expect(build.Path).ToNot(BeEmpty())
- g.Expect(build.Path).To(BeARegularFile())
- },
- cleanFunc: func(g *WithT, build *chart.Build) {
- g.Expect(os.Remove(build.Path)).To(Succeed())
- },
- },
- {
- name: "Uses artifact as build cache",
- beforeFunc: func(obj *sourcev1.HelmChart, repository *sourcev1.HelmRepository) {
- obj.Spec.Chart = metadata.Name
- obj.Spec.Version = metadata.Version
- obj.Status.Artifact = &sourcev1.Artifact{Path: metadata.Name + "-" + metadata.Version + ".tgz"}
- },
- want: sreconcile.ResultSuccess,
- assertFunc: func(g *WithT, obj *sourcev1.HelmChart, build chart.Build) {
- g.Expect(build.Name).To(Equal(metadata.Name))
- g.Expect(build.Version).To(Equal(metadata.Version))
- g.Expect(build.Path).To(Equal(storage.LocalPath(*cachedArtifact.DeepCopy())))
- g.Expect(build.Path).To(BeARegularFile())
- },
- },
- {
- name: "Forces build on generation change",
- beforeFunc: func(obj *sourcev1.HelmChart, repository *sourcev1.HelmRepository) {
- obj.Generation = 3
- obj.Spec.Chart = metadata.Name
- obj.Spec.Version = metadata.Version
-
- obj.Status.ObservedGeneration = 2
- obj.Status.Artifact = &sourcev1.Artifact{Path: metadata.Name + "-" + metadata.Version + ".tgz"}
- },
- want: sreconcile.ResultSuccess,
- assertFunc: func(g *WithT, obj *sourcev1.HelmChart, build chart.Build) {
- g.Expect(build.Name).To(Equal(metadata.Name))
- g.Expect(build.Version).To(Equal(metadata.Version))
- fmt.Println("buildpath", build.Path)
- fmt.Println("storage Path", storage.LocalPath(*cachedArtifact.DeepCopy()))
- g.Expect(build.Path).ToNot(Equal(storage.LocalPath(*cachedArtifact.DeepCopy())))
- g.Expect(build.Path).To(BeARegularFile())
- },
- cleanFunc: func(g *WithT, build *chart.Build) {
- g.Expect(os.Remove(build.Path)).To(Succeed())
- },
- },
- {
- name: "Event on unsuccessful secret retrieval",
- beforeFunc: func(_ *sourcev1.HelmChart, repository *sourcev1.HelmRepository) {
- repository.Spec.SecretRef = &meta.LocalObjectReference{
- Name: "invalid",
- }
- },
- want: sreconcile.ResultEmpty,
- wantErr: &serror.Event{Err: errors.New("failed to get secret 'invalid'")},
- assertFunc: func(g *WithT, obj *sourcev1.HelmChart, build chart.Build) {
- g.Expect(build.Complete()).To(BeFalse())
-
- g.Expect(obj.Status.Conditions).To(conditions.MatchConditions([]metav1.Condition{
- *conditions.TrueCondition(sourcev1.FetchFailedCondition, sourcev1.AuthenticationFailedReason, "failed to get secret 'invalid'"),
- }))
- },
- },
- {
- name: "Stalling on invalid client options",
- beforeFunc: func(obj *sourcev1.HelmChart, repository *sourcev1.HelmRepository) {
- repository.Spec.URL = "https://unsupported" // Unsupported protocol
- },
- want: sreconcile.ResultEmpty,
- wantErr: &serror.Stalling{Err: errors.New("failed to construct Helm client: invalid OCI registry URL: https://unsupported")},
- assertFunc: func(g *WithT, obj *sourcev1.HelmChart, build chart.Build) {
- g.Expect(build.Complete()).To(BeFalse())
-
- g.Expect(obj.Status.Conditions).To(conditions.MatchConditions([]metav1.Condition{
- *conditions.TrueCondition(sourcev1.FetchFailedCondition, meta.FailedReason, "failed to construct Helm client"),
- }))
- },
- },
- {
- name: "BuildError on temporary build error",
- beforeFunc: func(obj *sourcev1.HelmChart, _ *sourcev1.HelmRepository) {
- obj.Spec.Chart = "invalid"
- },
- want: sreconcile.ResultEmpty,
- wantErr: &chart.BuildError{Err: errors.New("failed to get chart version for remote reference")},
- },
- }
- for _, tt := range tests {
- t.Run(tt.name, func(t *testing.T) {
- g := NewWithT(t)
-
- clientBuilder := fake.NewClientBuilder()
- if tt.secret != nil {
- clientBuilder.WithObjects(tt.secret.DeepCopy())
- }
-
- r := &HelmChartReconciler{
- Client: clientBuilder.Build(),
- EventRecorder: record.NewFakeRecorder(32),
- Getters: testGetters,
- Storage: storage,
- RegistryClientGenerator: registry.ClientGenerator,
- }
-
- repository := &sourcev1.HelmRepository{
- ObjectMeta: metav1.ObjectMeta{
- GenerateName: "helmrepository-",
- },
- Spec: sourcev1.HelmRepositorySpec{
- URL: fmt.Sprintf("oci://%s/testrepo", testRegistryServer.registryHost),
- Timeout: &metav1.Duration{Duration: timeout},
- Type: sourcev1.HelmRepositoryTypeOCI,
- },
- }
- obj := &sourcev1.HelmChart{
- ObjectMeta: metav1.ObjectMeta{
- GenerateName: "helmrepository-",
- },
- Spec: sourcev1.HelmChartSpec{},
- }
-
- if tt.beforeFunc != nil {
- tt.beforeFunc(obj, repository)
- }
-
- var b chart.Build
- if tt.cleanFunc != nil {
- defer tt.cleanFunc(g, &b)
- }
- got, err := r.buildFromHelmRepository(context.TODO(), obj, repository, &b)
-
- g.Expect(err != nil).To(Equal(tt.wantErr != nil))
- if tt.wantErr != nil {
- g.Expect(reflect.TypeOf(err).String()).To(Equal(reflect.TypeOf(tt.wantErr).String()))
- g.Expect(err.Error()).To(ContainSubstring(tt.wantErr.Error()))
- }
- g.Expect(got).To(Equal(tt.want))
-
- if tt.assertFunc != nil {
- tt.assertFunc(g, obj, b)
- }
- })
- }
-}
-
-func TestHelmChartReconciler_buildFromTarballArtifact(t *testing.T) {
- g := NewWithT(t)
-
- tmpDir := t.TempDir()
-
- storage, err := NewStorage(tmpDir, "example.com", retentionTTL, retentionRecords)
- g.Expect(err).ToNot(HaveOccurred())
-
- chartsArtifact := &sourcev1.Artifact{
- Revision: "mock-ref/abcdefg12345678",
- Path: "mock.tgz",
- }
- g.Expect(storage.Archive(chartsArtifact, "testdata/charts", nil)).To(Succeed())
- yamlArtifact := &sourcev1.Artifact{
- Revision: "9876abcd",
- Path: "values.yaml",
- }
- g.Expect(storage.CopyFromPath(yamlArtifact, "testdata/charts/helmchart/values.yaml")).To(Succeed())
- cachedArtifact := &sourcev1.Artifact{
- Revision: "0.1.0",
- Path: "cached.tgz",
- }
- g.Expect(storage.CopyFromPath(cachedArtifact, "testdata/charts/helmchart-0.1.0.tgz")).To(Succeed())
-
- tests := []struct {
- name string
- source sourcev1.Artifact
- beforeFunc func(obj *sourcev1.HelmChart)
- want sreconcile.Result
- wantErr error
- assertFunc func(g *WithT, build chart.Build)
- cleanFunc func(g *WithT, build *chart.Build)
- }{
- {
- name: "Resolves chart dependencies and builds",
- source: *chartsArtifact.DeepCopy(),
- beforeFunc: func(obj *sourcev1.HelmChart) {
- obj.Spec.Chart = "testdata/charts/helmchartwithdeps"
- },
- want: sreconcile.ResultSuccess,
- assertFunc: func(g *WithT, build chart.Build) {
- g.Expect(build.Name).To(Equal("helmchartwithdeps"))
- g.Expect(build.Version).To(Equal("0.1.0"))
- g.Expect(build.ResolvedDependencies).To(Equal(4))
- g.Expect(build.Path).To(BeARegularFile())
- },
- cleanFunc: func(g *WithT, build *chart.Build) {
- g.Expect(os.Remove(build.Path)).To(Succeed())
- },
- },
- {
- name: "ReconcileStrategyRevision sets VersionMetadata",
- source: *chartsArtifact.DeepCopy(),
- beforeFunc: func(obj *sourcev1.HelmChart) {
- obj.Spec.Chart = "testdata/charts/helmchart"
- obj.Spec.SourceRef.Kind = sourcev1.GitRepositoryKind
- obj.Spec.ReconcileStrategy = sourcev1.ReconcileStrategyRevision
- },
- want: sreconcile.ResultSuccess,
- assertFunc: func(g *WithT, build chart.Build) {
- g.Expect(build.Name).To(Equal("helmchart"))
- g.Expect(build.Version).To(Equal("0.1.0+abcdefg12345"))
- g.Expect(build.ResolvedDependencies).To(Equal(0))
- g.Expect(build.Path).To(BeARegularFile())
- },
- cleanFunc: func(g *WithT, build *chart.Build) {
- g.Expect(os.Remove(build.Path)).To(Succeed())
- },
- },
- {
- name: "ValuesFiles sets Generation as VersionMetadata",
- source: *chartsArtifact.DeepCopy(),
- beforeFunc: func(obj *sourcev1.HelmChart) {
- obj.Generation = 3
- obj.Spec.Chart = "testdata/charts/helmchart"
- obj.Spec.SourceRef.Kind = sourcev1.GitRepositoryKind
- obj.Spec.ValuesFiles = []string{
- filepath.Join(obj.Spec.Chart, "values.yaml"),
- filepath.Join(obj.Spec.Chart, "override.yaml"),
- }
- },
- want: sreconcile.ResultSuccess,
- assertFunc: func(g *WithT, build chart.Build) {
- g.Expect(build.Name).To(Equal("helmchart"))
- g.Expect(build.Version).To(Equal("0.1.0+3"))
- g.Expect(build.ResolvedDependencies).To(Equal(0))
- g.Expect(build.Path).To(BeARegularFile())
- },
- cleanFunc: func(g *WithT, build *chart.Build) {
- g.Expect(os.Remove(build.Path)).To(Succeed())
- },
- },
- {
- name: "Chart from storage cache",
- source: *chartsArtifact.DeepCopy(),
- beforeFunc: func(obj *sourcev1.HelmChart) {
- obj.Spec.Chart = "testdata/charts/helmchart-0.1.0.tgz"
- obj.Status.Artifact = cachedArtifact.DeepCopy()
- },
- want: sreconcile.ResultSuccess,
- assertFunc: func(g *WithT, build chart.Build) {
- g.Expect(build.Name).To(Equal("helmchart"))
- g.Expect(build.Version).To(Equal("0.1.0"))
- g.Expect(build.Path).To(Equal(storage.LocalPath(*cachedArtifact.DeepCopy())))
- g.Expect(build.Path).To(BeARegularFile())
- },
- },
- {
- name: "Generation change forces rebuild",
- source: *chartsArtifact.DeepCopy(),
- beforeFunc: func(obj *sourcev1.HelmChart) {
- obj.Generation = 2
- obj.Spec.Chart = "testdata/charts/helmchart-0.1.0.tgz"
- obj.Status.Artifact = cachedArtifact.DeepCopy()
- obj.Status.ObservedGeneration = 1
- },
- want: sreconcile.ResultSuccess,
- assertFunc: func(g *WithT, build chart.Build) {
- g.Expect(build.Name).To(Equal("helmchart"))
- g.Expect(build.Version).To(Equal("0.1.0"))
- g.Expect(build.Path).ToNot(Equal(storage.LocalPath(*cachedArtifact.DeepCopy())))
- g.Expect(build.Path).To(BeARegularFile())
- },
- cleanFunc: func(g *WithT, build *chart.Build) {
- g.Expect(os.Remove(build.Path)).To(Succeed())
- },
- },
- {
- name: "Empty source artifact",
- source: sourcev1.Artifact{},
- want: sreconcile.ResultEmpty,
- wantErr: &serror.Event{Err: errors.New("no such file or directory")},
- assertFunc: func(g *WithT, build chart.Build) {
- g.Expect(build.Complete()).To(BeFalse())
- },
- },
- {
- name: "Invalid artifact type",
- source: *yamlArtifact,
- want: sreconcile.ResultEmpty,
- wantErr: &serror.Event{Err: errors.New("artifact untar error: requires gzip-compressed body")},
- assertFunc: func(g *WithT, build chart.Build) {
- g.Expect(build.Complete()).To(BeFalse())
- },
- },
- }
- for _, tt := range tests {
- t.Run(tt.name, func(t *testing.T) {
- g := NewWithT(t)
-
- r := &HelmChartReconciler{
- Client: fake.NewClientBuilder().Build(),
- EventRecorder: record.NewFakeRecorder(32),
- Storage: storage,
- Getters: testGetters,
- RegistryClientGenerator: registry.ClientGenerator,
- }
-
- obj := &sourcev1.HelmChart{
- ObjectMeta: metav1.ObjectMeta{
- Name: "artifact",
- Namespace: "default",
- },
- Spec: sourcev1.HelmChartSpec{},
- }
- if tt.beforeFunc != nil {
- tt.beforeFunc(obj)
- }
-
- var b chart.Build
- if tt.cleanFunc != nil {
- defer tt.cleanFunc(g, &b)
- }
-
- got, err := r.buildFromTarballArtifact(context.TODO(), obj, tt.source, &b)
- g.Expect(err != nil).To(Equal(tt.wantErr != nil))
- if tt.wantErr != nil {
- g.Expect(reflect.TypeOf(err).String()).To(Equal(reflect.TypeOf(tt.wantErr).String()))
- g.Expect(err.Error()).To(ContainSubstring(tt.wantErr.Error()))
- }
- g.Expect(got).To(Equal(tt.want))
-
- if tt.assertFunc != nil {
- tt.assertFunc(g, b)
- }
- })
- }
-}
-
-func TestHelmChartReconciler_reconcileArtifact(t *testing.T) {
- tests := []struct {
- name string
- build *chart.Build
- beforeFunc func(obj *sourcev1.HelmChart)
- want sreconcile.Result
- wantErr bool
- assertConditions []metav1.Condition
- afterFunc func(t *WithT, obj *sourcev1.HelmChart)
- }{
- {
- name: "Incomplete build requeues and does not update status",
- build: &chart.Build{},
- beforeFunc: func(obj *sourcev1.HelmChart) {
- conditions.MarkTrue(obj, sourcev1.ArtifactOutdatedCondition, "Foo", "")
- },
- want: sreconcile.ResultRequeue,
- assertConditions: []metav1.Condition{
- *conditions.TrueCondition(sourcev1.ArtifactOutdatedCondition, "Foo", ""),
- },
- },
- {
- name: "Copying artifact to storage from build makes ArtifactInStorage=True",
- build: mockChartBuild("helmchart", "0.1.0", "testdata/charts/helmchart-0.1.0.tgz"),
- beforeFunc: func(obj *sourcev1.HelmChart) {
- conditions.MarkTrue(obj, sourcev1.ArtifactOutdatedCondition, "Foo", "")
- },
- afterFunc: func(t *WithT, obj *sourcev1.HelmChart) {
- t.Expect(obj.GetArtifact()).ToNot(BeNil())
- t.Expect(obj.GetArtifact().Checksum).To(Equal("bbdf96023c912c393b49d5238e227576ed0d20d1bb145d7476d817b80e20c11a"))
- t.Expect(obj.GetArtifact().Revision).To(Equal("0.1.0"))
- t.Expect(obj.Status.URL).ToNot(BeEmpty())
- t.Expect(obj.Status.ObservedChartName).To(Equal("helmchart"))
- },
- want: sreconcile.ResultSuccess,
- assertConditions: []metav1.Condition{
- *conditions.TrueCondition(sourcev1.ArtifactInStorageCondition, sourcev1.ChartPullSucceededReason, "pulled 'helmchart' chart with version '0.1.0'"),
- },
- },
- {
- name: "Up-to-date chart build does not persist artifact to storage",
- build: &chart.Build{
- Name: "helmchart",
- Version: "0.1.0",
- Path: filepath.Join(testStorage.BasePath, "testdata/charts/helmchart-0.1.0.tgz"),
- },
- beforeFunc: func(obj *sourcev1.HelmChart) {
- obj.Status.Artifact = &sourcev1.Artifact{
- Path: "testdata/charts/helmchart-0.1.0.tgz",
- }
- },
- want: sreconcile.ResultSuccess,
- afterFunc: func(t *WithT, obj *sourcev1.HelmChart) {
- t.Expect(obj.Status.Artifact.Path).To(Equal("testdata/charts/helmchart-0.1.0.tgz"))
- t.Expect(obj.Status.ObservedChartName).To(BeEmpty())
- t.Expect(obj.Status.URL).To(BeEmpty())
- },
- },
- {
- name: "Restores conditions in case artifact matches current chart build",
- build: &chart.Build{
- Name: "helmchart",
- Version: "0.1.0",
- Path: filepath.Join(testStorage.BasePath, "testdata/charts/helmchart-0.1.0.tgz"),
- Packaged: true,
- },
- beforeFunc: func(obj *sourcev1.HelmChart) {
- obj.Status.ObservedChartName = "helmchart"
- obj.Status.Artifact = &sourcev1.Artifact{
- Revision: "0.1.0",
- Path: "testdata/charts/helmchart-0.1.0.tgz",
- }
- },
- want: sreconcile.ResultSuccess,
- afterFunc: func(t *WithT, obj *sourcev1.HelmChart) {
- t.Expect(obj.Status.Artifact.Path).To(Equal("testdata/charts/helmchart-0.1.0.tgz"))
- t.Expect(obj.Status.URL).To(BeEmpty())
- },
- assertConditions: []metav1.Condition{
- *conditions.TrueCondition(sourcev1.ArtifactInStorageCondition, sourcev1.ChartPackageSucceededReason, "packaged 'helmchart' chart with version '0.1.0'"),
- },
- },
- {
- name: "Removes ArtifactOutdatedCondition after creating new artifact",
- build: mockChartBuild("helmchart", "0.1.0", "testdata/charts/helmchart-0.1.0.tgz"),
- beforeFunc: func(obj *sourcev1.HelmChart) {
- conditions.MarkTrue(obj, sourcev1.ArtifactOutdatedCondition, "Foo", "")
- },
- afterFunc: func(t *WithT, obj *sourcev1.HelmChart) {
- t.Expect(obj.GetArtifact()).ToNot(BeNil())
- t.Expect(obj.GetArtifact().Checksum).To(Equal("bbdf96023c912c393b49d5238e227576ed0d20d1bb145d7476d817b80e20c11a"))
- t.Expect(obj.GetArtifact().Revision).To(Equal("0.1.0"))
- t.Expect(obj.Status.URL).ToNot(BeEmpty())
- t.Expect(obj.Status.ObservedChartName).To(Equal("helmchart"))
- },
- want: sreconcile.ResultSuccess,
- assertConditions: []metav1.Condition{
- *conditions.TrueCondition(sourcev1.ArtifactInStorageCondition, sourcev1.ChartPullSucceededReason, "pulled 'helmchart' chart with version '0.1.0'"),
- },
- },
- {
- name: "Creates latest symlink to the created artifact",
- build: mockChartBuild("helmchart", "0.1.0", "testdata/charts/helmchart-0.1.0.tgz"),
- afterFunc: func(t *WithT, obj *sourcev1.HelmChart) {
- t.Expect(obj.GetArtifact()).ToNot(BeNil())
-
- localPath := testStorage.LocalPath(*obj.GetArtifact())
- symlinkPath := filepath.Join(filepath.Dir(localPath), "latest.tar.gz")
- targetFile, err := os.Readlink(symlinkPath)
- t.Expect(err).NotTo(HaveOccurred())
- t.Expect(localPath).To(Equal(targetFile))
- },
- want: sreconcile.ResultSuccess,
- assertConditions: []metav1.Condition{
- *conditions.TrueCondition(sourcev1.ArtifactInStorageCondition, sourcev1.ChartPullSucceededReason, "pulled 'helmchart' chart with version '0.1.0'"),
- },
- },
- }
-
- for _, tt := range tests {
- t.Run(tt.name, func(t *testing.T) {
- g := NewWithT(t)
-
- r := &HelmChartReconciler{
- EventRecorder: record.NewFakeRecorder(32),
- Storage: testStorage,
- }
-
- obj := &sourcev1.HelmChart{
- ObjectMeta: metav1.ObjectMeta{
- GenerateName: "reconcile-artifact-",
- Generation: 1,
- },
- Status: sourcev1.HelmChartStatus{},
- }
- if tt.beforeFunc != nil {
- tt.beforeFunc(obj)
- }
-
- got, err := r.reconcileArtifact(ctx, obj, tt.build)
- g.Expect(err != nil).To(Equal(tt.wantErr))
- g.Expect(got).To(Equal(tt.want))
- g.Expect(obj.Status.Conditions).To(conditions.MatchConditions(tt.assertConditions))
- if tt.afterFunc != nil {
- tt.afterFunc(g, obj)
- }
- })
- }
-}
-
-func TestHelmChartReconciler_getHelmRepositorySecret(t *testing.T) {
- mock := &corev1.Secret{
- TypeMeta: metav1.TypeMeta{
- Kind: "Secret",
- APIVersion: "v1",
- },
- ObjectMeta: metav1.ObjectMeta{
- Name: "secret",
- Namespace: "foo",
- },
- Data: map[string][]byte{
- "key": []byte("bar"),
- },
- }
- clientBuilder := fake.NewClientBuilder()
- clientBuilder.WithObjects(mock)
-
- r := &HelmChartReconciler{
- Client: clientBuilder.Build(),
- }
-
- tests := []struct {
- name string
- repository *sourcev1.HelmRepository
- want *corev1.Secret
- wantErr bool
- }{
- {
- name: "Existing secret reference",
- repository: &sourcev1.HelmRepository{
- ObjectMeta: metav1.ObjectMeta{
- Namespace: mock.Namespace,
- },
- Spec: sourcev1.HelmRepositorySpec{
- SecretRef: &meta.LocalObjectReference{
- Name: mock.Name,
- },
- },
- },
- want: mock,
- },
- {
- name: "Empty secret reference",
- repository: &sourcev1.HelmRepository{
- Spec: sourcev1.HelmRepositorySpec{
- SecretRef: nil,
- },
- },
- want: nil,
- },
- {
- name: "Error on client error",
- repository: &sourcev1.HelmRepository{
- ObjectMeta: metav1.ObjectMeta{
- Namespace: "different",
- },
- Spec: sourcev1.HelmRepositorySpec{
- SecretRef: &meta.LocalObjectReference{
- Name: mock.Name,
- },
- },
- },
- wantErr: true,
- },
- }
- for _, tt := range tests {
- t.Run(tt.name, func(t *testing.T) {
- g := NewWithT(t)
-
- got, err := r.getHelmRepositorySecret(context.TODO(), tt.repository)
- g.Expect(err != nil).To(Equal(tt.wantErr))
- g.Expect(got).To(Equal(tt.want))
- })
- }
-}
-
-func TestHelmChartReconciler_getSource(t *testing.T) {
- mocks := []client.Object{
- &sourcev1.HelmRepository{
- TypeMeta: metav1.TypeMeta{
- Kind: sourcev1.HelmRepositoryKind,
- APIVersion: "source.toolkit.fluxcd.io/v1beta2",
- },
- ObjectMeta: metav1.ObjectMeta{
- Name: "helmrepository",
- Namespace: "foo",
- },
- },
- &sourcev1.GitRepository{
- TypeMeta: metav1.TypeMeta{
- Kind: sourcev1.GitRepositoryKind,
- APIVersion: "source.toolkit.fluxcd.io/v1beta2",
- },
- ObjectMeta: metav1.ObjectMeta{
- Name: "gitrepository",
- Namespace: "foo",
- },
- },
- &sourcev1.Bucket{
- TypeMeta: metav1.TypeMeta{
- Kind: sourcev1.BucketKind,
- APIVersion: "source.toolkit.fluxcd.io/v1beta2",
- },
- ObjectMeta: metav1.ObjectMeta{
- Name: "bucket",
- Namespace: "foo",
- },
- },
- }
- clientBuilder := fake.NewClientBuilder()
- clientBuilder.WithObjects(mocks...)
-
- r := &HelmChartReconciler{
- Client: clientBuilder.Build(),
- }
-
- tests := []struct {
- name string
- obj *sourcev1.HelmChart
- want sourcev1.Source
- wantErr bool
- }{
- {
- name: "Get HelmRepository source for reference",
- obj: &sourcev1.HelmChart{
- ObjectMeta: metav1.ObjectMeta{
- Namespace: mocks[0].GetNamespace(),
- },
- Spec: sourcev1.HelmChartSpec{
- SourceRef: sourcev1.LocalHelmChartSourceReference{
- Name: mocks[0].GetName(),
- Kind: mocks[0].GetObjectKind().GroupVersionKind().Kind,
- },
- },
- },
- want: mocks[0].(sourcev1.Source),
- },
- {
- name: "Get GitRepository source for reference",
- obj: &sourcev1.HelmChart{
- ObjectMeta: metav1.ObjectMeta{
- Namespace: mocks[1].GetNamespace(),
- },
- Spec: sourcev1.HelmChartSpec{
- SourceRef: sourcev1.LocalHelmChartSourceReference{
- Name: mocks[1].GetName(),
- Kind: mocks[1].GetObjectKind().GroupVersionKind().Kind,
- },
- },
- },
- want: mocks[1].(sourcev1.Source),
- },
- {
- name: "Get Bucket source for reference",
- obj: &sourcev1.HelmChart{
- ObjectMeta: metav1.ObjectMeta{
- Namespace: mocks[2].GetNamespace(),
- },
- Spec: sourcev1.HelmChartSpec{
- SourceRef: sourcev1.LocalHelmChartSourceReference{
- Name: mocks[2].GetName(),
- Kind: mocks[2].GetObjectKind().GroupVersionKind().Kind,
- },
- },
- },
- want: mocks[2].(sourcev1.Source),
- },
- {
- name: "Error on client error",
- obj: &sourcev1.HelmChart{
- ObjectMeta: metav1.ObjectMeta{
- Namespace: mocks[2].GetNamespace(),
- },
- Spec: sourcev1.HelmChartSpec{
- SourceRef: sourcev1.LocalHelmChartSourceReference{
- Name: mocks[1].GetName(),
- Kind: mocks[2].GetObjectKind().GroupVersionKind().Kind,
- },
- },
- },
- wantErr: true,
- },
- {
- name: "Error on unsupported source kind",
- obj: &sourcev1.HelmChart{
- Spec: sourcev1.HelmChartSpec{
- SourceRef: sourcev1.LocalHelmChartSourceReference{
- Name: "unsupported",
- Kind: "Unsupported",
- },
- },
- },
- wantErr: true,
- },
- }
- for _, tt := range tests {
- t.Run(tt.name, func(t *testing.T) {
- g := NewWithT(t)
-
- got, err := r.getSource(context.TODO(), tt.obj)
-
- if tt.wantErr {
- g.Expect(err).To(HaveOccurred())
- g.Expect(got).To(BeNil())
- return
- }
-
- g.Expect(got).To(Equal(tt.want))
- g.Expect(err).ToNot(HaveOccurred())
- })
- }
-}
-
-func TestHelmChartReconciler_reconcileDelete(t *testing.T) {
- g := NewWithT(t)
-
- r := &HelmChartReconciler{
- EventRecorder: record.NewFakeRecorder(32),
- Storage: testStorage,
- }
-
- obj := &sourcev1.HelmChart{
- ObjectMeta: metav1.ObjectMeta{
- Name: "reconcile-delete-",
- DeletionTimestamp: &metav1.Time{Time: time.Now()},
- Finalizers: []string{
- sourcev1.SourceFinalizer,
- },
- },
- Status: sourcev1.HelmChartStatus{},
- }
-
- artifact := testStorage.NewArtifactFor(sourcev1.HelmChartKind, obj.GetObjectMeta(), "revision", "foo.txt")
- obj.Status.Artifact = &artifact
-
- got, err := r.reconcileDelete(ctx, obj)
- g.Expect(err).NotTo(HaveOccurred())
- g.Expect(got).To(Equal(sreconcile.ResultEmpty))
- g.Expect(controllerutil.ContainsFinalizer(obj, sourcev1.SourceFinalizer)).To(BeFalse())
- g.Expect(obj.Status.Artifact).To(BeNil())
-}
-
-func TestHelmChartReconciler_reconcileSubRecs(t *testing.T) {
- // Helper to build simple helmChartReconcileFunc with result and error.
- buildReconcileFuncs := func(r sreconcile.Result, e error) helmChartReconcileFunc {
- return func(_ context.Context, _ *sourcev1.HelmChart, _ *chart.Build) (sreconcile.Result, error) {
- return r, e
- }
- }
-
- tests := []struct {
- name string
- generation int64
- observedGeneration int64
- reconcileFuncs []helmChartReconcileFunc
- wantResult sreconcile.Result
- wantErr bool
- assertConditions []metav1.Condition
- }{
- {
- name: "successful reconciliations",
- reconcileFuncs: []helmChartReconcileFunc{
- buildReconcileFuncs(sreconcile.ResultSuccess, nil),
- },
- wantResult: sreconcile.ResultSuccess,
- wantErr: false,
- },
- {
- name: "successful reconciliation with generation difference",
- generation: 3,
- observedGeneration: 2,
- reconcileFuncs: []helmChartReconcileFunc{
- buildReconcileFuncs(sreconcile.ResultSuccess, nil),
- },
- wantResult: sreconcile.ResultSuccess,
- wantErr: false,
- assertConditions: []metav1.Condition{
- *conditions.TrueCondition(meta.ReconcilingCondition, "NewGeneration", "reconciling new object generation (3)"),
- },
- },
- {
- name: "failed reconciliation",
- reconcileFuncs: []helmChartReconcileFunc{
- buildReconcileFuncs(sreconcile.ResultEmpty, fmt.Errorf("some error")),
- },
- wantResult: sreconcile.ResultEmpty,
- wantErr: true,
- },
- {
- name: "multiple object status conditions mutations",
- reconcileFuncs: []helmChartReconcileFunc{
- func(_ context.Context, obj *sourcev1.HelmChart, _ *chart.Build) (sreconcile.Result, error) {
- conditions.MarkTrue(obj, sourcev1.ArtifactOutdatedCondition, "NewRevision", "new index revision")
- return sreconcile.ResultSuccess, nil
- },
- func(_ context.Context, obj *sourcev1.HelmChart, _ *chart.Build) (sreconcile.Result, error) {
- conditions.MarkTrue(obj, meta.ReconcilingCondition, "Progressing", "creating artifact")
- return sreconcile.ResultSuccess, nil
- },
- },
- wantResult: sreconcile.ResultSuccess,
- wantErr: false,
- assertConditions: []metav1.Condition{
- *conditions.TrueCondition(sourcev1.ArtifactOutdatedCondition, "NewRevision", "new index revision"),
- *conditions.TrueCondition(meta.ReconcilingCondition, "Progressing", "creating artifact"),
- },
- },
- {
- name: "subrecs with one result=Requeue, no error",
- reconcileFuncs: []helmChartReconcileFunc{
- buildReconcileFuncs(sreconcile.ResultSuccess, nil),
- buildReconcileFuncs(sreconcile.ResultRequeue, nil),
- buildReconcileFuncs(sreconcile.ResultSuccess, nil),
- },
- wantResult: sreconcile.ResultRequeue,
- wantErr: false,
- },
- {
- name: "subrecs with error before result=Requeue",
- reconcileFuncs: []helmChartReconcileFunc{
- buildReconcileFuncs(sreconcile.ResultSuccess, nil),
- buildReconcileFuncs(sreconcile.ResultEmpty, fmt.Errorf("some error")),
- buildReconcileFuncs(sreconcile.ResultRequeue, nil),
- },
- wantResult: sreconcile.ResultEmpty,
- wantErr: true,
- },
- }
-
- for _, tt := range tests {
- t.Run(tt.name, func(t *testing.T) {
- g := NewWithT(t)
-
- r := &HelmChartReconciler{}
- obj := &sourcev1.HelmChart{
- ObjectMeta: metav1.ObjectMeta{
- GenerateName: "test-",
- Generation: tt.generation,
- },
- Status: sourcev1.HelmChartStatus{
- ObservedGeneration: tt.observedGeneration,
- },
- }
-
- got, err := r.reconcile(context.TODO(), obj, tt.reconcileFuncs)
- g.Expect(err != nil).To(Equal(tt.wantErr))
- g.Expect(got).To(Equal(tt.wantResult))
-
- g.Expect(obj.Status.Conditions).To(conditions.MatchConditions(tt.assertConditions))
- })
- }
-}
-
-func mockChartBuild(name, version, path string) *chart.Build {
- var copyP string
- if path != "" {
- f, err := os.Open(path)
- if err == nil {
- defer f.Close()
- ff, err := os.CreateTemp("", "chart-mock-*.tgz")
- if err == nil {
- defer ff.Close()
- if _, err = io.Copy(ff, f); err == nil {
- copyP = ff.Name()
- }
- }
- }
- }
- return &chart.Build{
- Name: name,
- Version: version,
- Path: copyP,
- }
-}
-
-func TestHelmChartReconciler_statusConditions(t *testing.T) {
- tests := []struct {
- name string
- beforeFunc func(obj *sourcev1.HelmChart)
- assertConditions []metav1.Condition
- }{
- {
- name: "positive conditions only",
- beforeFunc: func(obj *sourcev1.HelmChart) {
- conditions.MarkTrue(obj, sourcev1.ArtifactInStorageCondition, meta.SucceededReason, "stored artifact for revision")
- },
- assertConditions: []metav1.Condition{
- *conditions.TrueCondition(meta.ReadyCondition, meta.SucceededReason, "stored artifact for revision"),
- *conditions.TrueCondition(sourcev1.ArtifactInStorageCondition, meta.SucceededReason, "stored artifact for revision"),
- },
- },
- {
- name: "multiple failures",
- beforeFunc: func(obj *sourcev1.HelmChart) {
- conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, sourcev1.AuthenticationFailedReason, "failed to get secret")
- conditions.MarkTrue(obj, sourcev1.StorageOperationFailedCondition, sourcev1.DirCreationFailedReason, "failed to create directory")
- conditions.MarkTrue(obj, sourcev1.BuildFailedCondition, "ChartPackageError", "some error")
- conditions.MarkTrue(obj, sourcev1.ArtifactOutdatedCondition, "NewRevision", "some error")
- },
- assertConditions: []metav1.Condition{
- *conditions.FalseCondition(meta.ReadyCondition, sourcev1.DirCreationFailedReason, "failed to create directory"),
- *conditions.TrueCondition(sourcev1.FetchFailedCondition, sourcev1.AuthenticationFailedReason, "failed to get secret"),
- *conditions.TrueCondition(sourcev1.StorageOperationFailedCondition, sourcev1.DirCreationFailedReason, "failed to create directory"),
- *conditions.TrueCondition(sourcev1.BuildFailedCondition, "ChartPackageError", "some error"),
- *conditions.TrueCondition(sourcev1.ArtifactOutdatedCondition, "NewRevision", "some error"),
- },
- },
- {
- name: "mixed positive and negative conditions",
- beforeFunc: func(obj *sourcev1.HelmChart) {
- conditions.MarkTrue(obj, sourcev1.ArtifactInStorageCondition, meta.SucceededReason, "stored artifact for revision")
- conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, sourcev1.AuthenticationFailedReason, "failed to get secret")
- },
- assertConditions: []metav1.Condition{
- *conditions.FalseCondition(meta.ReadyCondition, sourcev1.AuthenticationFailedReason, "failed to get secret"),
- *conditions.TrueCondition(sourcev1.FetchFailedCondition, sourcev1.AuthenticationFailedReason, "failed to get secret"),
- *conditions.TrueCondition(sourcev1.ArtifactInStorageCondition, meta.SucceededReason, "stored artifact for revision"),
- },
- },
- }
-
- for _, tt := range tests {
- t.Run(tt.name, func(t *testing.T) {
- g := NewWithT(t)
-
- obj := &sourcev1.HelmChart{
- TypeMeta: metav1.TypeMeta{
- Kind: sourcev1.HelmChartKind,
- APIVersion: "source.toolkit.fluxcd.io/v1beta2",
- },
- ObjectMeta: metav1.ObjectMeta{
- Name: "helmchart",
- Namespace: "foo",
- },
- }
- clientBuilder := fake.NewClientBuilder()
- clientBuilder.WithObjects(obj)
- c := clientBuilder.Build()
-
- patchHelper, err := patch.NewHelper(obj, c)
- g.Expect(err).ToNot(HaveOccurred())
-
- if tt.beforeFunc != nil {
- tt.beforeFunc(obj)
- }
-
- ctx := context.TODO()
- recResult := sreconcile.ResultSuccess
- var retErr error
-
- summarizeHelper := summarize.NewHelper(record.NewFakeRecorder(32), patchHelper)
- summarizeOpts := []summarize.Option{
- summarize.WithConditions(helmChartReadyCondition),
- summarize.WithReconcileResult(recResult),
- summarize.WithReconcileError(retErr),
- summarize.WithIgnoreNotFound(),
- summarize.WithResultBuilder(sreconcile.AlwaysRequeueResultBuilder{RequeueAfter: obj.GetRequeueAfter()}),
- summarize.WithPatchFieldOwner("source-controller"),
- }
- _, retErr = summarizeHelper.SummarizeAndPatch(ctx, obj, summarizeOpts...)
-
- key := client.ObjectKeyFromObject(obj)
- g.Expect(c.Get(ctx, key, obj)).ToNot(HaveOccurred())
- g.Expect(obj.GetConditions()).To(conditions.MatchConditions(tt.assertConditions))
- })
- }
-}
-
-func TestHelmChartReconciler_notify(t *testing.T) {
- tests := []struct {
- name string
- res sreconcile.Result
- resErr error
- oldObjBeforeFunc func(obj *sourcev1.HelmChart)
- newObjBeforeFunc func(obj *sourcev1.HelmChart)
- wantEvent string
- }{
- {
- name: "error - no event",
- res: sreconcile.ResultEmpty,
- resErr: errors.New("some error"),
- },
- {
- name: "new artifact",
- res: sreconcile.ResultSuccess,
- resErr: nil,
- newObjBeforeFunc: func(obj *sourcev1.HelmChart) {
- obj.Status.Artifact = &sourcev1.Artifact{Revision: "xxx", Checksum: "yyy"}
- },
- wantEvent: "Normal ChartPackageSucceeded packaged",
- },
- {
- name: "recovery from failure",
- res: sreconcile.ResultSuccess,
- resErr: nil,
- oldObjBeforeFunc: func(obj *sourcev1.HelmChart) {
- obj.Status.Artifact = &sourcev1.Artifact{Revision: "xxx", Checksum: "yyy"}
- conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, sourcev1.GitOperationFailedReason, "fail")
- conditions.MarkFalse(obj, meta.ReadyCondition, meta.FailedReason, "foo")
- },
- newObjBeforeFunc: func(obj *sourcev1.HelmChart) {
- obj.Status.Artifact = &sourcev1.Artifact{Revision: "xxx", Checksum: "yyy"}
- conditions.MarkTrue(obj, meta.ReadyCondition, meta.SucceededReason, "ready")
- },
- wantEvent: "Normal ChartPackageSucceeded packaged",
- },
- {
- name: "recovery and new artifact",
- res: sreconcile.ResultSuccess,
- resErr: nil,
- oldObjBeforeFunc: func(obj *sourcev1.HelmChart) {
- obj.Status.Artifact = &sourcev1.Artifact{Revision: "xxx", Checksum: "yyy"}
- conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, sourcev1.GitOperationFailedReason, "fail")
- conditions.MarkFalse(obj, meta.ReadyCondition, meta.FailedReason, "foo")
- },
- newObjBeforeFunc: func(obj *sourcev1.HelmChart) {
- obj.Status.Artifact = &sourcev1.Artifact{Revision: "aaa", Checksum: "bbb"}
- conditions.MarkTrue(obj, meta.ReadyCondition, meta.SucceededReason, "ready")
- },
- wantEvent: "Normal ChartPackageSucceeded packaged",
- },
- {
- name: "no updates",
- res: sreconcile.ResultSuccess,
- resErr: nil,
- oldObjBeforeFunc: func(obj *sourcev1.HelmChart) {
- obj.Status.Artifact = &sourcev1.Artifact{Revision: "xxx", Checksum: "yyy"}
- conditions.MarkTrue(obj, meta.ReadyCondition, meta.SucceededReason, "ready")
- },
- newObjBeforeFunc: func(obj *sourcev1.HelmChart) {
- obj.Status.Artifact = &sourcev1.Artifact{Revision: "xxx", Checksum: "yyy"}
- conditions.MarkTrue(obj, meta.ReadyCondition, meta.SucceededReason, "ready")
- },
- },
- }
-
- for _, tt := range tests {
- t.Run(tt.name, func(t *testing.T) {
- g := NewWithT(t)
- recorder := record.NewFakeRecorder(32)
-
- oldObj := &sourcev1.HelmChart{}
- newObj := oldObj.DeepCopy()
-
- if tt.oldObjBeforeFunc != nil {
- tt.oldObjBeforeFunc(oldObj)
- }
- if tt.newObjBeforeFunc != nil {
- tt.newObjBeforeFunc(newObj)
- }
-
- reconciler := &HelmChartReconciler{
- EventRecorder: recorder,
- }
- build := &chart.Build{
- Name: "foo",
- Version: "1.0.0",
- Path: "some/path",
- Packaged: true,
- }
- reconciler.notify(ctx, oldObj, newObj, build, tt.res, tt.resErr)
-
- select {
- case x, ok := <-recorder.Events:
- g.Expect(ok).To(Equal(tt.wantEvent != ""), "unexpected event received")
- if tt.wantEvent != "" {
- g.Expect(x).To(ContainSubstring(tt.wantEvent))
- }
- default:
- if tt.wantEvent != "" {
- t.Errorf("expected some event to be emitted")
- }
- }
- })
- }
-}
-
-// extractChartMeta is used to extract a chart metadata from a byte array
-func extractChartMeta(chartData []byte) (*hchart.Metadata, error) {
- ch, err := loader.LoadArchive(bytes.NewReader(chartData))
- if err != nil {
- return nil, err
- }
- return ch.Metadata, nil
-}
diff --git a/controllers/helmrepository_controller_oci.go b/controllers/helmrepository_controller_oci.go
deleted file mode 100644
index a7d812fa0..000000000
--- a/controllers/helmrepository_controller_oci.go
+++ /dev/null
@@ -1,368 +0,0 @@
-/*
-Copyright 2022 The Flux authors
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package controllers
-
-import (
- "context"
- "errors"
- "fmt"
- "net/url"
- "os"
- "time"
-
- helmgetter "helm.sh/helm/v3/pkg/getter"
- helmreg "helm.sh/helm/v3/pkg/registry"
- corev1 "k8s.io/api/core/v1"
- apierrors "k8s.io/apimachinery/pkg/api/errors"
- metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
- "k8s.io/apimachinery/pkg/runtime"
- "k8s.io/apimachinery/pkg/types"
- kerrors "k8s.io/apimachinery/pkg/util/errors"
- "k8s.io/apimachinery/pkg/util/uuid"
- kuberecorder "k8s.io/client-go/tools/record"
- ctrl "sigs.k8s.io/controller-runtime"
- "sigs.k8s.io/controller-runtime/pkg/client"
- "sigs.k8s.io/controller-runtime/pkg/controller"
- "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
- "sigs.k8s.io/controller-runtime/pkg/predicate"
-
- "github.com/fluxcd/pkg/apis/meta"
- "github.com/fluxcd/pkg/runtime/conditions"
- helper "github.com/fluxcd/pkg/runtime/controller"
- "github.com/fluxcd/pkg/runtime/patch"
- "github.com/fluxcd/pkg/runtime/predicates"
-
- "github.com/fluxcd/source-controller/api/v1beta2"
- sourcev1 "github.com/fluxcd/source-controller/api/v1beta2"
- "github.com/fluxcd/source-controller/internal/helm/registry"
- "github.com/fluxcd/source-controller/internal/helm/repository"
- "github.com/fluxcd/source-controller/internal/object"
- intpredicates "github.com/fluxcd/source-controller/internal/predicates"
-)
-
-var helmRepositoryOCIOwnedConditions = []string{
- meta.ReadyCondition,
- meta.ReconcilingCondition,
- meta.StalledCondition,
-}
-
-var helmRepositoryOCINegativeConditions = []string{
- meta.StalledCondition,
- meta.ReconcilingCondition,
-}
-
-// +kubebuilder:rbac:groups=source.toolkit.fluxcd.io,resources=helmrepositories,verbs=get;list;watch;create;update;patch;delete
-// +kubebuilder:rbac:groups=source.toolkit.fluxcd.io,resources=helmrepositories/status,verbs=get;update;patch
-// +kubebuilder:rbac:groups=source.toolkit.fluxcd.io,resources=helmrepositories/finalizers,verbs=get;create;update;patch;delete
-// +kubebuilder:rbac:groups="",resources=events,verbs=create;patch
-
-// HelmRepositoryOCI Reconciler reconciles a v1beta2.HelmRepository object of type OCI.
-type HelmRepositoryOCIReconciler struct {
- client.Client
- kuberecorder.EventRecorder
- helper.Metrics
- Getters helmgetter.Providers
- ControllerName string
- RegistryClientGenerator RegistryClientGeneratorFunc
-}
-
-// RegistryClientGeneratorFunc is a function that returns a registry client
-// and an optional file name.
-// The file is used to store the registry client credentials.
-// The caller is responsible for deleting the file.
-type RegistryClientGeneratorFunc func(isLogin bool) (*helmreg.Client, string, error)
-
-func (r *HelmRepositoryOCIReconciler) SetupWithManager(mgr ctrl.Manager) error {
- return r.SetupWithManagerAndOptions(mgr, HelmRepositoryReconcilerOptions{})
-}
-
-func (r *HelmRepositoryOCIReconciler) SetupWithManagerAndOptions(mgr ctrl.Manager, opts HelmRepositoryReconcilerOptions) error {
- return ctrl.NewControllerManagedBy(mgr).
- For(&sourcev1.HelmRepository{}).
- WithEventFilter(
- predicate.And(
- intpredicates.HelmRepositoryTypePredicate{RepositoryType: sourcev1.HelmRepositoryTypeOCI},
- predicate.Or(predicate.GenerationChangedPredicate{}, predicates.ReconcileRequestedPredicate{}),
- ),
- ).
- WithOptions(controller.Options{
- MaxConcurrentReconciles: opts.MaxConcurrentReconciles,
- RateLimiter: opts.RateLimiter,
- RecoverPanic: true,
- }).
- Complete(r)
-}
-
-func (r *HelmRepositoryOCIReconciler) Reconcile(ctx context.Context, req ctrl.Request) (result ctrl.Result, retErr error) {
- start := time.Now()
- log := ctrl.LoggerFrom(ctx).
- // Sets a reconcile ID to correlate logs from all suboperations.
- WithValues("reconcileID", uuid.NewUUID())
-
- // logger will be associated to the new context that is
- // returned from ctrl.LoggerInto.
- ctx = ctrl.LoggerInto(ctx, log)
-
- // Fetch the HelmRepository
- obj := &sourcev1.HelmRepository{}
- if err := r.Get(ctx, req.NamespacedName, obj); err != nil {
- return ctrl.Result{}, client.IgnoreNotFound(err)
- }
-
- // Record suspended status metric
- r.RecordSuspend(ctx, obj, obj.Spec.Suspend)
-
- // Return early if the object is suspended
- if obj.Spec.Suspend {
- log.Info("reconciliation is suspended for this object")
- return ctrl.Result{}, nil
- }
-
- // Initialize the patch helper with the current version of the object.
- patchHelper, err := patch.NewHelper(obj, r.Client)
- if err != nil {
- return ctrl.Result{}, err
- }
-
- // Always attempt to patch the object after each reconciliation.
- defer func() {
- // Patch the object, prioritizing the conditions owned by the controller in
- // case of any conflicts.
- patchOpts := []patch.Option{
- patch.WithOwnedConditions{
- Conditions: helmRepositoryOCIOwnedConditions,
- },
- }
- patchOpts = append(patchOpts, patch.WithFieldOwner(r.ControllerName))
- // If a reconcile annotation value is found, set it in the object status
- // as status.lastHandledReconcileAt.
- if v, ok := meta.ReconcileAnnotationValue(obj.GetAnnotations()); ok {
- object.SetStatusLastHandledReconcileAt(obj, v)
- }
-
- // Set status observed generation option if the object is stalled, or
- // if the object is ready.
- if conditions.IsStalled(obj) || conditions.IsReady(obj) {
- patchOpts = append(patchOpts, patch.WithStatusObservedGeneration{})
- }
-
- if err = patchHelper.Patch(ctx, obj, patchOpts...); err != nil {
- // Ignore patch error "not found" when the object is being deleted.
- if !obj.GetDeletionTimestamp().IsZero() {
- err = kerrors.FilterOut(err, func(e error) bool { return apierrors.IsNotFound(e) })
- }
- retErr = kerrors.NewAggregate([]error{retErr, err})
- }
-
- // Always record readiness and duration metrics
- r.Metrics.RecordReadiness(ctx, obj)
- r.Metrics.RecordDuration(ctx, obj, start)
- }()
-
- // Add finalizer first if it doesn't exist to avoid the race condition
- // between init and delete.
- if !controllerutil.ContainsFinalizer(obj, sourcev1.SourceFinalizer) {
- controllerutil.AddFinalizer(obj, sourcev1.SourceFinalizer)
- return ctrl.Result{Requeue: true}, nil
- }
-
- // Examine if the object is under deletion.
- if !obj.ObjectMeta.DeletionTimestamp.IsZero() {
- return r.reconcileDelete(ctx, obj)
- }
-
- // Examine if a type change has happened and act accordingly
- if obj.Spec.Type != sourcev1.HelmRepositoryTypeOCI {
- // Remove any stale condition and ignore the object if the type has
- // changed.
- obj.Status.Conditions = nil
- return ctrl.Result{}, nil
- }
-
- result, retErr = r.reconcile(ctx, obj)
- return
-}
-
-// reconcile reconciles the HelmRepository object. While reconciling, when an
-// error is encountered, it sets the failure details in the appropriate status
-// condition type and returns the error with appropriate ctrl.Result. The object
-// status conditions and the returned results are evaluated in the deferred
-// block at the very end to summarize the conditions to be in a consistent
-// state.
-func (r *HelmRepositoryOCIReconciler) reconcile(ctx context.Context, obj *v1beta2.HelmRepository) (result ctrl.Result, retErr error) {
- oldObj := obj.DeepCopy()
-
- defer func() {
- // If it's stalled, ensure reconciling is removed.
- if sc := conditions.Get(obj, meta.StalledCondition); sc != nil && sc.Status == metav1.ConditionTrue {
- conditions.Delete(obj, meta.ReconcilingCondition)
- }
-
- // Check if it's a successful reconciliation.
- if result.RequeueAfter == obj.GetRequeueAfter() && result.Requeue == false &&
- retErr == nil {
- // Remove reconciling condition if the reconciliation was successful.
- conditions.Delete(obj, meta.ReconcilingCondition)
- // If it's not ready even though it's not reconciling or stalled,
- // set the ready failure message as the error.
- // Based on isNonStalledSuccess() from internal/reconcile/summarize.
- if ready := conditions.Get(obj, meta.ReadyCondition); ready != nil &&
- ready.Status == metav1.ConditionFalse && !conditions.IsStalled(obj) {
- retErr = errors.New(conditions.GetMessage(obj, meta.ReadyCondition))
- }
- }
-
- // If it's still a successful reconciliation and it's not reconciling or
- // stalled, mark Ready=True.
- if !conditions.IsReconciling(obj) && !conditions.IsStalled(obj) &&
- retErr == nil && result.RequeueAfter == obj.GetRequeueAfter() {
- conditions.MarkTrue(obj, meta.ReadyCondition, meta.SucceededReason, "Helm repository is ready")
- }
-
- // Emit events when object's state changes.
- ready := conditions.Get(obj, meta.ReadyCondition)
- // Became ready from not ready.
- if !conditions.IsReady(oldObj) && conditions.IsReady(obj) {
- r.eventLogf(ctx, obj, corev1.EventTypeNormal, ready.Reason, ready.Message)
- }
- // Became not ready from ready.
- if conditions.IsReady(oldObj) && !conditions.IsReady(obj) {
- r.eventLogf(ctx, obj, corev1.EventTypeWarning, ready.Reason, ready.Message)
- }
- }()
-
- // Set reconciling condition.
- if obj.Generation != obj.Status.ObservedGeneration {
- conditions.MarkReconciling(obj, "NewGeneration", "reconciling new object generation (%d)", obj.Generation)
- }
-
- // Ensure that it's an OCI URL before continuing.
- if !helmreg.IsOCI(obj.Spec.URL) {
- u, err := url.Parse(obj.Spec.URL)
- if err != nil {
- err = fmt.Errorf("failed to parse URL: %w", err)
- } else {
- err = fmt.Errorf("URL scheme '%s' in '%s' is not supported", u.Scheme, obj.Spec.URL)
- }
- conditions.MarkStalled(obj, sourcev1.URLInvalidReason, err.Error())
- conditions.MarkFalse(obj, meta.ReadyCondition, sourcev1.URLInvalidReason, err.Error())
- ctrl.LoggerFrom(ctx).Error(err, "reconciliation stalled")
- result, retErr = ctrl.Result{}, nil
- return
- }
- conditions.Delete(obj, meta.StalledCondition)
-
- var loginOpts []helmreg.LoginOption
- // Configure any authentication related options.
- if obj.Spec.SecretRef != nil {
- // Attempt to retrieve secret.
- name := types.NamespacedName{
- Namespace: obj.GetNamespace(),
- Name: obj.Spec.SecretRef.Name,
- }
- var secret corev1.Secret
- if err := r.Client.Get(ctx, name, &secret); err != nil {
- e := fmt.Errorf("failed to get secret '%s': %w", name.String(), err)
- conditions.MarkFalse(obj, meta.ReadyCondition, sourcev1.AuthenticationFailedReason, e.Error())
- result, retErr = ctrl.Result{}, e
- return
- }
-
- // Construct login options.
- loginOpt, err := registry.LoginOptionFromSecret(obj.Spec.URL, secret)
- if err != nil {
- e := fmt.Errorf("failed to configure Helm client with secret data: %w", err)
- conditions.MarkFalse(obj, meta.ReadyCondition, sourcev1.AuthenticationFailedReason, e.Error())
- result, retErr = ctrl.Result{}, e
- return
- }
-
- if loginOpt != nil {
- loginOpts = append(loginOpts, loginOpt)
- }
- }
-
- // Create registry client and login if needed.
- registryClient, file, err := r.RegistryClientGenerator(loginOpts != nil)
- if err != nil {
- e := fmt.Errorf("failed to create registry client: %w", err)
- conditions.MarkFalse(obj, meta.ReadyCondition, meta.FailedReason, e.Error())
- result, retErr = ctrl.Result{}, e
- return
- }
- if file != "" {
- defer func() {
- if err := os.Remove(file); err != nil {
- r.eventLogf(ctx, obj, corev1.EventTypeWarning, meta.FailedReason,
- "failed to delete temporary credentials file: %s", err)
- }
- }()
- }
-
- chartRepo, err := repository.NewOCIChartRepository(obj.Spec.URL, repository.WithOCIRegistryClient(registryClient))
- if err != nil {
- e := fmt.Errorf("failed to parse URL '%s': %w", obj.Spec.URL, err)
- conditions.MarkStalled(obj, sourcev1.URLInvalidReason, e.Error())
- conditions.MarkFalse(obj, meta.ReadyCondition, sourcev1.URLInvalidReason, e.Error())
- result, retErr = ctrl.Result{}, nil
- return
- }
- conditions.Delete(obj, meta.StalledCondition)
-
- // Attempt to login to the registry if credentials are provided.
- if loginOpts != nil {
- err = chartRepo.Login(loginOpts...)
- if err != nil {
- e := fmt.Errorf("failed to login to registry '%s': %w", obj.Spec.URL, err)
- conditions.MarkFalse(obj, meta.ReadyCondition, sourcev1.AuthenticationFailedReason, e.Error())
- result, retErr = ctrl.Result{}, e
- return
- }
- }
-
- // Remove any stale Ready condition, most likely False, set above. Its value
- // is derived from the overall result of the reconciliation in the deferred
- // block at the very end.
- conditions.Delete(obj, meta.ReadyCondition)
-
- result, retErr = ctrl.Result{RequeueAfter: obj.GetRequeueAfter()}, nil
- return
-}
-
-func (r *HelmRepositoryOCIReconciler) reconcileDelete(ctx context.Context, obj *sourcev1.HelmRepository) (ctrl.Result, error) {
- // Remove our finalizer from the list
- controllerutil.RemoveFinalizer(obj, sourcev1.SourceFinalizer)
-
- // Stop reconciliation as the object is being deleted
- return ctrl.Result{}, nil
-}
-
-// eventLogf records events, and logs at the same time.
-//
-// This log is different from the debug log in the EventRecorder, in the sense
-// that this is a simple log. While the debug log contains complete details
-// about the event.
-func (r *HelmRepositoryOCIReconciler) eventLogf(ctx context.Context, obj runtime.Object, eventType string, reason string, messageFmt string, args ...interface{}) {
- msg := fmt.Sprintf(messageFmt, args...)
- // Log and emit event.
- if eventType == corev1.EventTypeWarning {
- ctrl.LoggerFrom(ctx).Error(errors.New(reason), msg)
- } else {
- ctrl.LoggerFrom(ctx).Info(msg)
- }
- r.Eventf(obj, eventType, reason, msg)
-}
diff --git a/controllers/helmrepository_controller_oci_test.go b/controllers/helmrepository_controller_oci_test.go
deleted file mode 100644
index 62d49ec29..000000000
--- a/controllers/helmrepository_controller_oci_test.go
+++ /dev/null
@@ -1,163 +0,0 @@
-/*
-Copyright 2022 The Flux authors
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package controllers
-
-import (
- "encoding/base64"
- "fmt"
- "testing"
-
- "github.com/darkowlzz/controller-check/status"
- "github.com/fluxcd/pkg/apis/meta"
- "github.com/fluxcd/pkg/runtime/conditions"
- "github.com/fluxcd/pkg/runtime/patch"
- sourcev1 "github.com/fluxcd/source-controller/api/v1beta2"
- . "github.com/onsi/gomega"
- corev1 "k8s.io/api/core/v1"
- apierrors "k8s.io/apimachinery/pkg/api/errors"
- metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
- kstatus "sigs.k8s.io/cli-utils/pkg/kstatus/status"
- "sigs.k8s.io/controller-runtime/pkg/client"
-)
-
-func TestHelmRepositoryOCIReconciler_Reconcile(t *testing.T) {
- tests := []struct {
- name string
- secretType corev1.SecretType
- secretData map[string][]byte
- }{
- {
- name: "valid auth data",
- secretData: map[string][]byte{
- "username": []byte(testRegistryUsername),
- "password": []byte(testRegistryPassword),
- },
- },
- {
- name: "no auth data",
- secretData: nil,
- },
- {
- name: "dockerconfigjson Secret",
- secretType: corev1.SecretTypeDockerConfigJson,
- secretData: map[string][]byte{
- ".dockerconfigjson": []byte(`{"auths":{"` +
- testRegistryServer.registryHost + `":{"` +
- `auth":"` + base64.StdEncoding.EncodeToString([]byte(testRegistryUsername+":"+testRegistryPassword)) + `"}}}`),
- },
- },
- }
-
- for _, tt := range tests {
- t.Run(tt.name, func(t *testing.T) {
- g := NewWithT(t)
-
- ns, err := testEnv.CreateNamespace(ctx, "helmrepository-oci-reconcile-test")
- g.Expect(err).ToNot(HaveOccurred())
- defer func() { g.Expect(testEnv.Delete(ctx, ns)).To(Succeed()) }()
-
- secret := &corev1.Secret{
- ObjectMeta: metav1.ObjectMeta{
- GenerateName: "helmrepository-",
- Namespace: ns.Name,
- },
- Data: tt.secretData,
- }
- if tt.secretType != "" {
- secret.Type = tt.secretType
- }
-
- g.Expect(testEnv.CreateAndWait(ctx, secret)).To(Succeed())
-
- obj := &sourcev1.HelmRepository{
- ObjectMeta: metav1.ObjectMeta{
- GenerateName: "helmrepository-oci-reconcile-",
- Namespace: ns.Name,
- },
- Spec: sourcev1.HelmRepositorySpec{
- Interval: metav1.Duration{Duration: interval},
- URL: fmt.Sprintf("oci://%s", testRegistryServer.registryHost),
- SecretRef: &meta.LocalObjectReference{
- Name: secret.Name,
- },
- Type: sourcev1.HelmRepositoryTypeOCI,
- },
- }
- g.Expect(testEnv.Create(ctx, obj)).To(Succeed())
-
- key := client.ObjectKey{Name: obj.Name, Namespace: obj.Namespace}
-
- // Wait for finalizer to be set
- g.Eventually(func() bool {
- if err := testEnv.Get(ctx, key, obj); err != nil {
- return false
- }
- return len(obj.Finalizers) > 0
- }, timeout).Should(BeTrue())
-
- // Wait for HelmRepository to be Ready
- g.Eventually(func() bool {
- if err := testEnv.Get(ctx, key, obj); err != nil {
- return false
- }
- if !conditions.IsReady(obj) {
- return false
- }
- readyCondition := conditions.Get(obj, meta.ReadyCondition)
- return obj.Generation == readyCondition.ObservedGeneration &&
- obj.Generation == obj.Status.ObservedGeneration
- }, timeout).Should(BeTrue())
-
- // Check if the object status is valid.
- condns := &status.Conditions{NegativePolarity: helmRepositoryReadyCondition.NegativePolarity}
- checker := status.NewChecker(testEnv.Client, condns)
- checker.CheckErr(ctx, obj)
-
- // kstatus client conformance check.
- u, err := patch.ToUnstructured(obj)
- g.Expect(err).ToNot(HaveOccurred())
- res, err := kstatus.Compute(u)
- g.Expect(err).ToNot(HaveOccurred())
- g.Expect(res.Status).To(Equal(kstatus.CurrentStatus))
-
- // Patch the object with reconcile request annotation.
- patchHelper, err := patch.NewHelper(obj, testEnv.Client)
- g.Expect(err).ToNot(HaveOccurred())
- annotations := map[string]string{
- meta.ReconcileRequestAnnotation: "now",
- }
- obj.SetAnnotations(annotations)
- g.Expect(patchHelper.Patch(ctx, obj)).ToNot(HaveOccurred())
- g.Eventually(func() bool {
- if err := testEnv.Get(ctx, key, obj); err != nil {
- return false
- }
- return obj.Status.LastHandledReconcileAt == "now"
- }, timeout).Should(BeTrue())
-
- g.Expect(testEnv.Delete(ctx, obj)).To(Succeed())
-
- // Wait for HelmRepository to be deleted
- g.Eventually(func() bool {
- if err := testEnv.Get(ctx, key, obj); err != nil {
- return apierrors.IsNotFound(err)
- }
- return false
- }, timeout).Should(BeTrue())
- })
- }
-}
diff --git a/controllers/helmrepository_controller_test.go b/controllers/helmrepository_controller_test.go
deleted file mode 100644
index 2e8df4873..000000000
--- a/controllers/helmrepository_controller_test.go
+++ /dev/null
@@ -1,1359 +0,0 @@
-/*
-Copyright 2020 The Flux authors
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package controllers
-
-import (
- "context"
- "crypto/tls"
- "errors"
- "fmt"
- "net/http"
- "os"
- "path/filepath"
- "strings"
- "testing"
- "time"
-
- "github.com/darkowlzz/controller-check/status"
- "github.com/fluxcd/pkg/apis/meta"
- "github.com/fluxcd/pkg/helmtestserver"
- "github.com/fluxcd/pkg/runtime/conditions"
- "github.com/fluxcd/pkg/runtime/patch"
- . "github.com/onsi/gomega"
- helmgetter "helm.sh/helm/v3/pkg/getter"
- corev1 "k8s.io/api/core/v1"
- apierrors "k8s.io/apimachinery/pkg/api/errors"
- metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
- "k8s.io/client-go/tools/record"
- kstatus "sigs.k8s.io/cli-utils/pkg/kstatus/status"
- "sigs.k8s.io/controller-runtime/pkg/client"
- "sigs.k8s.io/controller-runtime/pkg/client/fake"
- fakeclient "sigs.k8s.io/controller-runtime/pkg/client/fake"
-
- sourcev1 "github.com/fluxcd/source-controller/api/v1beta2"
- "github.com/fluxcd/source-controller/internal/helm/getter"
- "github.com/fluxcd/source-controller/internal/helm/repository"
- sreconcile "github.com/fluxcd/source-controller/internal/reconcile"
- "github.com/fluxcd/source-controller/internal/reconcile/summarize"
-)
-
-func TestHelmRepositoryReconciler_Reconcile(t *testing.T) {
- g := NewWithT(t)
-
- testServer, err := helmtestserver.NewTempHelmServer()
- g.Expect(err).NotTo(HaveOccurred())
- defer os.RemoveAll(testServer.Root())
-
- g.Expect(testServer.PackageChart("testdata/charts/helmchart")).To(Succeed())
- g.Expect(testServer.GenerateIndex()).To(Succeed())
-
- testServer.Start()
- defer testServer.Stop()
-
- obj := &sourcev1.HelmRepository{
- ObjectMeta: metav1.ObjectMeta{
- GenerateName: "helmrepository-reconcile-",
- Namespace: "default",
- },
- Spec: sourcev1.HelmRepositorySpec{
- Interval: metav1.Duration{Duration: interval},
- URL: testServer.URL(),
- },
- }
- g.Expect(testEnv.Create(ctx, obj)).To(Succeed())
-
- key := client.ObjectKey{Name: obj.Name, Namespace: obj.Namespace}
-
- // Wait for finalizer to be set
- g.Eventually(func() bool {
- if err := testEnv.Get(ctx, key, obj); err != nil {
- return false
- }
- return len(obj.Finalizers) > 0
- }, timeout).Should(BeTrue())
-
- // Wait for HelmRepository to be Ready
- g.Eventually(func() bool {
- if err := testEnv.Get(ctx, key, obj); err != nil {
- return false
- }
- if !conditions.IsReady(obj) && obj.Status.Artifact == nil {
- return false
- }
- readyCondition := conditions.Get(obj, meta.ReadyCondition)
- return readyCondition.Status == metav1.ConditionTrue &&
- obj.Generation == readyCondition.ObservedGeneration &&
- obj.Generation == obj.Status.ObservedGeneration
- }, timeout).Should(BeTrue())
-
- // Check if the object status is valid.
- condns := &status.Conditions{NegativePolarity: helmRepositoryReadyCondition.NegativePolarity}
- checker := status.NewChecker(testEnv.Client, condns)
- checker.CheckErr(ctx, obj)
-
- // kstatus client conformance check.
- u, err := patch.ToUnstructured(obj)
- g.Expect(err).ToNot(HaveOccurred())
- res, err := kstatus.Compute(u)
- g.Expect(err).ToNot(HaveOccurred())
- g.Expect(res.Status).To(Equal(kstatus.CurrentStatus))
-
- // Patch the object with reconcile request annotation.
- patchHelper, err := patch.NewHelper(obj, testEnv.Client)
- g.Expect(err).ToNot(HaveOccurred())
- annotations := map[string]string{
- meta.ReconcileRequestAnnotation: "now",
- }
- obj.SetAnnotations(annotations)
- g.Expect(patchHelper.Patch(ctx, obj)).ToNot(HaveOccurred())
- g.Eventually(func() bool {
- if err := testEnv.Get(ctx, key, obj); err != nil {
- return false
- }
- return obj.Status.LastHandledReconcileAt == "now"
- }, timeout).Should(BeTrue())
-
- g.Expect(testEnv.Delete(ctx, obj)).To(Succeed())
-
- // Wait for HelmRepository to be deleted
- g.Eventually(func() bool {
- if err := testEnv.Get(ctx, key, obj); err != nil {
- return apierrors.IsNotFound(err)
- }
- return false
- }, timeout).Should(BeTrue())
-}
-
-func TestHelmRepositoryReconciler_reconcileStorage(t *testing.T) {
- tests := []struct {
- name string
- beforeFunc func(obj *sourcev1.HelmRepository, storage *Storage) error
- want sreconcile.Result
- wantErr bool
- assertArtifact *sourcev1.Artifact
- assertConditions []metav1.Condition
- assertPaths []string
- }{
- {
- name: "garbage collects",
- beforeFunc: func(obj *sourcev1.HelmRepository, storage *Storage) error {
- revisions := []string{"a", "b", "c", "d"}
- for n := range revisions {
- v := revisions[n]
- obj.Status.Artifact = &sourcev1.Artifact{
- Path: fmt.Sprintf("/reconcile-storage/%s.txt", v),
- Revision: v,
- }
- if err := testStorage.MkdirAll(*obj.Status.Artifact); err != nil {
- return err
- }
- if err := testStorage.AtomicWriteFile(obj.Status.Artifact, strings.NewReader(v), 0o640); err != nil {
- return err
- }
- if n != len(revisions)-1 {
- time.Sleep(time.Second * 1)
- }
- }
- testStorage.SetArtifactURL(obj.Status.Artifact)
- return nil
- },
- assertArtifact: &sourcev1.Artifact{
- Path: "/reconcile-storage/d.txt",
- Revision: "d",
- Checksum: "18ac3e7343f016890c510e93f935261169d9e3f565436429830faf0934f4f8e4",
- URL: testStorage.Hostname + "/reconcile-storage/d.txt",
- Size: int64p(int64(len("d"))),
- },
- assertPaths: []string{
- "/reconcile-storage/d.txt",
- "/reconcile-storage/c.txt",
- "!/reconcile-storage/b.txt",
- "!/reconcile-storage/a.txt",
- },
- want: sreconcile.ResultSuccess,
- },
- {
- name: "notices missing artifact in storage",
- beforeFunc: func(obj *sourcev1.HelmRepository, storage *Storage) error {
- obj.Status.Artifact = &sourcev1.Artifact{
- Path: "/reconcile-storage/invalid.txt",
- Revision: "d",
- }
- testStorage.SetArtifactURL(obj.Status.Artifact)
- return nil
- },
- want: sreconcile.ResultSuccess,
- assertPaths: []string{
- "!/reconcile-storage/invalid.txt",
- },
- assertConditions: []metav1.Condition{
- *conditions.TrueCondition(meta.ReconcilingCondition, "NoArtifact", "no artifact for resource in storage"),
- },
- },
- {
- name: "updates hostname on diff from current",
- beforeFunc: func(obj *sourcev1.HelmRepository, storage *Storage) error {
- obj.Status.Artifact = &sourcev1.Artifact{
- Path: "/reconcile-storage/hostname.txt",
- Revision: "f",
- Checksum: "3b9c358f36f0a31b6ad3e14f309c7cf198ac9246e8316f9ce543d5b19ac02b80",
- URL: "http://outdated.com/reconcile-storage/hostname.txt",
- }
- if err := testStorage.MkdirAll(*obj.Status.Artifact); err != nil {
- return err
- }
- if err := testStorage.AtomicWriteFile(obj.Status.Artifact, strings.NewReader("file"), 0o640); err != nil {
- return err
- }
- return nil
- },
- want: sreconcile.ResultSuccess,
- assertPaths: []string{
- "/reconcile-storage/hostname.txt",
- },
- assertArtifact: &sourcev1.Artifact{
- Path: "/reconcile-storage/hostname.txt",
- Revision: "f",
- Checksum: "3b9c358f36f0a31b6ad3e14f309c7cf198ac9246e8316f9ce543d5b19ac02b80",
- URL: testStorage.Hostname + "/reconcile-storage/hostname.txt",
- Size: int64p(int64(len("file"))),
- },
- },
- }
- for _, tt := range tests {
- t.Run(tt.name, func(t *testing.T) {
- g := NewWithT(t)
-
- r := &HelmRepositoryReconciler{
- EventRecorder: record.NewFakeRecorder(32),
- Storage: testStorage,
- }
-
- obj := &sourcev1.HelmRepository{
- ObjectMeta: metav1.ObjectMeta{
- GenerateName: "test-",
- },
- }
- if tt.beforeFunc != nil {
- g.Expect(tt.beforeFunc(obj, testStorage)).To(Succeed())
- }
-
- var chartRepo repository.ChartRepository
- var artifact sourcev1.Artifact
-
- got, err := r.reconcileStorage(context.TODO(), obj, &artifact, &chartRepo)
- g.Expect(err != nil).To(Equal(tt.wantErr))
- g.Expect(got).To(Equal(tt.want))
-
- g.Expect(obj.Status.Artifact).To(MatchArtifact(tt.assertArtifact))
- if tt.assertArtifact != nil && tt.assertArtifact.URL != "" {
- g.Expect(obj.Status.Artifact.URL).To(Equal(tt.assertArtifact.URL))
- }
- g.Expect(obj.Status.Conditions).To(conditions.MatchConditions(tt.assertConditions))
-
- for _, p := range tt.assertPaths {
- absoluteP := filepath.Join(testStorage.BasePath, p)
- if !strings.HasPrefix(p, "!") {
- g.Expect(absoluteP).To(BeAnExistingFile())
- continue
- }
- g.Expect(absoluteP).NotTo(BeAnExistingFile())
- }
- })
- }
-}
-
-func TestHelmRepositoryReconciler_reconcileSource(t *testing.T) {
- type options struct {
- username string
- password string
- publicKey []byte
- privateKey []byte
- ca []byte
- }
-
- tests := []struct {
- name string
- protocol string
- server options
- secret *corev1.Secret
- beforeFunc func(t *WithT, obj *sourcev1.HelmRepository, checksum string)
- afterFunc func(t *WithT, obj *sourcev1.HelmRepository, artifact sourcev1.Artifact, chartRepo repository.ChartRepository)
- want sreconcile.Result
- wantErr bool
- assertConditions []metav1.Condition
- }{
- {
- name: "HTTP without secretRef makes ArtifactOutdated=True",
- protocol: "http",
- want: sreconcile.ResultSuccess,
- assertConditions: []metav1.Condition{
- *conditions.TrueCondition(sourcev1.ArtifactOutdatedCondition, "NewRevision", "new index revision"),
- *conditions.TrueCondition(meta.ReconcilingCondition, "NewRevision", "new index revision"),
- },
- afterFunc: func(t *WithT, obj *sourcev1.HelmRepository, artifact sourcev1.Artifact, chartRepo repository.ChartRepository) {
- t.Expect(chartRepo.Checksum).ToNot(BeEmpty())
- t.Expect(chartRepo.CachePath).ToNot(BeEmpty())
- t.Expect(artifact.Checksum).To(BeEmpty())
- t.Expect(artifact.Revision).ToNot(BeEmpty())
- },
- },
- {
- name: "HTTP with Basic Auth secret makes ArtifactOutdated=True",
- protocol: "http",
- server: options{
- username: "git",
- password: "1234",
- },
- secret: &corev1.Secret{
- ObjectMeta: metav1.ObjectMeta{
- Name: "basic-auth",
- },
- Data: map[string][]byte{
- "username": []byte("git"),
- "password": []byte("1234"),
- },
- },
- beforeFunc: func(t *WithT, obj *sourcev1.HelmRepository, checksum string) {
- obj.Spec.SecretRef = &meta.LocalObjectReference{Name: "basic-auth"}
- },
- want: sreconcile.ResultSuccess,
- assertConditions: []metav1.Condition{
- *conditions.TrueCondition(sourcev1.ArtifactOutdatedCondition, "NewRevision", "new index revision"),
- *conditions.TrueCondition(meta.ReconcilingCondition, "NewRevision", "new index revision"),
- },
- afterFunc: func(t *WithT, obj *sourcev1.HelmRepository, artifact sourcev1.Artifact, chartRepo repository.ChartRepository) {
- t.Expect(chartRepo.Checksum).ToNot(BeEmpty())
- t.Expect(chartRepo.CachePath).ToNot(BeEmpty())
- t.Expect(artifact.Checksum).To(BeEmpty())
- t.Expect(artifact.Revision).ToNot(BeEmpty())
- },
- },
- {
- name: "HTTPS with CAFile secret makes ArtifactOutdated=True",
- protocol: "https",
- server: options{
- publicKey: tlsPublicKey,
- privateKey: tlsPrivateKey,
- ca: tlsCA,
- },
- secret: &corev1.Secret{
- ObjectMeta: metav1.ObjectMeta{
- Name: "ca-file",
- },
- Data: map[string][]byte{
- "caFile": tlsCA,
- },
- },
- beforeFunc: func(t *WithT, obj *sourcev1.HelmRepository, checksum string) {
- obj.Spec.SecretRef = &meta.LocalObjectReference{Name: "ca-file"}
- },
- want: sreconcile.ResultSuccess,
- assertConditions: []metav1.Condition{
- *conditions.TrueCondition(sourcev1.ArtifactOutdatedCondition, "NewRevision", "new index revision"),
- *conditions.TrueCondition(meta.ReconcilingCondition, "NewRevision", "new index revision"),
- },
- afterFunc: func(t *WithT, obj *sourcev1.HelmRepository, artifact sourcev1.Artifact, chartRepo repository.ChartRepository) {
- t.Expect(chartRepo.Checksum).ToNot(BeEmpty())
- t.Expect(chartRepo.CachePath).ToNot(BeEmpty())
- t.Expect(artifact.Checksum).To(BeEmpty())
- t.Expect(artifact.Revision).ToNot(BeEmpty())
- },
- },
- {
- name: "HTTPS with invalid CAFile secret makes FetchFailed=True and returns error",
- protocol: "https",
- server: options{
- publicKey: tlsPublicKey,
- privateKey: tlsPrivateKey,
- ca: tlsCA,
- },
- secret: &corev1.Secret{
- ObjectMeta: metav1.ObjectMeta{
- Name: "invalid-ca",
- },
- Data: map[string][]byte{
- "caFile": []byte("invalid"),
- },
- },
- beforeFunc: func(t *WithT, obj *sourcev1.HelmRepository, checksum string) {
- obj.Spec.SecretRef = &meta.LocalObjectReference{Name: "invalid-ca"}
- },
- wantErr: true,
- assertConditions: []metav1.Condition{
- *conditions.TrueCondition(sourcev1.FetchFailedCondition, sourcev1.AuthenticationFailedReason, "failed to create TLS client config with secret data: cannot append certificate into certificate pool: invalid caFile"),
- },
- afterFunc: func(t *WithT, obj *sourcev1.HelmRepository, artifact sourcev1.Artifact, chartRepo repository.ChartRepository) {
- // No repo index due to fetch fail.
- t.Expect(chartRepo.Checksum).To(BeEmpty())
- t.Expect(chartRepo.CachePath).To(BeEmpty())
- t.Expect(artifact.Checksum).To(BeEmpty())
- t.Expect(artifact.Revision).To(BeEmpty())
- },
- },
- {
- name: "Invalid URL makes FetchFailed=True and returns stalling error",
- protocol: "http",
- beforeFunc: func(t *WithT, obj *sourcev1.HelmRepository, checksum string) {
- obj.Spec.URL = strings.ReplaceAll(obj.Spec.URL, "http://", "")
- },
- want: sreconcile.ResultEmpty,
- wantErr: true,
- assertConditions: []metav1.Condition{
- *conditions.TrueCondition(sourcev1.FetchFailedCondition, sourcev1.URLInvalidReason, "first path segment in URL cannot contain colon"),
- },
- afterFunc: func(t *WithT, obj *sourcev1.HelmRepository, artifact sourcev1.Artifact, chartRepo repository.ChartRepository) {
- // No repo index due to fetch fail.
- t.Expect(chartRepo.Checksum).To(BeEmpty())
- t.Expect(chartRepo.CachePath).To(BeEmpty())
- t.Expect(artifact.Checksum).To(BeEmpty())
- t.Expect(artifact.Revision).To(BeEmpty())
- },
- },
- {
- name: "Unsupported scheme makes FetchFailed=True and returns stalling error",
- protocol: "http",
- beforeFunc: func(t *WithT, obj *sourcev1.HelmRepository, checksum string) {
- obj.Spec.URL = strings.ReplaceAll(obj.Spec.URL, "http://", "ftp://")
- },
- want: sreconcile.ResultEmpty,
- wantErr: true,
- assertConditions: []metav1.Condition{
- *conditions.TrueCondition(sourcev1.FetchFailedCondition, meta.FailedReason, "scheme \"ftp\" not supported"),
- },
- afterFunc: func(t *WithT, obj *sourcev1.HelmRepository, artifact sourcev1.Artifact, chartRepo repository.ChartRepository) {
- // No repo index due to fetch fail.
- t.Expect(chartRepo.Checksum).To(BeEmpty())
- t.Expect(chartRepo.CachePath).To(BeEmpty())
- t.Expect(artifact.Checksum).To(BeEmpty())
- t.Expect(artifact.Revision).To(BeEmpty())
- },
- },
- {
- name: "Missing secret returns FetchFailed=True and returns error",
- protocol: "http",
- beforeFunc: func(t *WithT, obj *sourcev1.HelmRepository, checksum string) {
- obj.Spec.SecretRef = &meta.LocalObjectReference{Name: "non-existing"}
- },
- wantErr: true,
- assertConditions: []metav1.Condition{
- *conditions.TrueCondition(sourcev1.FetchFailedCondition, sourcev1.AuthenticationFailedReason, "secrets \"non-existing\" not found"),
- },
- afterFunc: func(t *WithT, obj *sourcev1.HelmRepository, artifact sourcev1.Artifact, chartRepo repository.ChartRepository) {
- // No repo index due to fetch fail.
- t.Expect(chartRepo.Checksum).To(BeEmpty())
- t.Expect(chartRepo.CachePath).To(BeEmpty())
- t.Expect(artifact.Checksum).To(BeEmpty())
- t.Expect(artifact.Revision).To(BeEmpty())
- },
- },
- {
- name: "Malformed secret returns FetchFailed=True and returns error",
- protocol: "http",
- secret: &corev1.Secret{
- ObjectMeta: metav1.ObjectMeta{
- Name: "malformed-basic-auth",
- },
- Data: map[string][]byte{
- "username": []byte("git"),
- },
- },
- beforeFunc: func(t *WithT, obj *sourcev1.HelmRepository, checksum string) {
- obj.Spec.SecretRef = &meta.LocalObjectReference{Name: "malformed-basic-auth"}
- },
- wantErr: true,
- assertConditions: []metav1.Condition{
- *conditions.TrueCondition(sourcev1.FetchFailedCondition, sourcev1.AuthenticationFailedReason, "required fields 'username' and 'password"),
- },
- afterFunc: func(t *WithT, obj *sourcev1.HelmRepository, artifact sourcev1.Artifact, chartRepo repository.ChartRepository) {
- // No repo index due to fetch fail.
- t.Expect(chartRepo.Checksum).To(BeEmpty())
- t.Expect(chartRepo.CachePath).To(BeEmpty())
- t.Expect(artifact.Checksum).To(BeEmpty())
- t.Expect(artifact.Revision).To(BeEmpty())
- },
- },
- {
- name: "cached index with same checksum",
- protocol: "http",
- beforeFunc: func(t *WithT, obj *sourcev1.HelmRepository, checksum string) {
- obj.Status.Artifact = &sourcev1.Artifact{
- Revision: checksum,
- Checksum: checksum,
- }
- },
- afterFunc: func(t *WithT, obj *sourcev1.HelmRepository, artifact sourcev1.Artifact, chartRepo repository.ChartRepository) {
- // chartRepo.Checksum isn't populated, artifact.Checksum is
- // populated from the cached repo index data.
- t.Expect(chartRepo.Checksum).To(BeEmpty())
- t.Expect(chartRepo.CachePath).ToNot(BeEmpty())
- t.Expect(artifact.Checksum).To(Equal(obj.Status.Artifact.Checksum))
- t.Expect(artifact.Revision).To(Equal(obj.Status.Artifact.Revision))
- },
- want: sreconcile.ResultSuccess,
- },
- {
- name: "cached index with different checksum",
- protocol: "http",
- beforeFunc: func(t *WithT, obj *sourcev1.HelmRepository, checksum string) {
- obj.Status.Artifact = &sourcev1.Artifact{
- Revision: checksum,
- Checksum: "foo",
- }
- },
- afterFunc: func(t *WithT, obj *sourcev1.HelmRepository, artifact sourcev1.Artifact, chartRepo repository.ChartRepository) {
- t.Expect(chartRepo.Checksum).ToNot(BeEmpty())
- t.Expect(chartRepo.CachePath).ToNot(BeEmpty())
- t.Expect(artifact.Checksum).To(BeEmpty())
- t.Expect(artifact.Revision).To(Equal(obj.Status.Artifact.Revision))
- },
- want: sreconcile.ResultSuccess,
- },
- }
-
- for _, tt := range tests {
- obj := &sourcev1.HelmRepository{
- ObjectMeta: metav1.ObjectMeta{
- GenerateName: "auth-strategy-",
- },
- Spec: sourcev1.HelmRepositorySpec{
- Interval: metav1.Duration{Duration: interval},
- Timeout: &metav1.Duration{Duration: timeout},
- },
- }
-
- t.Run(tt.name, func(t *testing.T) {
- g := NewWithT(t)
-
- server, err := helmtestserver.NewTempHelmServer()
- g.Expect(err).NotTo(HaveOccurred())
- defer os.RemoveAll(server.Root())
-
- g.Expect(server.PackageChart("testdata/charts/helmchart")).To(Succeed())
- g.Expect(server.GenerateIndex()).To(Succeed())
-
- if len(tt.server.username+tt.server.password) > 0 {
- server.WithMiddleware(func(handler http.Handler) http.Handler {
- return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
- u, p, ok := r.BasicAuth()
- if !ok || u != tt.server.username || p != tt.server.password {
- w.WriteHeader(401)
- return
- }
- handler.ServeHTTP(w, r)
- })
- })
- }
-
- secret := tt.secret.DeepCopy()
- switch tt.protocol {
- case "http":
- server.Start()
- defer server.Stop()
- obj.Spec.URL = server.URL()
- case "https":
- g.Expect(server.StartTLS(tt.server.publicKey, tt.server.privateKey, tt.server.ca, "example.com")).To(Succeed())
- defer server.Stop()
- obj.Spec.URL = server.URL()
- default:
- t.Fatalf("unsupported protocol %q", tt.protocol)
- }
-
- builder := fakeclient.NewClientBuilder().WithScheme(testEnv.GetScheme())
- if secret != nil {
- builder.WithObjects(secret.DeepCopy())
- }
-
- // Calculate the artifact checksum for valid repos configurations.
- clientOpts := []helmgetter.Option{
- helmgetter.WithURL(server.URL()),
- }
- var newChartRepo *repository.ChartRepository
- var tOpts *tls.Config
- validSecret := true
- if secret != nil {
- // Extract the client options from secret, ignoring any invalid
- // value. validSecret is used to determine if the indexChecksum
- // should be calculated below.
- var cOpts []helmgetter.Option
- var serr error
- cOpts, serr = getter.ClientOptionsFromSecret(*secret)
- if serr != nil {
- validSecret = false
- }
- clientOpts = append(clientOpts, cOpts...)
- tOpts, serr = getter.TLSClientConfigFromSecret(*secret, server.URL())
- if serr != nil {
- validSecret = false
- }
- newChartRepo, err = repository.NewChartRepository(obj.Spec.URL, "", testGetters, tOpts, clientOpts)
- } else {
- newChartRepo, err = repository.NewChartRepository(obj.Spec.URL, "", testGetters, nil, nil)
- }
- g.Expect(err).ToNot(HaveOccurred())
-
- // NOTE: checksum will be empty in beforeFunc for invalid repo
- // configurations as the client can't get the repo.
- var indexChecksum string
- if validSecret {
- indexChecksum, err = newChartRepo.CacheIndex()
- g.Expect(err).ToNot(HaveOccurred())
- }
-
- if tt.beforeFunc != nil {
- tt.beforeFunc(g, obj, indexChecksum)
- }
-
- r := &HelmRepositoryReconciler{
- EventRecorder: record.NewFakeRecorder(32),
- Client: builder.Build(),
- Storage: testStorage,
- Getters: testGetters,
- }
-
- var chartRepo repository.ChartRepository
- var artifact sourcev1.Artifact
- got, err := r.reconcileSource(context.TODO(), obj, &artifact, &chartRepo)
- defer os.Remove(chartRepo.CachePath)
-
- g.Expect(obj.Status.Conditions).To(conditions.MatchConditions(tt.assertConditions))
- g.Expect(err != nil).To(Equal(tt.wantErr))
- g.Expect(got).To(Equal(tt.want))
-
- if tt.afterFunc != nil {
- tt.afterFunc(g, obj, artifact, chartRepo)
- }
- })
- }
-}
-
-func TestHelmRepositoryReconciler_reconcileArtifact(t *testing.T) {
- tests := []struct {
- name string
- beforeFunc func(t *WithT, obj *sourcev1.HelmRepository, artifact sourcev1.Artifact, index *repository.ChartRepository)
- afterFunc func(t *WithT, obj *sourcev1.HelmRepository)
- want sreconcile.Result
- wantErr bool
- assertConditions []metav1.Condition
- }{
- {
- name: "Archiving artifact to storage makes ArtifactInStorage=True",
- beforeFunc: func(t *WithT, obj *sourcev1.HelmRepository, artifact sourcev1.Artifact, index *repository.ChartRepository) {
- obj.Spec.Interval = metav1.Duration{Duration: interval}
- },
- want: sreconcile.ResultSuccess,
- assertConditions: []metav1.Condition{
- *conditions.TrueCondition(sourcev1.ArtifactInStorageCondition, meta.SucceededReason, "stored artifact for revision 'existing'"),
- },
- },
- {
- name: "Up-to-date artifact should not update status",
- beforeFunc: func(t *WithT, obj *sourcev1.HelmRepository, artifact sourcev1.Artifact, index *repository.ChartRepository) {
- obj.Spec.Interval = metav1.Duration{Duration: interval}
- obj.Status.Artifact = artifact.DeepCopy()
- },
- afterFunc: func(t *WithT, obj *sourcev1.HelmRepository) {
- t.Expect(obj.Status.URL).To(BeEmpty())
- },
- want: sreconcile.ResultSuccess,
- assertConditions: []metav1.Condition{
- *conditions.TrueCondition(sourcev1.ArtifactInStorageCondition, meta.SucceededReason, "stored artifact for revision 'existing'"),
- },
- },
- {
- name: "Removes ArtifactOutdatedCondition after creating a new artifact",
- beforeFunc: func(t *WithT, obj *sourcev1.HelmRepository, artifact sourcev1.Artifact, index *repository.ChartRepository) {
- obj.Spec.Interval = metav1.Duration{Duration: interval}
- conditions.MarkTrue(obj, sourcev1.ArtifactOutdatedCondition, "Foo", "")
- },
- want: sreconcile.ResultSuccess,
- assertConditions: []metav1.Condition{
- *conditions.TrueCondition(sourcev1.ArtifactInStorageCondition, meta.SucceededReason, "stored artifact for revision 'existing'"),
- },
- },
- {
- name: "Creates latest symlink to the created artifact",
- beforeFunc: func(t *WithT, obj *sourcev1.HelmRepository, artifact sourcev1.Artifact, index *repository.ChartRepository) {
- obj.Spec.Interval = metav1.Duration{Duration: interval}
- },
- afterFunc: func(t *WithT, obj *sourcev1.HelmRepository) {
- localPath := testStorage.LocalPath(*obj.GetArtifact())
- symlinkPath := filepath.Join(filepath.Dir(localPath), "index.yaml")
- targetFile, err := os.Readlink(symlinkPath)
- t.Expect(err).NotTo(HaveOccurred())
- t.Expect(localPath).To(Equal(targetFile))
- },
- want: sreconcile.ResultSuccess,
- assertConditions: []metav1.Condition{
- *conditions.TrueCondition(sourcev1.ArtifactInStorageCondition, meta.SucceededReason, "stored artifact for revision 'existing'"),
- },
- },
- }
-
- for _, tt := range tests {
- t.Run(tt.name, func(t *testing.T) {
- g := NewWithT(t)
-
- r := &HelmRepositoryReconciler{
- EventRecorder: record.NewFakeRecorder(32),
- Storage: testStorage,
- }
-
- obj := &sourcev1.HelmRepository{
- TypeMeta: metav1.TypeMeta{
- Kind: sourcev1.HelmRepositoryKind,
- },
- ObjectMeta: metav1.ObjectMeta{
- GenerateName: "test-bucket-",
- Generation: 1,
- Namespace: "default",
- },
- Spec: sourcev1.HelmRepositorySpec{
- Timeout: &metav1.Duration{Duration: timeout},
- URL: "https://example.com/index.yaml",
- },
- }
-
- tmpDir := t.TempDir()
-
- // Create an empty cache file.
- cachePath := filepath.Join(tmpDir, "index.yaml")
- cacheFile, err := os.Create(cachePath)
- g.Expect(err).ToNot(HaveOccurred())
- g.Expect(cacheFile.Close()).ToNot(HaveOccurred())
-
- chartRepo, err := repository.NewChartRepository(obj.Spec.URL, "", testGetters, nil, nil)
- g.Expect(err).ToNot(HaveOccurred())
- chartRepo.CachePath = cachePath
-
- artifact := testStorage.NewArtifactFor(obj.Kind, obj, "existing", "foo.tar.gz")
- // Checksum of the index file calculated by the ChartRepository.
- artifact.Checksum = "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"
-
- if tt.beforeFunc != nil {
- tt.beforeFunc(g, obj, artifact, chartRepo)
- }
-
- got, err := r.reconcileArtifact(context.TODO(), obj, &artifact, chartRepo)
- g.Expect(err != nil).To(Equal(tt.wantErr))
- g.Expect(got).To(Equal(tt.want))
-
- // On error, artifact is empty. Check artifacts only on successful
- // reconcile.
- if !tt.wantErr {
- g.Expect(obj.Status.Artifact).To(MatchArtifact(artifact.DeepCopy()))
- }
- g.Expect(obj.Status.Conditions).To(conditions.MatchConditions(tt.assertConditions))
-
- if tt.afterFunc != nil {
- tt.afterFunc(g, obj)
- }
- })
- }
-}
-
-func TestHelmRepositoryReconciler_reconcileSubRecs(t *testing.T) {
- // Helper to build simple helmRepositoryReconcileFunc with result and error.
- buildReconcileFuncs := func(r sreconcile.Result, e error) helmRepositoryReconcileFunc {
- return func(ctx context.Context, obj *sourcev1.HelmRepository, artifact *sourcev1.Artifact, repo *repository.ChartRepository) (sreconcile.Result, error) {
- return r, e
- }
- }
-
- tests := []struct {
- name string
- generation int64
- observedGeneration int64
- reconcileFuncs []helmRepositoryReconcileFunc
- wantResult sreconcile.Result
- wantErr bool
- assertConditions []metav1.Condition
- }{
- {
- name: "successful reconciliations",
- reconcileFuncs: []helmRepositoryReconcileFunc{
- buildReconcileFuncs(sreconcile.ResultSuccess, nil),
- },
- wantResult: sreconcile.ResultSuccess,
- wantErr: false,
- },
- {
- name: "successful reconciliation with generation difference",
- generation: 3,
- observedGeneration: 2,
- reconcileFuncs: []helmRepositoryReconcileFunc{
- buildReconcileFuncs(sreconcile.ResultSuccess, nil),
- },
- wantResult: sreconcile.ResultSuccess,
- wantErr: false,
- assertConditions: []metav1.Condition{
- *conditions.TrueCondition(meta.ReconcilingCondition, "NewGeneration", "reconciling new object generation (3)"),
- },
- },
- {
- name: "failed reconciliation",
- reconcileFuncs: []helmRepositoryReconcileFunc{
- buildReconcileFuncs(sreconcile.ResultEmpty, fmt.Errorf("some error")),
- },
- wantResult: sreconcile.ResultEmpty,
- wantErr: true,
- },
- {
- name: "multiple object status conditions mutations",
- reconcileFuncs: []helmRepositoryReconcileFunc{
- func(ctx context.Context, obj *sourcev1.HelmRepository, artifact *sourcev1.Artifact, repo *repository.ChartRepository) (sreconcile.Result, error) {
- conditions.MarkTrue(obj, sourcev1.ArtifactOutdatedCondition, "NewRevision", "new index revision")
- return sreconcile.ResultSuccess, nil
- },
- func(ctx context.Context, obj *sourcev1.HelmRepository, artifact *sourcev1.Artifact, repo *repository.ChartRepository) (sreconcile.Result, error) {
- conditions.MarkTrue(obj, meta.ReconcilingCondition, "Progressing", "creating artifact")
- return sreconcile.ResultSuccess, nil
- },
- },
- wantResult: sreconcile.ResultSuccess,
- wantErr: false,
- assertConditions: []metav1.Condition{
- *conditions.TrueCondition(sourcev1.ArtifactOutdatedCondition, "NewRevision", "new index revision"),
- *conditions.TrueCondition(meta.ReconcilingCondition, "Progressing", "creating artifact"),
- },
- },
- {
- name: "subrecs with one result=Requeue, no error",
- reconcileFuncs: []helmRepositoryReconcileFunc{
- buildReconcileFuncs(sreconcile.ResultSuccess, nil),
- buildReconcileFuncs(sreconcile.ResultRequeue, nil),
- buildReconcileFuncs(sreconcile.ResultSuccess, nil),
- },
- wantResult: sreconcile.ResultRequeue,
- wantErr: false,
- },
- {
- name: "subrecs with error before result=Requeue",
- reconcileFuncs: []helmRepositoryReconcileFunc{
- buildReconcileFuncs(sreconcile.ResultSuccess, nil),
- buildReconcileFuncs(sreconcile.ResultEmpty, fmt.Errorf("some error")),
- buildReconcileFuncs(sreconcile.ResultRequeue, nil),
- },
- wantResult: sreconcile.ResultEmpty,
- wantErr: true,
- },
- }
-
- for _, tt := range tests {
- t.Run(tt.name, func(t *testing.T) {
- g := NewWithT(t)
-
- r := &HelmRepositoryReconciler{}
- obj := &sourcev1.HelmRepository{
- ObjectMeta: metav1.ObjectMeta{
- GenerateName: "test-",
- Generation: tt.generation,
- },
- Status: sourcev1.HelmRepositoryStatus{
- ObservedGeneration: tt.observedGeneration,
- },
- }
-
- ctx := context.TODO()
-
- gotRes, gotErr := r.reconcile(ctx, obj, tt.reconcileFuncs)
- g.Expect(gotErr != nil).To(Equal(tt.wantErr))
- g.Expect(gotRes).To(Equal(tt.wantResult))
-
- g.Expect(obj.Status.Conditions).To(conditions.MatchConditions(tt.assertConditions))
- })
- }
-}
-
-func TestHelmRepositoryReconciler_statusConditions(t *testing.T) {
- tests := []struct {
- name string
- beforeFunc func(obj *sourcev1.HelmRepository)
- assertConditions []metav1.Condition
- }{
- {
- name: "positive conditions only",
- beforeFunc: func(obj *sourcev1.HelmRepository) {
- conditions.MarkTrue(obj, sourcev1.ArtifactInStorageCondition, meta.SucceededReason, "stored artifact for revision")
- },
- assertConditions: []metav1.Condition{
- *conditions.TrueCondition(meta.ReadyCondition, meta.SucceededReason, "stored artifact for revision"),
- *conditions.TrueCondition(sourcev1.ArtifactInStorageCondition, meta.SucceededReason, "stored artifact for revision"),
- },
- },
- {
- name: "multiple failures",
- beforeFunc: func(obj *sourcev1.HelmRepository) {
- conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, sourcev1.AuthenticationFailedReason, "failed to get secret")
- conditions.MarkTrue(obj, sourcev1.StorageOperationFailedCondition, sourcev1.DirCreationFailedReason, "failed to create directory")
- conditions.MarkTrue(obj, sourcev1.ArtifactOutdatedCondition, "NewRevision", "some error")
- },
- assertConditions: []metav1.Condition{
- *conditions.FalseCondition(meta.ReadyCondition, sourcev1.DirCreationFailedReason, "failed to create directory"),
- *conditions.TrueCondition(sourcev1.FetchFailedCondition, sourcev1.AuthenticationFailedReason, "failed to get secret"),
- *conditions.TrueCondition(sourcev1.StorageOperationFailedCondition, sourcev1.DirCreationFailedReason, "failed to create directory"),
- *conditions.TrueCondition(sourcev1.ArtifactOutdatedCondition, "NewRevision", "some error"),
- },
- },
- {
- name: "mixed positive and negative conditions",
- beforeFunc: func(obj *sourcev1.HelmRepository) {
- conditions.MarkTrue(obj, sourcev1.ArtifactInStorageCondition, meta.SucceededReason, "stored artifact for revision")
- conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, sourcev1.AuthenticationFailedReason, "failed to get secret")
- },
- assertConditions: []metav1.Condition{
- *conditions.FalseCondition(meta.ReadyCondition, sourcev1.AuthenticationFailedReason, "failed to get secret"),
- *conditions.TrueCondition(sourcev1.FetchFailedCondition, sourcev1.AuthenticationFailedReason, "failed to get secret"),
- *conditions.TrueCondition(sourcev1.ArtifactInStorageCondition, meta.SucceededReason, "stored artifact for revision"),
- },
- },
- }
-
- for _, tt := range tests {
- t.Run(tt.name, func(t *testing.T) {
- g := NewWithT(t)
-
- obj := &sourcev1.HelmRepository{
- TypeMeta: metav1.TypeMeta{
- Kind: sourcev1.HelmRepositoryKind,
- APIVersion: "source.toolkit.fluxcd.io/v1beta2",
- },
- ObjectMeta: metav1.ObjectMeta{
- Name: "helmrepo",
- Namespace: "foo",
- },
- }
- clientBuilder := fake.NewClientBuilder()
- clientBuilder.WithObjects(obj)
- c := clientBuilder.Build()
-
- patchHelper, err := patch.NewHelper(obj, c)
- g.Expect(err).ToNot(HaveOccurred())
-
- if tt.beforeFunc != nil {
- tt.beforeFunc(obj)
- }
-
- ctx := context.TODO()
- recResult := sreconcile.ResultSuccess
- var retErr error
-
- summarizeHelper := summarize.NewHelper(record.NewFakeRecorder(32), patchHelper)
- summarizeOpts := []summarize.Option{
- summarize.WithConditions(helmRepositoryReadyCondition),
- summarize.WithReconcileResult(recResult),
- summarize.WithReconcileError(retErr),
- summarize.WithIgnoreNotFound(),
- summarize.WithResultBuilder(sreconcile.AlwaysRequeueResultBuilder{RequeueAfter: obj.GetRequeueAfter()}),
- summarize.WithPatchFieldOwner("source-controller"),
- }
- _, retErr = summarizeHelper.SummarizeAndPatch(ctx, obj, summarizeOpts...)
-
- key := client.ObjectKeyFromObject(obj)
- g.Expect(c.Get(ctx, key, obj)).ToNot(HaveOccurred())
- g.Expect(obj.GetConditions()).To(conditions.MatchConditions(tt.assertConditions))
- })
- }
-}
-
-func TestHelmRepositoryReconciler_notify(t *testing.T) {
- var aSize int64 = 30000
- tests := []struct {
- name string
- res sreconcile.Result
- resErr error
- oldObjBeforeFunc func(obj *sourcev1.HelmRepository)
- newObjBeforeFunc func(obj *sourcev1.HelmRepository)
- wantEvent string
- }{
- {
- name: "error - no event",
- res: sreconcile.ResultEmpty,
- resErr: errors.New("some error"),
- },
- {
- name: "new artifact with nil size",
- res: sreconcile.ResultSuccess,
- resErr: nil,
- newObjBeforeFunc: func(obj *sourcev1.HelmRepository) {
- obj.Status.Artifact = &sourcev1.Artifact{Revision: "xxx", Checksum: "yyy", Size: nil}
- },
- wantEvent: "Normal NewArtifact stored fetched index of unknown size",
- },
- {
- name: "new artifact",
- res: sreconcile.ResultSuccess,
- resErr: nil,
- newObjBeforeFunc: func(obj *sourcev1.HelmRepository) {
- obj.Status.Artifact = &sourcev1.Artifact{Revision: "xxx", Checksum: "yyy", Size: &aSize}
- },
- wantEvent: "Normal NewArtifact stored fetched index of size",
- },
- {
- name: "recovery from failure",
- res: sreconcile.ResultSuccess,
- resErr: nil,
- oldObjBeforeFunc: func(obj *sourcev1.HelmRepository) {
- obj.Status.Artifact = &sourcev1.Artifact{Revision: "xxx", Checksum: "yyy", Size: &aSize}
- conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, sourcev1.GitOperationFailedReason, "fail")
- conditions.MarkFalse(obj, meta.ReadyCondition, meta.FailedReason, "foo")
- },
- newObjBeforeFunc: func(obj *sourcev1.HelmRepository) {
- obj.Status.Artifact = &sourcev1.Artifact{Revision: "xxx", Checksum: "yyy", Size: &aSize}
- conditions.MarkTrue(obj, meta.ReadyCondition, meta.SucceededReason, "ready")
- },
- wantEvent: "Normal Succeeded stored fetched index of size",
- },
- {
- name: "recovery and new artifact",
- res: sreconcile.ResultSuccess,
- resErr: nil,
- oldObjBeforeFunc: func(obj *sourcev1.HelmRepository) {
- obj.Status.Artifact = &sourcev1.Artifact{Revision: "xxx", Checksum: "yyy", Size: &aSize}
- conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, sourcev1.GitOperationFailedReason, "fail")
- conditions.MarkFalse(obj, meta.ReadyCondition, meta.FailedReason, "foo")
- },
- newObjBeforeFunc: func(obj *sourcev1.HelmRepository) {
- obj.Status.Artifact = &sourcev1.Artifact{Revision: "aaa", Checksum: "bbb", Size: &aSize}
- conditions.MarkTrue(obj, meta.ReadyCondition, meta.SucceededReason, "ready")
- },
- wantEvent: "Normal NewArtifact stored fetched index of size",
- },
- {
- name: "no updates",
- res: sreconcile.ResultSuccess,
- resErr: nil,
- oldObjBeforeFunc: func(obj *sourcev1.HelmRepository) {
- obj.Status.Artifact = &sourcev1.Artifact{Revision: "xxx", Checksum: "yyy", Size: &aSize}
- conditions.MarkTrue(obj, meta.ReadyCondition, meta.SucceededReason, "ready")
- },
- newObjBeforeFunc: func(obj *sourcev1.HelmRepository) {
- obj.Status.Artifact = &sourcev1.Artifact{Revision: "xxx", Checksum: "yyy", Size: &aSize}
- conditions.MarkTrue(obj, meta.ReadyCondition, meta.SucceededReason, "ready")
- },
- },
- }
-
- for _, tt := range tests {
- t.Run(tt.name, func(t *testing.T) {
- g := NewWithT(t)
- recorder := record.NewFakeRecorder(32)
-
- oldObj := &sourcev1.HelmRepository{}
- newObj := oldObj.DeepCopy()
-
- if tt.oldObjBeforeFunc != nil {
- tt.oldObjBeforeFunc(oldObj)
- }
- if tt.newObjBeforeFunc != nil {
- tt.newObjBeforeFunc(newObj)
- }
-
- reconciler := &HelmRepositoryReconciler{
- EventRecorder: recorder,
- }
- chartRepo := repository.ChartRepository{
- URL: "some-address",
- }
- reconciler.notify(ctx, oldObj, newObj, chartRepo, tt.res, tt.resErr)
-
- select {
- case x, ok := <-recorder.Events:
- g.Expect(ok).To(Equal(tt.wantEvent != ""), "unexpected event received")
- if tt.wantEvent != "" {
- g.Expect(x).To(ContainSubstring(tt.wantEvent))
- }
- default:
- if tt.wantEvent != "" {
- t.Errorf("expected some event to be emitted")
- }
- }
- })
- }
-}
-
-func TestHelmRepositoryReconciler_ReconcileTypeUpdatePredicateFilter(t *testing.T) {
- g := NewWithT(t)
-
- testServer, err := helmtestserver.NewTempHelmServer()
- g.Expect(err).NotTo(HaveOccurred())
- defer os.RemoveAll(testServer.Root())
-
- g.Expect(testServer.PackageChart("testdata/charts/helmchart")).To(Succeed())
- g.Expect(testServer.GenerateIndex()).To(Succeed())
-
- testServer.Start()
- defer testServer.Stop()
-
- obj := &sourcev1.HelmRepository{
- ObjectMeta: metav1.ObjectMeta{
- GenerateName: "helmrepository-reconcile-",
- Namespace: "default",
- },
- Spec: sourcev1.HelmRepositorySpec{
- Interval: metav1.Duration{Duration: interval},
- URL: testServer.URL(),
- },
- }
- g.Expect(testEnv.CreateAndWait(ctx, obj)).To(Succeed())
-
- key := client.ObjectKey{Name: obj.Name, Namespace: obj.Namespace}
-
- // Wait for finalizer to be set
- g.Eventually(func() bool {
- if err := testEnv.Get(ctx, key, obj); err != nil {
- return false
- }
- return len(obj.Finalizers) > 0
- }, timeout).Should(BeTrue())
-
- // Wait for HelmRepository to be Ready
- g.Eventually(func() bool {
- if err := testEnv.Get(ctx, key, obj); err != nil {
- return false
- }
- if !conditions.IsReady(obj) && obj.Status.Artifact == nil {
- return false
- }
- readyCondition := conditions.Get(obj, meta.ReadyCondition)
- return readyCondition.Status == metav1.ConditionTrue &&
- obj.Generation == readyCondition.ObservedGeneration &&
- obj.Generation == obj.Status.ObservedGeneration
- }, timeout).Should(BeTrue())
-
- // Check if the object status is valid.
- condns := &status.Conditions{NegativePolarity: helmRepositoryReadyCondition.NegativePolarity}
- checker := status.NewChecker(testEnv.Client, condns)
- checker.CheckErr(ctx, obj)
-
- // kstatus client conformance check.
- u, err := patch.ToUnstructured(obj)
- g.Expect(err).ToNot(HaveOccurred())
- res, err := kstatus.Compute(u)
- g.Expect(err).ToNot(HaveOccurred())
- g.Expect(res.Status).To(Equal(kstatus.CurrentStatus))
-
- // Switch to a OCI helm repository type
- secret := &corev1.Secret{
- ObjectMeta: metav1.ObjectMeta{
- GenerateName: "helmrepository-reconcile-",
- Namespace: "default",
- },
- Data: map[string][]byte{
- "username": []byte(testRegistryUsername),
- "password": []byte(testRegistryPassword),
- },
- }
- g.Expect(testEnv.CreateAndWait(ctx, secret)).To(Succeed())
-
- obj.Spec.Type = sourcev1.HelmRepositoryTypeOCI
- obj.Spec.URL = fmt.Sprintf("oci://%s", testRegistryServer.registryHost)
- obj.Spec.SecretRef = &meta.LocalObjectReference{
- Name: secret.Name,
- }
-
- oldGen := obj.GetGeneration()
- g.Expect(testEnv.Update(ctx, obj)).To(Succeed())
- newGen := oldGen + 1
-
- // Wait for HelmRepository to be Ready with new generation.
- g.Eventually(func() bool {
- if err := testEnv.Get(ctx, key, obj); err != nil {
- return false
- }
- if !conditions.IsReady(obj) && obj.Status.Artifact != nil {
- return false
- }
- readyCondition := conditions.Get(obj, meta.ReadyCondition)
- if readyCondition == nil {
- return false
- }
- return readyCondition.Status == metav1.ConditionTrue &&
- newGen == readyCondition.ObservedGeneration &&
- newGen == obj.Status.ObservedGeneration
- }, timeout).Should(BeTrue())
-
- // Check if the object status is valid.
- condns = &status.Conditions{NegativePolarity: helmRepositoryOCINegativeConditions}
- checker = status.NewChecker(testEnv.Client, condns)
- checker.CheckErr(ctx, obj)
-
- g.Expect(testEnv.Delete(ctx, obj)).To(Succeed())
-
- // Wait for HelmRepository to be deleted
- g.Eventually(func() bool {
- if err := testEnv.Get(ctx, key, obj); err != nil {
- return apierrors.IsNotFound(err)
- }
- return false
- }, timeout).Should(BeTrue())
-}
-
-func TestHelmRepositoryReconciler_ReconcileSpecUpdatePredicateFilter(t *testing.T) {
- g := NewWithT(t)
-
- testServer, err := helmtestserver.NewTempHelmServer()
- g.Expect(err).NotTo(HaveOccurred())
- defer os.RemoveAll(testServer.Root())
-
- g.Expect(testServer.PackageChart("testdata/charts/helmchart")).To(Succeed())
- g.Expect(testServer.GenerateIndex()).To(Succeed())
-
- testServer.Start()
- defer testServer.Stop()
-
- obj := &sourcev1.HelmRepository{
- ObjectMeta: metav1.ObjectMeta{
- GenerateName: "helmrepository-reconcile-",
- Namespace: "default",
- },
- Spec: sourcev1.HelmRepositorySpec{
- Interval: metav1.Duration{Duration: interval},
- URL: testServer.URL(),
- },
- }
- g.Expect(testEnv.CreateAndWait(ctx, obj)).To(Succeed())
-
- key := client.ObjectKey{Name: obj.Name, Namespace: obj.Namespace}
-
- // Wait for finalizer to be set
- g.Eventually(func() bool {
- if err := testEnv.Get(ctx, key, obj); err != nil {
- return false
- }
- return len(obj.Finalizers) > 0
- }, timeout).Should(BeTrue())
-
- // Wait for HelmRepository to be Ready
- g.Eventually(func() bool {
- if err := testEnv.Get(ctx, key, obj); err != nil {
- return false
- }
- if !conditions.IsReady(obj) && obj.Status.Artifact == nil {
- return false
- }
- readyCondition := conditions.Get(obj, meta.ReadyCondition)
- return readyCondition.Status == metav1.ConditionTrue &&
- obj.Generation == readyCondition.ObservedGeneration &&
- obj.Generation == obj.Status.ObservedGeneration
- }, timeout).Should(BeTrue())
-
- // Check if the object status is valid.
- condns := &status.Conditions{NegativePolarity: helmRepositoryReadyCondition.NegativePolarity}
- checker := status.NewChecker(testEnv.Client, condns)
- checker.CheckErr(ctx, obj)
-
- // kstatus client conformance check.
- u, err := patch.ToUnstructured(obj)
- g.Expect(err).ToNot(HaveOccurred())
- res, err := kstatus.Compute(u)
- g.Expect(err).ToNot(HaveOccurred())
- g.Expect(res.Status).To(Equal(kstatus.CurrentStatus))
-
- // Change spec Interval to validate spec update
- obj.Spec.Interval = metav1.Duration{Duration: interval + time.Second}
- oldGen := obj.GetGeneration()
- g.Expect(testEnv.Update(ctx, obj)).To(Succeed())
- newGen := oldGen + 1
-
- // Wait for HelmRepository to be Ready
- g.Eventually(func() bool {
- if err := testEnv.Get(ctx, key, obj); err != nil {
- return false
- }
- if !conditions.IsReady(obj) && obj.Status.Artifact == nil {
- return false
- }
- readyCondition := conditions.Get(obj, meta.ReadyCondition)
- return readyCondition.Status == metav1.ConditionTrue &&
- newGen == readyCondition.ObservedGeneration &&
- newGen == obj.Status.ObservedGeneration
- }, timeout).Should(BeTrue())
-
- // Check if the object status is valid.
- condns = &status.Conditions{NegativePolarity: helmRepositoryReadyCondition.NegativePolarity}
- checker = status.NewChecker(testEnv.Client, condns)
- checker.CheckErr(ctx, obj)
-
- g.Expect(testEnv.Delete(ctx, obj)).To(Succeed())
-
- // Wait for HelmRepository to be deleted
- g.Eventually(func() bool {
- if err := testEnv.Get(ctx, key, obj); err != nil {
- return apierrors.IsNotFound(err)
- }
- return false
- }, timeout).Should(BeTrue())
-}
-
-func TestHelmRepositoryReconciler_InMemoryCaching(t *testing.T) {
- g := NewWithT(t)
- testCache.Clear()
-
- testServer, err := helmtestserver.NewTempHelmServer()
- g.Expect(err).NotTo(HaveOccurred())
- defer os.RemoveAll(testServer.Root())
-
- g.Expect(testServer.PackageChartWithVersion("testdata/charts/helmchart", "0.1.0")).To(Succeed())
- g.Expect(testServer.GenerateIndex()).To(Succeed())
-
- testServer.Start()
- defer testServer.Stop()
-
- ns, err := testEnv.CreateNamespace(ctx, "helmrepository")
- g.Expect(err).ToNot(HaveOccurred())
- defer func() { g.Expect(testEnv.Delete(ctx, ns)).To(Succeed()) }()
-
- helmRepo := &sourcev1.HelmRepository{
- ObjectMeta: metav1.ObjectMeta{
- GenerateName: "helmrepository-",
- Namespace: ns.Name,
- },
- Spec: sourcev1.HelmRepositorySpec{
- URL: testServer.URL(),
- },
- }
- g.Expect(testEnv.CreateAndWait(ctx, helmRepo)).To(Succeed())
-
- key := client.ObjectKey{Name: helmRepo.Name, Namespace: helmRepo.Namespace}
- // Wait for finalizer to be set
- g.Eventually(func() bool {
- if err := testEnv.Get(ctx, key, helmRepo); err != nil {
- return false
- }
- return len(helmRepo.Finalizers) > 0
- }, timeout).Should(BeTrue())
-
- // Wait for HelmRepository to be Ready
- g.Eventually(func() bool {
- if err := testEnv.Get(ctx, key, helmRepo); err != nil {
- return false
- }
- if !conditions.IsReady(helmRepo) || helmRepo.Status.Artifact == nil {
- return false
- }
- readyCondition := conditions.Get(helmRepo, meta.ReadyCondition)
- return helmRepo.Generation == readyCondition.ObservedGeneration &&
- helmRepo.Generation == helmRepo.Status.ObservedGeneration
- }, timeout).Should(BeTrue())
-
- err = testEnv.Get(ctx, key, helmRepo)
- g.Expect(err).ToNot(HaveOccurred())
- localPath := testStorage.LocalPath(*helmRepo.GetArtifact())
- _, cacheHit := testCache.Get(localPath)
- g.Expect(cacheHit).To(BeTrue())
-}
diff --git a/controllers/ocirepository_controller.go b/controllers/ocirepository_controller.go
deleted file mode 100644
index 2a4993bbb..000000000
--- a/controllers/ocirepository_controller.go
+++ /dev/null
@@ -1,910 +0,0 @@
-/*
-Copyright 2022 The Flux authors
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package controllers
-
-import (
- "context"
- "crypto/tls"
- "crypto/x509"
- "errors"
- "fmt"
- "net/http"
- "os"
- "sort"
- "strings"
- "time"
-
- "github.com/Masterminds/semver/v3"
- "github.com/google/go-containerregistry/pkg/authn"
- "github.com/google/go-containerregistry/pkg/authn/k8schain"
- "github.com/google/go-containerregistry/pkg/crane"
- "github.com/google/go-containerregistry/pkg/name"
- "github.com/google/go-containerregistry/pkg/v1/remote"
- corev1 "k8s.io/api/core/v1"
- "k8s.io/apimachinery/pkg/runtime"
- "k8s.io/apimachinery/pkg/types"
- "k8s.io/apimachinery/pkg/util/sets"
- "k8s.io/apimachinery/pkg/util/uuid"
- kuberecorder "k8s.io/client-go/tools/record"
-
- ctrl "sigs.k8s.io/controller-runtime"
- "sigs.k8s.io/controller-runtime/pkg/builder"
- "sigs.k8s.io/controller-runtime/pkg/client"
- "sigs.k8s.io/controller-runtime/pkg/controller"
- "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
- "sigs.k8s.io/controller-runtime/pkg/predicate"
- "sigs.k8s.io/controller-runtime/pkg/ratelimiter"
-
- "github.com/fluxcd/pkg/apis/meta"
- "github.com/fluxcd/pkg/oci"
- "github.com/fluxcd/pkg/oci/auth/login"
- "github.com/fluxcd/pkg/runtime/conditions"
- helper "github.com/fluxcd/pkg/runtime/controller"
- "github.com/fluxcd/pkg/runtime/events"
- "github.com/fluxcd/pkg/runtime/patch"
- "github.com/fluxcd/pkg/runtime/predicates"
- "github.com/fluxcd/pkg/untar"
- "github.com/fluxcd/pkg/version"
- sourcev1 "github.com/fluxcd/source-controller/api/v1beta2"
- serror "github.com/fluxcd/source-controller/internal/error"
- sreconcile "github.com/fluxcd/source-controller/internal/reconcile"
- "github.com/fluxcd/source-controller/internal/reconcile/summarize"
- "github.com/fluxcd/source-controller/internal/util"
-)
-
-// ociRepositoryReadyCondition contains the information required to summarize a
-// v1beta2.OCIRepository Ready Condition.
-var ociRepositoryReadyCondition = summarize.Conditions{
- Target: meta.ReadyCondition,
- Owned: []string{
- sourcev1.StorageOperationFailedCondition,
- sourcev1.FetchFailedCondition,
- sourcev1.ArtifactOutdatedCondition,
- sourcev1.ArtifactInStorageCondition,
- meta.ReadyCondition,
- meta.ReconcilingCondition,
- meta.StalledCondition,
- },
- Summarize: []string{
- sourcev1.StorageOperationFailedCondition,
- sourcev1.FetchFailedCondition,
- sourcev1.ArtifactOutdatedCondition,
- sourcev1.ArtifactInStorageCondition,
- meta.StalledCondition,
- meta.ReconcilingCondition,
- },
- NegativePolarity: []string{
- sourcev1.StorageOperationFailedCondition,
- sourcev1.FetchFailedCondition,
- sourcev1.ArtifactOutdatedCondition,
- meta.StalledCondition,
- meta.ReconcilingCondition,
- },
-}
-
-// ociRepositoryFailConditions contains the conditions that represent a failure.
-var ociRepositoryFailConditions = []string{
- sourcev1.FetchFailedCondition,
- sourcev1.StorageOperationFailedCondition,
-}
-
-type invalidOCIURLError struct {
- err error
-}
-
-func (e invalidOCIURLError) Error() string {
- return e.err.Error()
-}
-
-// ociRepositoryReconcileFunc is the function type for all the v1beta2.OCIRepository
-// (sub)reconcile functions. The type implementations are grouped and
-// executed serially to perform the complete reconcile of the object.
-type ociRepositoryReconcileFunc func(ctx context.Context, obj *sourcev1.OCIRepository, metadata *sourcev1.Artifact, dir string) (sreconcile.Result, error)
-
-// OCIRepositoryReconciler reconciles a v1beta2.OCIRepository object
-type OCIRepositoryReconciler struct {
- client.Client
- helper.Metrics
- kuberecorder.EventRecorder
-
- Storage *Storage
- ControllerName string
- requeueDependency time.Duration
-}
-
-type OCIRepositoryReconcilerOptions struct {
- MaxConcurrentReconciles int
- DependencyRequeueInterval time.Duration
- RateLimiter ratelimiter.RateLimiter
-}
-
-// SetupWithManager sets up the controller with the Manager.
-func (r *OCIRepositoryReconciler) SetupWithManager(mgr ctrl.Manager) error {
- return r.SetupWithManagerAndOptions(mgr, OCIRepositoryReconcilerOptions{})
-}
-
-func (r *OCIRepositoryReconciler) SetupWithManagerAndOptions(mgr ctrl.Manager, opts OCIRepositoryReconcilerOptions) error {
- r.requeueDependency = opts.DependencyRequeueInterval
-
- return ctrl.NewControllerManagedBy(mgr).
- For(&sourcev1.OCIRepository{}, builder.WithPredicates(
- predicate.Or(predicate.GenerationChangedPredicate{}, predicates.ReconcileRequestedPredicate{}),
- )).
- WithOptions(controller.Options{
- MaxConcurrentReconciles: opts.MaxConcurrentReconciles,
- RateLimiter: opts.RateLimiter,
- RecoverPanic: true,
- }).
- Complete(r)
-}
-
-// +kubebuilder:rbac:groups=source.toolkit.fluxcd.io,resources=ocirepositories,verbs=get;list;watch;create;update;patch;delete
-// +kubebuilder:rbac:groups=source.toolkit.fluxcd.io,resources=ocirepositories/status,verbs=get;update;patch
-// +kubebuilder:rbac:groups=source.toolkit.fluxcd.io,resources=ocirepositories/finalizers,verbs=get;create;update;patch;delete
-// +kubebuilder:rbac:groups="",resources=events,verbs=create;patch
-
-func (r *OCIRepositoryReconciler) Reconcile(ctx context.Context, req ctrl.Request) (result ctrl.Result, retErr error) {
- start := time.Now()
- log := ctrl.LoggerFrom(ctx).
- // Sets a reconcile ID to correlate logs from all suboperations.
- WithValues("reconcileID", uuid.NewUUID())
-
- // logger will be associated to the new context that is
- // returned from ctrl.LoggerInto.
- ctx = ctrl.LoggerInto(ctx, log)
-
- // Fetch the OCIRepository
- obj := &sourcev1.OCIRepository{}
- if err := r.Get(ctx, req.NamespacedName, obj); err != nil {
- return ctrl.Result{}, client.IgnoreNotFound(err)
- }
-
- // Record suspended status metric
- r.RecordSuspend(ctx, obj, obj.Spec.Suspend)
-
- // Return early if the object is suspended
- if obj.Spec.Suspend {
- log.Info("reconciliation is suspended for this object")
- return ctrl.Result{}, nil
- }
-
- // Initialize the patch helper with the current version of the object.
- patchHelper, err := patch.NewHelper(obj, r.Client)
- if err != nil {
- return ctrl.Result{}, err
- }
-
- // recResult stores the abstracted reconcile result.
- var recResult sreconcile.Result
-
- // Always attempt to patch the object and status after each reconciliation
- // NOTE: The final runtime result and error are set in this block.
- defer func() {
- summarizeHelper := summarize.NewHelper(r.EventRecorder, patchHelper)
- summarizeOpts := []summarize.Option{
- summarize.WithConditions(ociRepositoryReadyCondition),
- summarize.WithReconcileResult(recResult),
- summarize.WithReconcileError(retErr),
- summarize.WithIgnoreNotFound(),
- summarize.WithProcessors(
- summarize.ErrorActionHandler,
- summarize.RecordReconcileReq,
- ),
- summarize.WithResultBuilder(sreconcile.AlwaysRequeueResultBuilder{RequeueAfter: obj.GetRequeueAfter()}),
- summarize.WithPatchFieldOwner(r.ControllerName),
- }
- result, retErr = summarizeHelper.SummarizeAndPatch(ctx, obj, summarizeOpts...)
-
- // Always record readiness and duration metrics
- r.Metrics.RecordReadiness(ctx, obj)
- r.Metrics.RecordDuration(ctx, obj, start)
- }()
-
- // Add finalizer first if not exist to avoid the race condition between init and delete
- if !controllerutil.ContainsFinalizer(obj, sourcev1.SourceFinalizer) {
- controllerutil.AddFinalizer(obj, sourcev1.SourceFinalizer)
- recResult = sreconcile.ResultRequeue
- return
- }
-
- // Examine if the object is under deletion
- if !obj.ObjectMeta.DeletionTimestamp.IsZero() {
- recResult, retErr = r.reconcileDelete(ctx, obj)
- return
- }
-
- // Reconcile actual object
- reconcilers := []ociRepositoryReconcileFunc{
- r.reconcileStorage,
- r.reconcileSource,
- r.reconcileArtifact,
- }
- recResult, retErr = r.reconcile(ctx, obj, reconcilers)
- return
-}
-
-// reconcile iterates through the ociRepositoryReconcileFunc tasks for the
-// object. It returns early on the first call that returns
-// reconcile.ResultRequeue, or produces an error.
-func (r *OCIRepositoryReconciler) reconcile(ctx context.Context, obj *sourcev1.OCIRepository, reconcilers []ociRepositoryReconcileFunc) (sreconcile.Result, error) {
- oldObj := obj.DeepCopy()
-
- // Mark as reconciling if generation differs.
- if obj.Generation != obj.Status.ObservedGeneration {
- conditions.MarkReconciling(obj, "NewGeneration", "reconciling new object generation (%d)", obj.Generation)
- }
-
- // Create temp working dir
- tmpDir, err := util.TempDirForObj("", obj)
- if err != nil {
- e := serror.NewGeneric(
- fmt.Errorf("failed to create temporary working directory: %w", err),
- sourcev1.DirCreationFailedReason,
- )
- conditions.MarkTrue(obj, sourcev1.StorageOperationFailedCondition, e.Reason, e.Err.Error())
- return sreconcile.ResultEmpty, e
- }
- defer func() {
- if err = os.RemoveAll(tmpDir); err != nil {
- ctrl.LoggerFrom(ctx).Error(err, "failed to remove temporary working directory")
- }
- }()
- conditions.Delete(obj, sourcev1.StorageOperationFailedCondition)
-
- var (
- res sreconcile.Result
- resErr error
- metadata = sourcev1.Artifact{}
- )
-
- // Run the sub-reconcilers and build the result of reconciliation.
- for _, rec := range reconcilers {
- recResult, err := rec(ctx, obj, &metadata, tmpDir)
- // Exit immediately on ResultRequeue.
- if recResult == sreconcile.ResultRequeue {
- return sreconcile.ResultRequeue, nil
- }
- // If an error is received, prioritize the returned results because an
- // error also means immediate requeue.
- if err != nil {
- resErr = err
- res = recResult
- break
- }
- // Prioritize requeue request in the result.
- res = sreconcile.LowestRequeuingResult(res, recResult)
- }
-
- r.notify(ctx, oldObj, obj, res, resErr)
-
- return res, resErr
-}
-
-// reconcileSource fetches the upstream OCI artifact metadata and content.
-// If this fails, it records v1beta2.FetchFailedCondition=True on the object and returns early.
-func (r *OCIRepositoryReconciler) reconcileSource(ctx context.Context, obj *sourcev1.OCIRepository, metadata *sourcev1.Artifact, dir string) (sreconcile.Result, error) {
- ctxTimeout, cancel := context.WithTimeout(ctx, obj.Spec.Timeout.Duration)
- defer cancel()
-
- options := r.craneOptions(ctxTimeout)
-
- // Generate the registry credential keychain either from static credentials or using cloud OIDC
- keychain, err := r.keychain(ctx, obj)
- if err != nil {
- e := serror.NewGeneric(
- fmt.Errorf("failed to get credential: %w", err),
- sourcev1.AuthenticationFailedReason,
- )
- conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, e.Reason, e.Err.Error())
- return sreconcile.ResultEmpty, e
- }
- options = append(options, crane.WithAuthFromKeychain(keychain))
-
- if obj.Spec.Provider != sourcev1.GenericOCIProvider {
- auth, authErr := r.oidcAuth(ctxTimeout, obj)
- if authErr != nil && !errors.Is(authErr, oci.ErrUnconfiguredProvider) {
- e := serror.NewGeneric(
- fmt.Errorf("failed to get credential from %s: %w", obj.Spec.Provider, authErr),
- sourcev1.AuthenticationFailedReason,
- )
- conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, e.Reason, e.Err.Error())
- return sreconcile.ResultEmpty, e
- }
- if auth != nil {
- options = append(options, crane.WithAuth(auth))
- }
- }
-
- // Generate the transport for remote operations
- transport, err := r.transport(ctx, obj)
- if err != nil {
- e := serror.NewGeneric(
- fmt.Errorf("failed to generate transport for '%s': %w", obj.Spec.URL, err),
- sourcev1.AuthenticationFailedReason,
- )
- conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, e.Reason, e.Err.Error())
- return sreconcile.ResultEmpty, e
- }
- if transport != nil {
- options = append(options, crane.WithTransport(transport))
- }
-
- // Determine which artifact revision to pull
- url, err := r.getArtifactURL(obj, options)
- if err != nil {
- if _, ok := err.(invalidOCIURLError); ok {
- e := serror.NewStalling(
- fmt.Errorf("URL validation failed for '%s': %w", obj.Spec.URL, err),
- sourcev1.URLInvalidReason)
- conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, e.Reason, e.Err.Error())
- return sreconcile.ResultEmpty, e
- }
-
- e := serror.NewGeneric(
- fmt.Errorf("failed to determine the artifact tag for '%s': %w", obj.Spec.URL, err),
- sourcev1.ReadOperationFailedReason)
- conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, e.Reason, e.Err.Error())
- return sreconcile.ResultEmpty, e
- }
-
- // Pull artifact from the remote container registry
- img, err := crane.Pull(url, options...)
- if err != nil {
- e := serror.NewGeneric(
- fmt.Errorf("failed to pull artifact from '%s': %w", obj.Spec.URL, err),
- sourcev1.OCIPullFailedReason,
- )
- conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, e.Reason, e.Err.Error())
- return sreconcile.ResultEmpty, e
- }
-
- // Determine the artifact SHA256 digest
- imgDigest, err := img.Digest()
- if err != nil {
- e := serror.NewGeneric(
- fmt.Errorf("failed to determine artifact digest: %w", err),
- sourcev1.OCILayerOperationFailedReason,
- )
- conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, e.Reason, e.Err.Error())
- return sreconcile.ResultEmpty, e
- }
-
- // Set the internal revision to the remote digest hex
- revision := imgDigest.Hex
-
- // Copy the OCI annotations to the internal artifact metadata
- manifest, err := img.Manifest()
- if err != nil {
- e := serror.NewGeneric(
- fmt.Errorf("failed to parse artifact manifest: %w", err),
- sourcev1.OCILayerOperationFailedReason,
- )
- conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, e.Reason, e.Err.Error())
- return sreconcile.ResultEmpty, e
- }
-
- m := &sourcev1.Artifact{
- Revision: revision,
- Metadata: manifest.Annotations,
- }
- m.DeepCopyInto(metadata)
-
- // Mark observations about the revision on the object
- defer func() {
- if !obj.GetArtifact().HasRevision(revision) {
- message := fmt.Sprintf("new digest '%s' for '%s'", revision, url)
- conditions.MarkTrue(obj, sourcev1.ArtifactOutdatedCondition, "NewRevision", message)
- conditions.MarkReconciling(obj, "NewRevision", message)
- }
- }()
-
- // Extract the content of the first artifact layer
- if !obj.GetArtifact().HasRevision(revision) {
- layers, err := img.Layers()
- if err != nil {
- e := serror.NewGeneric(
- fmt.Errorf("failed to parse artifact layers: %w", err),
- sourcev1.OCILayerOperationFailedReason,
- )
- conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, e.Reason, e.Err.Error())
- return sreconcile.ResultEmpty, e
- }
-
- if len(layers) < 1 {
- e := serror.NewGeneric(
- fmt.Errorf("no layers found in artifact"),
- sourcev1.OCILayerOperationFailedReason,
- )
- conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, e.Reason, e.Err.Error())
- return sreconcile.ResultEmpty, e
- }
-
- blob, err := layers[0].Compressed()
- if err != nil {
- e := serror.NewGeneric(
- fmt.Errorf("failed to extract the first layer from artifact: %w", err),
- sourcev1.OCILayerOperationFailedReason,
- )
- conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, e.Reason, e.Err.Error())
- return sreconcile.ResultEmpty, e
- }
-
- if _, err = untar.Untar(blob, dir); err != nil {
- e := serror.NewGeneric(
- fmt.Errorf("failed to untar the first layer from artifact: %w", err),
- sourcev1.OCILayerOperationFailedReason,
- )
- conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, e.Reason, e.Err.Error())
- return sreconcile.ResultEmpty, e
- }
- }
-
- conditions.Delete(obj, sourcev1.FetchFailedCondition)
- return sreconcile.ResultSuccess, nil
-}
-
-// parseRepositoryURL validates and extracts the repository URL.
-func (r *OCIRepositoryReconciler) parseRepositoryURL(obj *sourcev1.OCIRepository) (string, error) {
- if !strings.HasPrefix(obj.Spec.URL, sourcev1.OCIRepositoryPrefix) {
- return "", fmt.Errorf("URL must be in format 'oci:////'")
- }
-
- url := strings.TrimPrefix(obj.Spec.URL, sourcev1.OCIRepositoryPrefix)
- ref, err := name.ParseReference(url)
- if err != nil {
- return "", err
- }
-
- imageName := strings.TrimPrefix(url, ref.Context().RegistryStr())
- if s := strings.Split(imageName, ":"); len(s) > 1 {
- return "", fmt.Errorf("URL must not contain a tag; remove ':%s'", s[1])
- }
-
- return ref.Context().Name(), nil
-}
-
-// getArtifactURL determines which tag or digest should be used and returns the OCI artifact FQN.
-func (r *OCIRepositoryReconciler) getArtifactURL(obj *sourcev1.OCIRepository, options []crane.Option) (string, error) {
- url, err := r.parseRepositoryURL(obj)
- if err != nil {
- return "", invalidOCIURLError{err}
- }
-
- if obj.Spec.Reference != nil {
- if obj.Spec.Reference.Digest != "" {
- return fmt.Sprintf("%s@%s", url, obj.Spec.Reference.Digest), nil
- }
-
- if obj.Spec.Reference.SemVer != "" {
- tag, err := r.getTagBySemver(url, obj.Spec.Reference.SemVer, options)
- if err != nil {
- return "", err
- }
- return fmt.Sprintf("%s:%s", url, tag), nil
- }
-
- if obj.Spec.Reference.Tag != "" {
- return fmt.Sprintf("%s:%s", url, obj.Spec.Reference.Tag), nil
- }
- }
-
- return url, nil
-}
-
-// getTagBySemver call the remote container registry, fetches all the tags from the repository,
-// and returns the latest tag according to the semver expression.
-func (r *OCIRepositoryReconciler) getTagBySemver(url, exp string, options []crane.Option) (string, error) {
- tags, err := crane.ListTags(url, options...)
- if err != nil {
- return "", err
- }
-
- constraint, err := semver.NewConstraint(exp)
- if err != nil {
- return "", fmt.Errorf("semver '%s' parse error: %w", exp, err)
- }
-
- var matchingVersions []*semver.Version
- for _, t := range tags {
- v, err := version.ParseVersion(t)
- if err != nil {
- continue
- }
-
- if constraint.Check(v) {
- matchingVersions = append(matchingVersions, v)
- }
- }
-
- if len(matchingVersions) == 0 {
- return "", fmt.Errorf("no match found for semver: %s", exp)
- }
-
- sort.Sort(sort.Reverse(semver.Collection(matchingVersions)))
- return matchingVersions[0].Original(), nil
-}
-
-// keychain generates the credential keychain based on the resource
-// configuration. If no auth is specified a default keychain with
-// anonymous access is returned
-func (r *OCIRepositoryReconciler) keychain(ctx context.Context, obj *sourcev1.OCIRepository) (authn.Keychain, error) {
- pullSecretNames := sets.NewString()
-
- // lookup auth secret
- if obj.Spec.SecretRef != nil {
- pullSecretNames.Insert(obj.Spec.SecretRef.Name)
- }
-
- // lookup service account
- if obj.Spec.ServiceAccountName != "" {
- serviceAccountName := obj.Spec.ServiceAccountName
- serviceAccount := corev1.ServiceAccount{}
- err := r.Get(ctx, types.NamespacedName{Namespace: obj.Namespace, Name: serviceAccountName}, &serviceAccount)
- if err != nil {
- return nil, err
- }
- for _, ips := range serviceAccount.ImagePullSecrets {
- pullSecretNames.Insert(ips.Name)
- }
- }
-
- // if no pullsecrets available return DefaultKeyChain
- if len(pullSecretNames) == 0 {
- return authn.DefaultKeychain, nil
- }
-
- // lookup image pull secrets
- imagePullSecrets := make([]corev1.Secret, len(pullSecretNames))
- for i, imagePullSecretName := range pullSecretNames.List() {
- imagePullSecret := corev1.Secret{}
- err := r.Get(ctx, types.NamespacedName{Namespace: obj.Namespace, Name: imagePullSecretName}, &imagePullSecret)
- if err != nil {
- r.eventLogf(ctx, obj, events.EventSeverityTrace, sourcev1.AuthenticationFailedReason,
- "auth secret '%s' not found", imagePullSecretName)
- return nil, err
- }
- imagePullSecrets[i] = imagePullSecret
- }
-
- return k8schain.NewFromPullSecrets(ctx, imagePullSecrets)
-}
-
-// transport clones the default transport from remote and when a certSecretRef is specified,
-// the returned transport will include the TLS client and/or CA certificates.
-func (r *OCIRepositoryReconciler) transport(ctx context.Context, obj *sourcev1.OCIRepository) (http.RoundTripper, error) {
- if obj.Spec.CertSecretRef == nil || obj.Spec.CertSecretRef.Name == "" {
- return nil, nil
- }
-
- certSecretName := types.NamespacedName{
- Namespace: obj.Namespace,
- Name: obj.Spec.CertSecretRef.Name,
- }
- var certSecret corev1.Secret
- if err := r.Get(ctx, certSecretName, &certSecret); err != nil {
- return nil, err
- }
-
- transport := remote.DefaultTransport.Clone()
- tlsConfig := transport.TLSClientConfig
-
- if clientCert, ok := certSecret.Data[oci.ClientCert]; ok {
- // parse and set client cert and secret
- if clientKey, ok := certSecret.Data[oci.ClientKey]; ok {
- cert, err := tls.X509KeyPair(clientCert, clientKey)
- if err != nil {
- return nil, err
- }
- tlsConfig.Certificates = append(tlsConfig.Certificates, cert)
- } else {
- return nil, fmt.Errorf("'%s' found in secret, but no %s", oci.ClientCert, oci.ClientKey)
- }
- }
-
- if caCert, ok := certSecret.Data[oci.CACert]; ok {
- syscerts, err := x509.SystemCertPool()
- if err != nil {
- return nil, err
- }
- syscerts.AppendCertsFromPEM(caCert)
- tlsConfig.RootCAs = syscerts
- }
- return transport, nil
-
-}
-
-// oidcAuth generates the OIDC credential authenticator based on the specified cloud provider.
-func (r *OCIRepositoryReconciler) oidcAuth(ctx context.Context, obj *sourcev1.OCIRepository) (authn.Authenticator, error) {
- url := strings.TrimPrefix(obj.Spec.URL, sourcev1.OCIRepositoryPrefix)
- ref, err := name.ParseReference(url)
- if err != nil {
- return nil, fmt.Errorf("failed to parse URL '%s': %w", obj.Spec.URL, err)
- }
-
- opts := login.ProviderOptions{}
- switch obj.Spec.Provider {
- case sourcev1.AmazonOCIProvider:
- opts.AwsAutoLogin = true
- case sourcev1.AzureOCIProvider:
- opts.AzureAutoLogin = true
- case sourcev1.GoogleOCIProvider:
- opts.GcpAutoLogin = true
- }
-
- return login.NewManager().Login(ctx, url, ref, opts)
-}
-
-// craneOptions sets the auth headers, timeout and user agent
-// for all operations against remote container registries.
-func (r *OCIRepositoryReconciler) craneOptions(ctx context.Context) []crane.Option {
- options := []crane.Option{
- crane.WithContext(ctx),
- crane.WithUserAgent(oci.UserAgent),
- }
- return options
-}
-
-// reconcileStorage ensures the current state of the storage matches the
-// desired and previously observed state.
-//
-// The garbage collection is executed based on the flag configured settings and
-// may remove files that are beyond their TTL or the maximum number of files
-// to survive a collection cycle.
-// If the Artifact in the Status of the object disappeared from the Storage,
-// it is removed from the object.
-// If the object does not have an Artifact in its Status, a Reconciling
-// condition is added.
-// The hostname of any URL in the Status of the object are updated, to ensure
-// they match the Storage server hostname of current runtime.
-func (r *OCIRepositoryReconciler) reconcileStorage(ctx context.Context,
- obj *sourcev1.OCIRepository, _ *sourcev1.Artifact, _ string) (sreconcile.Result, error) {
- // Garbage collect previous advertised artifact(s) from storage
- _ = r.garbageCollect(ctx, obj)
-
- // Determine if the advertised artifact is still in storage
- if artifact := obj.GetArtifact(); artifact != nil && !r.Storage.ArtifactExist(*artifact) {
- obj.Status.Artifact = nil
- obj.Status.URL = ""
- // Remove the condition as the artifact doesn't exist.
- conditions.Delete(obj, sourcev1.ArtifactInStorageCondition)
- }
-
- // Record that we do not have an artifact
- if obj.GetArtifact() == nil {
- conditions.MarkReconciling(obj, "NoArtifact", "no artifact for resource in storage")
- conditions.Delete(obj, sourcev1.ArtifactInStorageCondition)
- return sreconcile.ResultSuccess, nil
- }
-
- // Always update URLs to ensure hostname is up-to-date
- r.Storage.SetArtifactURL(obj.GetArtifact())
- obj.Status.URL = r.Storage.SetHostname(obj.Status.URL)
-
- return sreconcile.ResultSuccess, nil
-}
-
-// reconcileArtifact archives a new Artifact to the Storage, if the current
-// (Status) data on the object does not match the given.
-//
-// The inspection of the given data to the object is differed, ensuring any
-// stale observations like v1beta2.ArtifactOutdatedCondition are removed.
-// If the given Artifact does not differ from the object's current, it returns
-// early.
-// On a successful archive, the Artifact in the Status of the object is set,
-// and the symlink in the Storage is updated to its path.
-func (r *OCIRepositoryReconciler) reconcileArtifact(ctx context.Context,
- obj *sourcev1.OCIRepository, metadata *sourcev1.Artifact, dir string) (sreconcile.Result, error) {
- // Calculate revision
- revision := metadata.Revision
-
- // Create artifact
- artifact := r.Storage.NewArtifactFor(obj.Kind, obj, revision, fmt.Sprintf("%s.tar.gz", revision))
-
- // Set the ArtifactInStorageCondition if there's no drift.
- defer func() {
- if obj.GetArtifact().HasRevision(artifact.Revision) {
- conditions.Delete(obj, sourcev1.ArtifactOutdatedCondition)
- conditions.MarkTrue(obj, sourcev1.ArtifactInStorageCondition, meta.SucceededReason,
- "stored artifact for digest '%s'", artifact.Revision)
- }
- }()
-
- // The artifact is up-to-date
- if obj.GetArtifact().HasRevision(artifact.Revision) {
- r.eventLogf(ctx, obj, events.EventTypeTrace, sourcev1.ArtifactUpToDateReason,
- "artifact up-to-date with remote digest: '%s'", artifact.Revision)
- return sreconcile.ResultSuccess, nil
- }
-
- // Ensure target path exists and is a directory
- if f, err := os.Stat(dir); err != nil {
- e := serror.NewGeneric(
- fmt.Errorf("failed to stat source path: %w", err),
- sourcev1.StatOperationFailedReason,
- )
- conditions.MarkTrue(obj, sourcev1.StorageOperationFailedCondition, e.Reason, e.Err.Error())
- return sreconcile.ResultEmpty, e
- } else if !f.IsDir() {
- e := serror.NewGeneric(
- fmt.Errorf("source path '%s' is not a directory", dir),
- sourcev1.InvalidPathReason,
- )
- conditions.MarkTrue(obj, sourcev1.StorageOperationFailedCondition, e.Reason, e.Err.Error())
- return sreconcile.ResultEmpty, e
- }
-
- // Ensure artifact directory exists and acquire lock
- if err := r.Storage.MkdirAll(artifact); err != nil {
- e := serror.NewGeneric(
- fmt.Errorf("failed to create artifact directory: %w", err),
- sourcev1.DirCreationFailedReason,
- )
- conditions.MarkTrue(obj, sourcev1.StorageOperationFailedCondition, e.Reason, e.Err.Error())
- return sreconcile.ResultEmpty, e
- }
- unlock, err := r.Storage.Lock(artifact)
- if err != nil {
- return sreconcile.ResultEmpty, serror.NewGeneric(
- fmt.Errorf("failed to acquire lock for artifact: %w", err),
- meta.FailedReason,
- )
- }
- defer unlock()
-
- // Archive directory to storage
- if err := r.Storage.Archive(&artifact, dir, nil); err != nil {
- e := serror.NewGeneric(
- fmt.Errorf("unable to archive artifact to storage: %s", err),
- sourcev1.ArchiveOperationFailedReason,
- )
- conditions.MarkTrue(obj, sourcev1.StorageOperationFailedCondition, e.Reason, e.Err.Error())
- return sreconcile.ResultEmpty, e
- }
-
- // Record it on the object
- obj.Status.Artifact = artifact.DeepCopy()
- obj.Status.Artifact.Metadata = metadata.Metadata
-
- // Update symlink on a "best effort" basis
- url, err := r.Storage.Symlink(artifact, "latest.tar.gz")
- if err != nil {
- r.eventLogf(ctx, obj, events.EventTypeTrace, sourcev1.SymlinkUpdateFailedReason,
- "failed to update status URL symlink: %s", err)
- }
- if url != "" {
- obj.Status.URL = url
- }
- conditions.Delete(obj, sourcev1.StorageOperationFailedCondition)
- return sreconcile.ResultSuccess, nil
-}
-
-// reconcileDelete handles the deletion of the object.
-// It first garbage collects all Artifacts for the object from the Storage.
-// Removing the finalizer from the object if successful.
-func (r *OCIRepositoryReconciler) reconcileDelete(ctx context.Context, obj *sourcev1.OCIRepository) (sreconcile.Result, error) {
- // Garbage collect the resource's artifacts
- if err := r.garbageCollect(ctx, obj); err != nil {
- // Return the error so we retry the failed garbage collection
- return sreconcile.ResultEmpty, err
- }
-
- // Remove our finalizer from the list
- controllerutil.RemoveFinalizer(obj, sourcev1.SourceFinalizer)
-
- // Stop reconciliation as the object is being deleted
- return sreconcile.ResultEmpty, nil
-}
-
-// garbageCollect performs a garbage collection for the given object.
-//
-// It removes all but the current Artifact from the Storage, unless the
-// deletion timestamp on the object is set. Which will result in the
-// removal of all Artifacts for the objects.
-func (r *OCIRepositoryReconciler) garbageCollect(ctx context.Context, obj *sourcev1.OCIRepository) error {
- if !obj.DeletionTimestamp.IsZero() {
- if deleted, err := r.Storage.RemoveAll(r.Storage.NewArtifactFor(obj.Kind, obj.GetObjectMeta(), "", "*")); err != nil {
- return serror.NewGeneric(
- fmt.Errorf("garbage collection for deleted resource failed: %w", err),
- "GarbageCollectionFailed",
- )
- } else if deleted != "" {
- r.eventLogf(ctx, obj, events.EventTypeTrace, "GarbageCollectionSucceeded",
- "garbage collected artifacts for deleted resource")
- }
- obj.Status.Artifact = nil
- return nil
- }
- if obj.GetArtifact() != nil {
- delFiles, err := r.Storage.GarbageCollect(ctx, *obj.GetArtifact(), time.Second*5)
- if err != nil {
- return serror.NewGeneric(
- fmt.Errorf("garbage collection of artifacts failed: %w", err),
- "GarbageCollectionFailed",
- )
- }
- if len(delFiles) > 0 {
- r.eventLogf(ctx, obj, events.EventTypeTrace, "GarbageCollectionSucceeded",
- fmt.Sprintf("garbage collected %d artifacts", len(delFiles)))
- return nil
- }
- }
- return nil
-}
-
-// eventLogf records events, and logs at the same time.
-//
-// This log is different from the debug log in the EventRecorder, in the sense
-// that this is a simple log. While the debug log contains complete details
-// about the event.
-func (r *OCIRepositoryReconciler) eventLogf(ctx context.Context,
- obj runtime.Object, eventType string, reason string, messageFmt string, args ...interface{}) {
- msg := fmt.Sprintf(messageFmt, args...)
- // Log and emit event.
- if eventType == corev1.EventTypeWarning {
- ctrl.LoggerFrom(ctx).Error(errors.New(reason), msg)
- } else {
- ctrl.LoggerFrom(ctx).Info(msg)
- }
- r.Eventf(obj, eventType, reason, msg)
-}
-
-// notify emits notification related to the reconciliation.
-func (r *OCIRepositoryReconciler) notify(ctx context.Context,
- oldObj, newObj *sourcev1.OCIRepository, res sreconcile.Result, resErr error) {
- // Notify successful reconciliation for new artifact and recovery from any
- // failure.
- if resErr == nil && res == sreconcile.ResultSuccess && newObj.Status.Artifact != nil {
- annotations := map[string]string{
- sourcev1.GroupVersion.Group + "/revision": newObj.Status.Artifact.Revision,
- sourcev1.GroupVersion.Group + "/checksum": newObj.Status.Artifact.Checksum,
- }
-
- var oldChecksum string
- if oldObj.GetArtifact() != nil {
- oldChecksum = oldObj.GetArtifact().Checksum
- }
-
- message := fmt.Sprintf("stored artifact with digest '%s' from '%s'", newObj.Status.Artifact.Revision, newObj.Spec.URL)
-
- // enrich message with upstream annotations if found
- if info := newObj.GetArtifact().Metadata; info != nil {
- var source, revision string
- if val, ok := info[oci.SourceAnnotation]; ok {
- source = val
- }
- if val, ok := info[oci.RevisionAnnotation]; ok {
- revision = val
- }
- if source != "" && revision != "" {
- message = fmt.Sprintf("%s, origin source '%s', origin revision '%s'", message, source, revision)
- }
- }
-
- // Notify on new artifact and failure recovery.
- if oldChecksum != newObj.GetArtifact().Checksum {
- r.AnnotatedEventf(newObj, annotations, corev1.EventTypeNormal,
- "NewArtifact", message)
- ctrl.LoggerFrom(ctx).Info(message)
- } else {
- if sreconcile.FailureRecovery(oldObj, newObj, ociRepositoryFailConditions) {
- r.AnnotatedEventf(newObj, annotations, corev1.EventTypeNormal,
- meta.SucceededReason, message)
- ctrl.LoggerFrom(ctx).Info(message)
- }
- }
- }
-}
diff --git a/controllers/ocirepository_controller_test.go b/controllers/ocirepository_controller_test.go
deleted file mode 100644
index b72413b1f..000000000
--- a/controllers/ocirepository_controller_test.go
+++ /dev/null
@@ -1,1599 +0,0 @@
-/*
-Copyright 2022 The Flux authors
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-package controllers
-
-import (
- "crypto/rand"
- "crypto/rsa"
- "crypto/tls"
- "crypto/x509"
- "crypto/x509/pkix"
- "encoding/pem"
- "errors"
- "fmt"
- "math/big"
- "net"
- "net/http"
- "net/http/httptest"
- "net/url"
- "os"
- "path"
- "path/filepath"
- "strings"
- "testing"
- "time"
-
- "github.com/darkowlzz/controller-check/status"
- "github.com/fluxcd/pkg/apis/meta"
- "github.com/fluxcd/pkg/oci"
- "github.com/fluxcd/pkg/runtime/conditions"
- "github.com/fluxcd/pkg/runtime/patch"
- "github.com/fluxcd/pkg/untar"
- sourcev1 "github.com/fluxcd/source-controller/api/v1beta2"
- serror "github.com/fluxcd/source-controller/internal/error"
- sreconcile "github.com/fluxcd/source-controller/internal/reconcile"
- "github.com/fluxcd/source-controller/pkg/git"
- "github.com/google/go-containerregistry/pkg/authn"
- "github.com/google/go-containerregistry/pkg/crane"
- "github.com/google/go-containerregistry/pkg/registry"
- gcrv1 "github.com/google/go-containerregistry/pkg/v1"
- "github.com/google/go-containerregistry/pkg/v1/mutate"
- . "github.com/onsi/gomega"
- corev1 "k8s.io/api/core/v1"
- apierrors "k8s.io/apimachinery/pkg/api/errors"
- metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
- "k8s.io/client-go/tools/record"
- kstatus "sigs.k8s.io/cli-utils/pkg/kstatus/status"
- "sigs.k8s.io/controller-runtime/pkg/client"
- fakeclient "sigs.k8s.io/controller-runtime/pkg/client/fake"
- "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
-)
-
-func TestOCIRepository_Reconcile(t *testing.T) {
- g := NewWithT(t)
-
- // Registry server with public images
- tmpDir := t.TempDir()
- regServer, err := setupRegistryServer(ctx, tmpDir, registryOptions{})
- if err != nil {
- g.Expect(err).ToNot(HaveOccurred())
- }
-
- podinfoVersions, err := pushMultiplePodinfoImages(regServer.registryHost, "6.1.4", "6.1.5", "6.1.6")
-
- tests := []struct {
- name string
- url string
- tag string
- semver string
- digest string
- assertArtifact []artifactFixture
- }{
- {
- name: "public tag",
- url: podinfoVersions["6.1.6"].url,
- tag: podinfoVersions["6.1.6"].tag,
- digest: podinfoVersions["6.1.6"].digest.Hex,
- assertArtifact: []artifactFixture{
- {
- expectedPath: "kustomize/deployment.yaml",
- expectedChecksum: "6fd625effe6bb805b6a78943ee082a4412e763edb7fcaed6e8fe644d06cbf423",
- },
- {
- expectedPath: "kustomize/hpa.yaml",
- expectedChecksum: "d20e92e3b2926ebfee1644be0f4d0abadebfa95a8005c12f71bfd534a4be4ff9",
- },
- },
- },
- {
- name: "public semver",
- url: podinfoVersions["6.1.5"].url,
- semver: ">= 6.1 <= 6.1.5",
- digest: podinfoVersions["6.1.5"].digest.Hex,
- assertArtifact: []artifactFixture{
- {
- expectedPath: "kustomize/deployment.yaml",
- expectedChecksum: "dce4f5f780a8e8994b06031e5b567bf488ceaaaabd9bd3fc278b4f3bfc8c577b",
- },
- {
- expectedPath: "kustomize/hpa.yaml",
- expectedChecksum: "d20e92e3b2926ebfee1644be0f4d0abadebfa95a8005c12f71bfd534a4be4ff9",
- },
- },
- },
- }
-
- for _, tt := range tests {
- t.Run(tt.name, func(t *testing.T) {
- g := NewWithT(t)
-
- ns, err := testEnv.CreateNamespace(ctx, "ocirepository-reconcile-test")
- g.Expect(err).ToNot(HaveOccurred())
- defer func() { g.Expect(testEnv.Delete(ctx, ns)).To(Succeed()) }()
-
- obj := &sourcev1.OCIRepository{
- ObjectMeta: metav1.ObjectMeta{
- GenerateName: "ocirepository-reconcile",
- Namespace: ns.Name,
- },
- Spec: sourcev1.OCIRepositorySpec{
- URL: tt.url,
- Interval: metav1.Duration{Duration: 60 * time.Minute},
- Reference: &sourcev1.OCIRepositoryRef{},
- },
- }
-
- if tt.tag != "" {
- obj.Spec.Reference.Tag = tt.tag
- }
- if tt.semver != "" {
- obj.Spec.Reference.SemVer = tt.semver
- }
-
- g.Expect(testEnv.Create(ctx, obj)).To(Succeed())
-
- key := client.ObjectKey{Name: obj.Name, Namespace: obj.Namespace}
-
- // Wait for the finalizer to be set
- g.Eventually(func() bool {
- if err := testEnv.Get(ctx, key, obj); err != nil {
- return false
- }
- return len(obj.Finalizers) > 0
- }, timeout).Should(BeTrue())
-
- // Wait for the object to be Ready
- g.Eventually(func() bool {
- if err := testEnv.Get(ctx, key, obj); err != nil {
- return false
- }
- if !conditions.IsReady(obj) {
- return false
- }
- readyCondition := conditions.Get(obj, meta.ReadyCondition)
- return obj.Generation == readyCondition.ObservedGeneration &&
- obj.Generation == obj.Status.ObservedGeneration
- }, timeout).Should(BeTrue())
-
- // Check if the revision matches the expected digest
- g.Expect(obj.Status.Artifact.Revision).To(Equal(tt.digest))
-
- // Check if the metadata matches the expected annotations
- g.Expect(obj.Status.Artifact.Metadata[oci.SourceAnnotation]).To(ContainSubstring("podinfo"))
- g.Expect(obj.Status.Artifact.Metadata[oci.RevisionAnnotation]).To(ContainSubstring(tt.tag))
-
- // Check if the artifact storage path matches the expected file path
- localPath := testStorage.LocalPath(*obj.Status.Artifact)
- t.Logf("artifact local path: %s", localPath)
-
- f, err := os.Open(localPath)
- g.Expect(err).ToNot(HaveOccurred())
- defer f.Close()
-
- // create a tmp directory to extract artifact
- tmp, err := os.MkdirTemp("", "ocirepository-test-")
- g.Expect(err).ToNot(HaveOccurred())
- defer os.RemoveAll(tmp)
-
- ep, err := untar.Untar(f, tmp)
- g.Expect(err).ToNot(HaveOccurred())
- t.Logf("extracted summary: %s", ep)
-
- for _, af := range tt.assertArtifact {
- expectedFile := filepath.Join(tmp, af.expectedPath)
- g.Expect(expectedFile).To(BeAnExistingFile())
-
- f2, err := os.Open(expectedFile)
- g.Expect(err).ToNot(HaveOccurred())
- defer f2.Close()
-
- h := testStorage.Checksum(f2)
- t.Logf("file %q hash: %q", expectedFile, h)
- g.Expect(h).To(Equal(af.expectedChecksum))
- }
-
- // Check if the object status is valid
- condns := &status.Conditions{NegativePolarity: ociRepositoryReadyCondition.NegativePolarity}
- checker := status.NewChecker(testEnv.Client, condns)
- checker.CheckErr(ctx, obj)
-
- // kstatus client conformance check
- u, err := patch.ToUnstructured(obj)
- g.Expect(err).ToNot(HaveOccurred())
- res, err := kstatus.Compute(u)
- g.Expect(err).ToNot(HaveOccurred())
- g.Expect(res.Status).To(Equal(kstatus.CurrentStatus))
-
- // Patch the object with reconcile request annotation.
- patchHelper, err := patch.NewHelper(obj, testEnv.Client)
- g.Expect(err).ToNot(HaveOccurred())
- annotations := map[string]string{
- meta.ReconcileRequestAnnotation: "now",
- }
- obj.SetAnnotations(annotations)
- g.Expect(patchHelper.Patch(ctx, obj)).ToNot(HaveOccurred())
- g.Eventually(func() bool {
- if err := testEnv.Get(ctx, key, obj); err != nil {
- return false
- }
- return obj.Status.LastHandledReconcileAt == "now"
- }, timeout).Should(BeTrue())
-
- // Wait for the object to be deleted
- g.Expect(testEnv.Delete(ctx, obj)).To(Succeed())
- g.Eventually(func() bool {
- if err := testEnv.Get(ctx, key, obj); err != nil {
- return apierrors.IsNotFound(err)
- }
- return false
- }, timeout).Should(BeTrue())
- })
- }
-}
-
-func TestOCIRepository_reconcileSource_authStrategy(t *testing.T) {
- type secretOptions struct {
- username string
- password string
- includeSA bool
- includeSecret bool
- }
-
- pool := x509.NewCertPool()
- pool.AppendCertsFromPEM(tlsCA)
-
- tests := []struct {
- name string
- url string
- registryOpts registryOptions
- craneOpts []crane.Option
- secretOpts secretOptions
- tlsCertSecret *corev1.Secret
- want sreconcile.Result
- wantErr bool
- assertConditions []metav1.Condition
- }{
- {
- name: "HTTP without basic auth",
- want: sreconcile.ResultSuccess,
- assertConditions: []metav1.Condition{
- *conditions.TrueCondition(sourcev1.ArtifactOutdatedCondition, "NewRevision", "new digest '' for ''"),
- *conditions.TrueCondition(meta.ReconcilingCondition, "NewRevision", "new digest '' for ''"),
- },
- },
- {
- name: "HTTP with basic auth secret",
- want: sreconcile.ResultSuccess,
- registryOpts: registryOptions{
- withBasicAuth: true,
- },
- craneOpts: []crane.Option{crane.WithAuth(&authn.Basic{
- Username: testRegistryUsername,
- Password: testRegistryPassword,
- }),
- },
- secretOpts: secretOptions{
- username: testRegistryUsername,
- password: testRegistryPassword,
- includeSecret: true,
- },
- assertConditions: []metav1.Condition{
- *conditions.TrueCondition(meta.ReconcilingCondition, "NewRevision", "new digest '' for ''"),
- *conditions.TrueCondition(sourcev1.ArtifactOutdatedCondition, "NewRevision", "new digest '' for ''"),
- },
- },
- {
- name: "HTTP with serviceaccount",
- want: sreconcile.ResultSuccess,
- registryOpts: registryOptions{
- withBasicAuth: true,
- },
- craneOpts: []crane.Option{crane.WithAuth(&authn.Basic{
- Username: testRegistryUsername,
- Password: testRegistryPassword,
- }),
- },
- secretOpts: secretOptions{
- username: testRegistryUsername,
- password: testRegistryPassword,
- includeSA: true,
- },
- assertConditions: []metav1.Condition{
- *conditions.TrueCondition(meta.ReconcilingCondition, "NewRevision", "new digest '' for ''"),
- *conditions.TrueCondition(sourcev1.ArtifactOutdatedCondition, "NewRevision", "new digest '' for ''"),
- },
- },
- {
- name: "HTTP registry - basic auth with missing secret",
- want: sreconcile.ResultEmpty,
- registryOpts: registryOptions{
- withBasicAuth: true,
- },
- wantErr: true,
- craneOpts: []crane.Option{crane.WithAuth(&authn.Basic{
- Username: testRegistryUsername,
- Password: testRegistryPassword,
- }),
- },
- assertConditions: []metav1.Condition{
- *conditions.TrueCondition(sourcev1.FetchFailedCondition, sourcev1.OCIPullFailedReason, "failed to pull artifact from "),
- },
- },
- {
- name: "HTTP registry - basic auth with invalid secret",
- want: sreconcile.ResultEmpty,
- wantErr: true,
- registryOpts: registryOptions{
- withBasicAuth: true,
- },
- craneOpts: []crane.Option{crane.WithAuth(&authn.Basic{
- Username: testRegistryUsername,
- Password: testRegistryPassword,
- }),
- },
- secretOpts: secretOptions{
- username: "wrong-pass",
- password: "wrong-pass",
- includeSecret: true,
- },
- assertConditions: []metav1.Condition{
- *conditions.TrueCondition(sourcev1.FetchFailedCondition, sourcev1.OCIPullFailedReason, "failed to pull artifact from "),
- },
- },
- {
- name: "HTTP registry - basic auth with invalid serviceaccount",
- want: sreconcile.ResultEmpty,
- wantErr: true,
- registryOpts: registryOptions{
- withBasicAuth: true,
- },
- craneOpts: []crane.Option{crane.WithAuth(&authn.Basic{
- Username: testRegistryUsername,
- Password: testRegistryPassword,
- }),
- },
- secretOpts: secretOptions{
- username: "wrong-pass",
- password: "wrong-pass",
- includeSA: true,
- },
- assertConditions: []metav1.Condition{
- *conditions.TrueCondition(sourcev1.FetchFailedCondition, sourcev1.OCIPullFailedReason, "failed to pull artifact from "),
- },
- },
- {
- name: "HTTPS with valid certfile",
- want: sreconcile.ResultSuccess,
- registryOpts: registryOptions{
- withTLS: true,
- },
- craneOpts: []crane.Option{crane.WithTransport(&http.Transport{
- TLSClientConfig: &tls.Config{
- RootCAs: pool,
- },
- }),
- },
- tlsCertSecret: &corev1.Secret{
- ObjectMeta: metav1.ObjectMeta{
- Name: "ca-file",
- },
- Data: map[string][]byte{
- "caFile": tlsCA,
- },
- },
- assertConditions: []metav1.Condition{
- *conditions.TrueCondition(meta.ReconcilingCondition, "NewRevision", "new digest '' for ''"),
- *conditions.TrueCondition(sourcev1.ArtifactOutdatedCondition, "NewRevision", "new digest '' for ''"),
- },
- },
- {
- name: "HTTPS without certfile",
- want: sreconcile.ResultEmpty,
- wantErr: true,
- registryOpts: registryOptions{
- withTLS: true,
- },
- craneOpts: []crane.Option{crane.WithTransport(&http.Transport{
- TLSClientConfig: &tls.Config{
- RootCAs: pool,
- },
- }),
- },
- assertConditions: []metav1.Condition{
- *conditions.TrueCondition(sourcev1.FetchFailedCondition, sourcev1.OCIPullFailedReason, "failed to pull artifact from "),
- },
- },
- {
- name: "HTTPS with invalid certfile",
- want: sreconcile.ResultEmpty,
- wantErr: true,
- registryOpts: registryOptions{
- withTLS: true,
- },
- craneOpts: []crane.Option{crane.WithTransport(&http.Transport{
- TLSClientConfig: &tls.Config{
- RootCAs: pool,
- },
- }),
- },
- tlsCertSecret: &corev1.Secret{
- ObjectMeta: metav1.ObjectMeta{
- Name: "ca-file",
- },
- Data: map[string][]byte{
- "caFile": []byte("invalid"),
- },
- },
- assertConditions: []metav1.Condition{
- *conditions.TrueCondition(sourcev1.FetchFailedCondition, sourcev1.OCIPullFailedReason, "failed to pull artifact from "),
- },
- },
- }
-
- for _, tt := range tests {
- t.Run(tt.name, func(t *testing.T) {
- g := NewWithT(t)
-
- builder := fakeclient.NewClientBuilder().WithScheme(testEnv.GetScheme())
-
- obj := &sourcev1.OCIRepository{
- ObjectMeta: metav1.ObjectMeta{
- GenerateName: "auth-strategy-",
- },
- Spec: sourcev1.OCIRepositorySpec{
- Interval: metav1.Duration{Duration: interval},
- Timeout: &metav1.Duration{Duration: timeout},
- },
- }
-
- workspaceDir := t.TempDir()
- server, err := setupRegistryServer(ctx, workspaceDir, tt.registryOpts)
-
- g.Expect(err).NotTo(HaveOccurred())
-
- img, err := createPodinfoImageFromTar("podinfo-6.1.6.tar", "6.1.6", server.registryHost, tt.craneOpts...)
- g.Expect(err).ToNot(HaveOccurred())
- obj.Spec.URL = img.url
- obj.Spec.Reference = &sourcev1.OCIRepositoryRef{
- Tag: img.tag,
- }
-
- if tt.secretOpts.username != "" && tt.secretOpts.password != "" {
- secret := &corev1.Secret{
- ObjectMeta: metav1.ObjectMeta{
- Name: "auth-secretref",
- },
- Type: corev1.SecretTypeDockerConfigJson,
- Data: map[string][]byte{
- ".dockerconfigjson": []byte(fmt.Sprintf(`{"auths": {%q: {"username": %q, "password": %q}}}`,
- server.registryHost, tt.secretOpts.username, tt.secretOpts.password)),
- },
- }
-
- builder.WithObjects(secret)
-
- if tt.secretOpts.includeSA {
- serviceAccount := &corev1.ServiceAccount{
- ObjectMeta: metav1.ObjectMeta{
- Name: "sa-ocitest",
- },
- ImagePullSecrets: []corev1.LocalObjectReference{{Name: secret.Name}},
- }
- builder.WithObjects(serviceAccount)
- obj.Spec.ServiceAccountName = serviceAccount.Name
- }
-
- if tt.secretOpts.includeSecret {
- obj.Spec.SecretRef = &meta.LocalObjectReference{
- Name: secret.Name,
- }
- }
- }
-
- if tt.tlsCertSecret != nil {
- builder.WithObjects(tt.tlsCertSecret)
- obj.Spec.CertSecretRef = &meta.LocalObjectReference{
- Name: tt.tlsCertSecret.Name,
- }
- }
-
- r := &OCIRepositoryReconciler{
- Client: builder.Build(),
- EventRecorder: record.NewFakeRecorder(32),
- Storage: testStorage,
- }
-
- opts := r.craneOptions(ctx)
- opts = append(opts, crane.WithAuthFromKeychain(authn.DefaultKeychain))
- repoURL, err := r.getArtifactURL(obj, opts)
- g.Expect(err).To(BeNil())
-
- assertConditions := tt.assertConditions
- for k := range assertConditions {
- assertConditions[k].Message = strings.ReplaceAll(assertConditions[k].Message, "", img.digest.Hex)
- assertConditions[k].Message = strings.ReplaceAll(assertConditions[k].Message, "", repoURL)
- }
-
- tmpDir := t.TempDir()
- got, err := r.reconcileSource(ctx, obj, &sourcev1.Artifact{}, tmpDir)
- if tt.wantErr {
- g.Expect(err).ToNot(BeNil())
- } else {
- g.Expect(err).To(BeNil())
- }
- g.Expect(got).To(Equal(tt.want))
- g.Expect(obj.Status.Conditions).To(conditions.MatchConditions(tt.assertConditions))
- })
- }
-}
-
-func TestOCIRepository_CertSecret(t *testing.T) {
- g := NewWithT(t)
-
- srv, rootCertPEM, clientCertPEM, clientKeyPEM, clientTLSCert, err := createTLSServer()
- g.Expect(err).ToNot(HaveOccurred())
-
- srv.StartTLS()
- defer srv.Close()
-
- transport := &http.Transport{
- TLSClientConfig: &tls.Config{},
- }
- // Use the server cert as a CA cert, so the client trusts the
- // server cert. (Only works because the server uses the same
- // cert in both roles).
- pool := x509.NewCertPool()
- pool.AddCert(srv.Certificate())
- transport.TLSClientConfig.RootCAs = pool
- transport.TLSClientConfig.Certificates = []tls.Certificate{clientTLSCert}
-
- srv.Client().Transport = transport
- pi, err := createPodinfoImageFromTar("podinfo-6.1.5.tar", "6.1.5", srv.URL, []crane.Option{
- crane.WithTransport(srv.Client().Transport),
- }...)
- g.Expect(err).NotTo(HaveOccurred())
-
- tlsSecretClientCert := corev1.Secret{
- StringData: map[string]string{
- oci.CACert: string(rootCertPEM),
- oci.ClientCert: string(clientCertPEM),
- oci.ClientKey: string(clientKeyPEM),
- },
- }
-
- tests := []struct {
- name string
- url string
- digest gcrv1.Hash
- certSecret *corev1.Secret
- expectreadyconition bool
- expectedstatusmessage string
- }{
- {
- name: "test connection with CACert, Client Cert and Private Key",
- url: pi.url,
- digest: pi.digest,
- certSecret: &tlsSecretClientCert,
- expectreadyconition: true,
- expectedstatusmessage: fmt.Sprintf("stored artifact for digest '%s'", pi.digest.Hex),
- },
- {
- name: "test connection with no secret",
- url: pi.url,
- digest: pi.digest,
- expectreadyconition: false,
- expectedstatusmessage: "unexpected status code 400 Bad Request: Client sent an HTTP request to an HTTPS server",
- },
- {
- name: "test connection with with incorrect private key",
- url: pi.url,
- digest: pi.digest,
- certSecret: &corev1.Secret{
- StringData: map[string]string{
- oci.CACert: string(rootCertPEM),
- oci.ClientCert: string(clientCertPEM),
- oci.ClientKey: string("invalid-key"),
- },
- },
- expectreadyconition: false,
- expectedstatusmessage: "failed to generate transport for '': tls: failed to find any PEM data in key input",
- },
- }
-
- for _, tt := range tests {
- t.Run(tt.name, func(t *testing.T) {
- g := NewWithT(t)
-
- ns, err := testEnv.CreateNamespace(ctx, "ocirepository-test")
- g.Expect(err).ToNot(HaveOccurred())
- defer func() { g.Expect(testEnv.Delete(ctx, ns)).To(Succeed()) }()
-
- obj := &sourcev1.OCIRepository{
- ObjectMeta: metav1.ObjectMeta{
- GenerateName: "ocirepository-test-resource",
- Namespace: ns.Name,
- },
- Spec: sourcev1.OCIRepositorySpec{
- URL: tt.url,
- Interval: metav1.Duration{Duration: 60 * time.Minute},
- Reference: &sourcev1.OCIRepositoryRef{Digest: tt.digest.String()},
- },
- }
-
- if tt.certSecret != nil {
- tt.certSecret.ObjectMeta = metav1.ObjectMeta{
- GenerateName: "cert-secretref",
- Namespace: ns.Name,
- }
-
- g.Expect(testEnv.CreateAndWait(ctx, tt.certSecret)).To(Succeed())
- defer func() { g.Expect(testEnv.Delete(ctx, tt.certSecret)).To(Succeed()) }()
-
- obj.Spec.CertSecretRef = &meta.LocalObjectReference{Name: tt.certSecret.Name}
- }
-
- g.Expect(testEnv.Create(ctx, obj)).To(Succeed())
-
- key := client.ObjectKey{Name: obj.Name, Namespace: obj.Namespace}
-
- resultobj := sourcev1.OCIRepository{}
-
- // Wait for the finalizer to be set
- g.Eventually(func() bool {
- if err := testEnv.Get(ctx, key, &resultobj); err != nil {
- return false
- }
- return len(resultobj.Finalizers) > 0
- }, timeout).Should(BeTrue())
-
- // Wait for the object to fail
- g.Eventually(func() bool {
- if err := testEnv.Get(ctx, key, &resultobj); err != nil {
- return false
- }
- readyCondition := conditions.Get(&resultobj, meta.ReadyCondition)
- if readyCondition == nil {
- return false
- }
- return obj.Generation == readyCondition.ObservedGeneration &&
- conditions.IsReady(&resultobj) == tt.expectreadyconition
- }, timeout).Should(BeTrue())
-
- tt.expectedstatusmessage = strings.ReplaceAll(tt.expectedstatusmessage, "", pi.url)
-
- readyCondition := conditions.Get(&resultobj, meta.ReadyCondition)
- g.Expect(readyCondition.Message).Should(ContainSubstring(tt.expectedstatusmessage))
-
- // Wait for the object to be deleted
- g.Expect(testEnv.Delete(ctx, &resultobj)).To(Succeed())
- g.Eventually(func() bool {
- if err := testEnv.Get(ctx, key, &resultobj); err != nil {
- return apierrors.IsNotFound(err)
- }
- return false
- }, timeout).Should(BeTrue())
- })
- }
-}
-
-func TestOCIRepository_reconcileSource_remoteReference(t *testing.T) {
- g := NewWithT(t)
-
- tmpDir := t.TempDir()
- server, err := setupRegistryServer(ctx, tmpDir, registryOptions{})
- g.Expect(err).ToNot(HaveOccurred())
-
- podinfoVersions, err := pushMultiplePodinfoImages(server.registryHost, "6.1.4", "6.1.5", "6.1.6")
- img6 := podinfoVersions["6.1.6"]
- img5 := podinfoVersions["6.1.5"]
-
- tests := []struct {
- name string
- reference *sourcev1.OCIRepositoryRef
- want sreconcile.Result
- wantErr bool
- wantRevision string
- assertConditions []metav1.Condition
- }{
- {
- name: "no reference (latest tag)",
- want: sreconcile.ResultSuccess,
- wantRevision: img6.digest.Hex,
- assertConditions: []metav1.Condition{
- *conditions.TrueCondition(meta.ReconcilingCondition, "NewRevision", "new digest"),
- *conditions.TrueCondition(sourcev1.ArtifactOutdatedCondition, "NewRevision", "new digest"),
- },
- },
- {
- name: "tag reference",
- reference: &sourcev1.OCIRepositoryRef{
- Tag: "6.1.6",
- },
- want: sreconcile.ResultSuccess,
- wantRevision: img6.digest.Hex,
- assertConditions: []metav1.Condition{
- *conditions.TrueCondition(meta.ReconcilingCondition, "NewRevision", "new digest"),
- *conditions.TrueCondition(sourcev1.ArtifactOutdatedCondition, "NewRevision", "new digest"),
- },
- },
- {
- name: "semver reference",
- reference: &sourcev1.OCIRepositoryRef{
- SemVer: ">= 6.1.5",
- },
- want: sreconcile.ResultSuccess,
- wantRevision: img6.digest.Hex,
- assertConditions: []metav1.Condition{
- *conditions.TrueCondition(meta.ReconcilingCondition, "NewRevision", "new digest"),
- *conditions.TrueCondition(sourcev1.ArtifactOutdatedCondition, "NewRevision", "new digest"),
- },
- },
- {
- name: "digest reference",
- reference: &sourcev1.OCIRepositoryRef{
- Digest: img6.digest.String(),
- },
- wantRevision: img6.digest.Hex,
- want: sreconcile.ResultSuccess,
- assertConditions: []metav1.Condition{
- *conditions.TrueCondition(meta.ReconcilingCondition, "NewRevision", "new digest"),
- *conditions.TrueCondition(sourcev1.ArtifactOutdatedCondition, "NewRevision", "new digest"),
- },
- },
- {
- name: "invalid tag reference",
- reference: &sourcev1.OCIRepositoryRef{
- Tag: "6.1.0",
- },
- want: sreconcile.ResultEmpty,
- wantErr: true,
- assertConditions: []metav1.Condition{
- *conditions.TrueCondition(sourcev1.FetchFailedCondition, sourcev1.OCIPullFailedReason, "failed to pull artifact"),
- },
- },
- {
- name: "invalid semver reference",
- reference: &sourcev1.OCIRepositoryRef{
- SemVer: "<= 6.1.0",
- },
- want: sreconcile.ResultEmpty,
- wantErr: true,
- assertConditions: []metav1.Condition{
- *conditions.TrueCondition(sourcev1.FetchFailedCondition, sourcev1.ReadOperationFailedReason, "failed to determine the artifact tag for 'oci://%s/podinfo': no match found for semver: <= 6.1.0", server.registryHost),
- },
- },
- {
- name: "invalid digest reference",
- reference: &sourcev1.OCIRepositoryRef{
- Digest: "invalid",
- },
- want: sreconcile.ResultEmpty,
- wantErr: true,
- assertConditions: []metav1.Condition{
- *conditions.TrueCondition(sourcev1.FetchFailedCondition, sourcev1.OCIPullFailedReason, "failed to pull artifact"),
- },
- },
- {
- name: "semver should take precedence over tag",
- reference: &sourcev1.OCIRepositoryRef{
- SemVer: ">= 6.1.5",
- Tag: "6.1.5",
- },
- want: sreconcile.ResultSuccess,
- wantRevision: img6.digest.Hex,
- assertConditions: []metav1.Condition{
- *conditions.TrueCondition(meta.ReconcilingCondition, "NewRevision", "new digest"),
- *conditions.TrueCondition(sourcev1.ArtifactOutdatedCondition, "NewRevision", "new digest"),
- },
- },
- {
- name: "digest should take precedence over semver",
- reference: &sourcev1.OCIRepositoryRef{
- Tag: "6.1.6",
- SemVer: ">= 6.1.6",
- Digest: img5.digest.String(),
- },
- want: sreconcile.ResultSuccess,
- wantRevision: img5.digest.Hex,
- assertConditions: []metav1.Condition{
- *conditions.TrueCondition(meta.ReconcilingCondition, "NewRevision", "new digest"),
- *conditions.TrueCondition(sourcev1.ArtifactOutdatedCondition, "NewRevision", "new digest"),
- },
- },
- }
-
- builder := fakeclient.NewClientBuilder().WithScheme(testEnv.GetScheme())
-
- r := &OCIRepositoryReconciler{
- Client: builder.Build(),
- EventRecorder: record.NewFakeRecorder(32),
- Storage: testStorage,
- }
-
- for _, tt := range tests {
- t.Run(tt.name, func(t *testing.T) {
- obj := &sourcev1.OCIRepository{
- ObjectMeta: metav1.ObjectMeta{
- GenerateName: "checkout-strategy-",
- },
- Spec: sourcev1.OCIRepositorySpec{
- URL: fmt.Sprintf("oci://%s/podinfo", server.registryHost),
- Interval: metav1.Duration{Duration: interval},
- Timeout: &metav1.Duration{Duration: timeout},
- },
- }
-
- if tt.reference != nil {
- obj.Spec.Reference = tt.reference
- }
-
- artifact := &sourcev1.Artifact{}
- tmpDir := t.TempDir()
- got, err := r.reconcileSource(ctx, obj, artifact, tmpDir)
- if tt.wantErr {
- g.Expect(err).To(HaveOccurred())
- } else {
- g.Expect(err).ToNot(HaveOccurred())
- g.Expect(artifact.Revision).To(Equal(tt.wantRevision))
- }
-
- g.Expect(got).To(Equal(tt.want))
- g.Expect(obj.Status.Conditions).To(conditions.MatchConditions(tt.assertConditions))
- })
- }
-}
-
-func TestOCIRepository_reconcileArtifact(t *testing.T) {
- g := NewWithT(t)
-
- tests := []struct {
- name string
- targetPath string
- artifact *sourcev1.Artifact
- beforeFunc func(obj *sourcev1.OCIRepository)
- want sreconcile.Result
- wantErr bool
- assertArtifact *sourcev1.Artifact
- assertPaths []string
- assertConditions []metav1.Condition
- }{
- {
- name: "Archiving Artifact creates correct files and condition",
- targetPath: "testdata/oci/repository",
- artifact: &sourcev1.Artifact{
- Revision: "revision",
- },
- beforeFunc: func(obj *sourcev1.OCIRepository) {
- conditions.MarkTrue(obj, sourcev1.ArtifactOutdatedCondition, "NewRevision", "new digest")
- },
- want: sreconcile.ResultSuccess,
- assertPaths: []string{
- "latest.tar.gz",
- },
- assertConditions: []metav1.Condition{
- *conditions.TrueCondition(sourcev1.ArtifactInStorageCondition, meta.SucceededReason, "stored artifact for digest"),
- },
- },
- {
- name: "No status changes if artifact is already present",
- artifact: &sourcev1.Artifact{
- Revision: "revision",
- },
- targetPath: "testdata/oci/repository",
- want: sreconcile.ResultSuccess,
- beforeFunc: func(obj *sourcev1.OCIRepository) {
- obj.Status.Artifact = &sourcev1.Artifact{
- Revision: "revision",
- }
- },
- assertArtifact: &sourcev1.Artifact{
- Revision: "revision",
- },
- assertConditions: []metav1.Condition{
- *conditions.TrueCondition(sourcev1.ArtifactInStorageCondition, meta.SucceededReason, "stored artifact for digest"),
- },
- },
- {
- name: "target path doesn't exist",
- targetPath: "testdata/oci/non-existent",
- want: sreconcile.ResultEmpty,
- wantErr: true,
- assertConditions: []metav1.Condition{
- *conditions.TrueCondition(sourcev1.StorageOperationFailedCondition, sourcev1.StatOperationFailedReason, "failed to stat source path: "),
- },
- },
- {
- name: "target path is a file",
- targetPath: "testdata/oci/repository/foo.txt",
- want: sreconcile.ResultEmpty,
- wantErr: true,
- assertConditions: []metav1.Condition{
- *conditions.TrueCondition(sourcev1.StorageOperationFailedCondition, sourcev1.InvalidPathReason, "source path 'testdata/oci/repository/foo.txt' is not a directory"),
- },
- },
- }
-
- builder := fakeclient.NewClientBuilder().WithScheme(testEnv.GetScheme())
-
- r := &OCIRepositoryReconciler{
- Client: builder.Build(),
- EventRecorder: record.NewFakeRecorder(32),
- Storage: testStorage,
- }
-
- for _, tt := range tests {
- t.Run(tt.name, func(t *testing.T) {
-
- obj := &sourcev1.OCIRepository{
- ObjectMeta: metav1.ObjectMeta{
- GenerateName: "reconcile-artifact-",
- },
- }
- if tt.beforeFunc != nil {
- tt.beforeFunc(obj)
- }
-
- artifact := &sourcev1.Artifact{}
- if tt.artifact != nil {
- artifact = tt.artifact
- }
- got, err := r.reconcileArtifact(ctx, obj, artifact, tt.targetPath)
- if tt.wantErr {
- g.Expect(err).To(HaveOccurred())
- } else {
- g.Expect(err).ToNot(HaveOccurred())
- }
-
- g.Expect(got).To(Equal(tt.want))
- g.Expect(obj.Status.Conditions).To(conditions.MatchConditions(tt.assertConditions))
-
- if tt.assertArtifact != nil {
- g.Expect(obj.Status.Artifact).To(MatchArtifact(tt.artifact))
- }
-
- for _, path := range tt.assertPaths {
- localPath := testStorage.LocalPath(*obj.GetArtifact())
- path = filepath.Join(filepath.Dir(localPath), path)
- _, err := os.Lstat(path)
- g.Expect(err).ToNot(HaveOccurred())
- }
- })
- }
-}
-
-func TestOCIRepository_getArtifactURL(t *testing.T) {
- g := NewWithT(t)
-
- tmpDir := t.TempDir()
- server, err := setupRegistryServer(ctx, tmpDir, registryOptions{})
- g.Expect(err).ToNot(HaveOccurred())
-
- imgs, err := pushMultiplePodinfoImages(server.registryHost, "6.1.4", "6.1.5", "6.1.6")
- g.Expect(err).ToNot(HaveOccurred())
-
- tests := []struct {
- name string
- url string
- reference *sourcev1.OCIRepositoryRef
- wantErr bool
- want string
- }{
- {
- name: "valid url with no reference",
- url: "oci://ghcr.io/stefanprodan/charts",
- want: "ghcr.io/stefanprodan/charts",
- },
- {
- name: "valid url with tag reference",
- url: "oci://ghcr.io/stefanprodan/charts",
- reference: &sourcev1.OCIRepositoryRef{
- Tag: "6.1.6",
- },
- want: "ghcr.io/stefanprodan/charts:6.1.6",
- },
- {
- name: "valid url with digest reference",
- url: "oci://ghcr.io/stefanprodan/charts",
- reference: &sourcev1.OCIRepositoryRef{
- Digest: imgs["6.1.6"].digest.Hex,
- },
- want: "ghcr.io/stefanprodan/charts@" + imgs["6.1.6"].digest.Hex,
- },
- {
- name: "valid url with semver reference",
- url: fmt.Sprintf("oci://%s/podinfo", server.registryHost),
- reference: &sourcev1.OCIRepositoryRef{
- SemVer: ">= 6.1.6",
- },
- want: server.registryHost + "/podinfo:6.1.6",
- },
- {
- name: "invalid url without oci prefix",
- url: "ghcr.io/stefanprodan/charts",
- wantErr: true,
- },
- }
-
- builder := fakeclient.NewClientBuilder().WithScheme(testEnv.GetScheme())
- r := &OCIRepositoryReconciler{
- Client: builder.Build(),
- EventRecorder: record.NewFakeRecorder(32),
- Storage: testStorage,
- }
-
- for _, tt := range tests {
- t.Run(tt.name, func(t *testing.T) {
- obj := &sourcev1.OCIRepository{
- ObjectMeta: metav1.ObjectMeta{
- GenerateName: "artifact-url-",
- },
- Spec: sourcev1.OCIRepositorySpec{
- URL: tt.url,
- Interval: metav1.Duration{Duration: interval},
- Timeout: &metav1.Duration{Duration: timeout},
- },
- }
-
- if tt.reference != nil {
- obj.Spec.Reference = tt.reference
- }
-
- opts := r.craneOptions(ctx)
- opts = append(opts, crane.WithAuthFromKeychain(authn.DefaultKeychain))
- got, err := r.getArtifactURL(obj, opts)
- if tt.wantErr {
- g.Expect(err).To(HaveOccurred())
- return
- }
- g.Expect(err).ToNot(HaveOccurred())
- g.Expect(got).To(Equal(tt.want))
- })
- }
-}
-
-func TestOCIRepository_stalled(t *testing.T) {
- g := NewWithT(t)
-
- ns, err := testEnv.CreateNamespace(ctx, "ocirepository-stalled-test")
- g.Expect(err).ToNot(HaveOccurred())
- defer func() { g.Expect(testEnv.Delete(ctx, ns)).To(Succeed()) }()
-
- obj := &sourcev1.OCIRepository{
- ObjectMeta: metav1.ObjectMeta{
- GenerateName: "ocirepository-reconcile",
- Namespace: ns.Name,
- },
- Spec: sourcev1.OCIRepositorySpec{
- URL: "oci://ghcr.io/test/test:v1",
- Interval: metav1.Duration{Duration: 60 * time.Minute},
- },
- }
-
- g.Expect(testEnv.Create(ctx, obj)).To(Succeed())
-
- key := client.ObjectKey{Name: obj.Name, Namespace: obj.Namespace}
- resultobj := sourcev1.OCIRepository{}
-
- // Wait for the object to fail
- g.Eventually(func() bool {
- if err := testEnv.Get(ctx, key, &resultobj); err != nil {
- return false
- }
- readyCondition := conditions.Get(&resultobj, meta.ReadyCondition)
- if readyCondition == nil {
- return false
- }
- return obj.Generation == readyCondition.ObservedGeneration &&
- !conditions.IsReady(&resultobj)
- }, timeout).Should(BeTrue())
-
- // Verify that stalled condition is present in status
- stalledCondition := conditions.Get(&resultobj, meta.StalledCondition)
- g.Expect(stalledCondition).ToNot(BeNil())
- g.Expect(stalledCondition.Reason).Should(Equal(sourcev1.URLInvalidReason))
-}
-
-func TestOCIRepository_reconcileStorage(t *testing.T) {
- g := NewWithT(t)
-
- tests := []struct {
- name string
- beforeFunc func(obj *sourcev1.OCIRepository) error
- want sreconcile.Result
- wantErr bool
- assertConditions []metav1.Condition
- assertArtifact *sourcev1.Artifact
- assertPaths []string
- }{
- {
- name: "garbage collects",
- beforeFunc: func(obj *sourcev1.OCIRepository) error {
- revisions := []string{"a", "b", "c", "d"}
-
- for n := range revisions {
- v := revisions[n]
- obj.Status.Artifact = &sourcev1.Artifact{
- Path: fmt.Sprintf("/oci-reconcile-storage/%s.txt", v),
- Revision: v,
- }
- if err := testStorage.MkdirAll(*obj.Status.Artifact); err != nil {
- return err
- }
-
- if err := testStorage.AtomicWriteFile(obj.Status.Artifact, strings.NewReader(v), 0o640); err != nil {
- return err
- }
-
- if n != len(revisions)-1 {
- time.Sleep(time.Second)
- }
- }
-
- testStorage.SetArtifactURL(obj.Status.Artifact)
- return nil
- },
- assertArtifact: &sourcev1.Artifact{
- Path: "/oci-reconcile-storage/d.txt",
- Revision: "d",
- Checksum: "18ac3e7343f016890c510e93f935261169d9e3f565436429830faf0934f4f8e4",
- URL: testStorage.Hostname + "/oci-reconcile-storage/d.txt",
- Size: int64p(int64(len("d"))),
- },
- assertPaths: []string{
- "/oci-reconcile-storage/d.txt",
- "/oci-reconcile-storage/c.txt",
- "!/oci-reconcile-storage/b.txt",
- "!/oci-reconcile-storage/a.txt",
- },
- want: sreconcile.ResultSuccess,
- },
- {
- name: "notices missing artifact in storage",
- beforeFunc: func(obj *sourcev1.OCIRepository) error {
- obj.Status.Artifact = &sourcev1.Artifact{
- Path: "/oci-reconcile-storage/invalid.txt",
- Revision: "e",
- }
- testStorage.SetArtifactURL(obj.Status.Artifact)
- return nil
- },
- want: sreconcile.ResultSuccess,
- assertPaths: []string{
- "!/oci-reconcile-storage/invalid.txt",
- },
- assertConditions: []metav1.Condition{
- *conditions.TrueCondition(meta.ReconcilingCondition, "NoArtifact", "no artifact for resource in storage"),
- },
- },
- {
- name: "updates hostname on diff from current",
- beforeFunc: func(obj *sourcev1.OCIRepository) error {
- obj.Status.Artifact = &sourcev1.Artifact{
- Path: "/oci-reconcile-storage/hostname.txt",
- Revision: "f",
- Checksum: "3b9c358f36f0a31b6ad3e14f309c7cf198ac9246e8316f9ce543d5b19ac02b80",
- URL: "http://outdated.com/oci-reconcile-storage/hostname.txt",
- }
- if err := testStorage.MkdirAll(*obj.Status.Artifact); err != nil {
- return err
- }
- if err := testStorage.AtomicWriteFile(obj.Status.Artifact, strings.NewReader("file"), 0o640); err != nil {
- return err
- }
- return nil
- },
- want: sreconcile.ResultSuccess,
- assertPaths: []string{
- "/oci-reconcile-storage/hostname.txt",
- },
- assertArtifact: &sourcev1.Artifact{
- Path: "/oci-reconcile-storage/hostname.txt",
- Revision: "f",
- Checksum: "3b9c358f36f0a31b6ad3e14f309c7cf198ac9246e8316f9ce543d5b19ac02b80",
- URL: testStorage.Hostname + "/oci-reconcile-storage/hostname.txt",
- Size: int64p(int64(len("file"))),
- },
- },
- }
-
- builder := fakeclient.NewClientBuilder().WithScheme(testEnv.GetScheme())
- r := &OCIRepositoryReconciler{
- Client: builder.Build(),
- EventRecorder: record.NewFakeRecorder(32),
- Storage: testStorage,
- }
-
- for _, tt := range tests {
- t.Run(tt.name, func(t *testing.T) {
-
- obj := &sourcev1.OCIRepository{
- ObjectMeta: metav1.ObjectMeta{
- GenerateName: "test-",
- },
- }
-
- g.Expect(tt.beforeFunc(obj)).To(Succeed())
- got, err := r.reconcileStorage(ctx, obj, &sourcev1.Artifact{}, "")
- if tt.wantErr {
- g.Expect(err).To(HaveOccurred())
- } else {
- g.Expect(err).ToNot(HaveOccurred())
- }
-
- g.Expect(got).To(Equal(tt.want))
- g.Expect(obj.Status.Artifact).To(MatchArtifact(tt.assertArtifact))
- if tt.assertArtifact != nil && tt.assertArtifact.URL != "" {
- g.Expect(obj.Status.Artifact.URL).To(Equal(tt.assertArtifact.URL))
- }
-
- g.Expect(obj.Status.Conditions).To(conditions.MatchConditions(tt.assertConditions))
-
- for _, p := range tt.assertPaths {
- absoluteP := filepath.Join(testStorage.BasePath, p)
- if !strings.HasPrefix(p, "!") {
- g.Expect(absoluteP).To(BeAnExistingFile())
- continue
- }
-
- g.Expect(absoluteP).ToNot(BeAnExistingFile())
- }
- })
- }
-}
-
-func TestOCIRepository_ReconcileDelete(t *testing.T) {
- g := NewWithT(t)
-
- r := &OCIRepositoryReconciler{
- EventRecorder: record.NewFakeRecorder(32),
- Storage: testStorage,
- }
-
- obj := &sourcev1.OCIRepository{
- ObjectMeta: metav1.ObjectMeta{
- Name: "reconcile-delete-",
- DeletionTimestamp: &metav1.Time{Time: time.Now()},
- Finalizers: []string{
- sourcev1.SourceFinalizer,
- },
- },
- Status: sourcev1.OCIRepositoryStatus{},
- }
-
- artifact := testStorage.NewArtifactFor(sourcev1.OCIRepositoryKind, obj.GetObjectMeta(), "revision", "foo.txt")
- obj.Status.Artifact = &artifact
-
- got, err := r.reconcileDelete(ctx, obj)
- g.Expect(err).NotTo(HaveOccurred())
- g.Expect(got).To(Equal(sreconcile.ResultEmpty))
- g.Expect(controllerutil.ContainsFinalizer(obj, sourcev1.SourceFinalizer)).To(BeFalse())
- g.Expect(obj.Status.Artifact).To(BeNil())
-}
-
-func TestOCIRepositoryReconciler_notify(t *testing.T) {
-
- noopErr := serror.NewGeneric(fmt.Errorf("some no-op error"), "NoOpReason")
- noopErr.Ignore = true
-
- tests := []struct {
- name string
- res sreconcile.Result
- resErr error
- oldObjBeforeFunc func(obj *sourcev1.OCIRepository)
- newObjBeforeFunc func(obj *sourcev1.OCIRepository)
- commit git.Commit
- wantEvent string
- }{
- {
- name: "error - no event",
- res: sreconcile.ResultEmpty,
- resErr: errors.New("some error"),
- },
- {
- name: "new artifact",
- res: sreconcile.ResultSuccess,
- resErr: nil,
- newObjBeforeFunc: func(obj *sourcev1.OCIRepository) {
- obj.Spec.URL = "oci://newurl.io"
- obj.Status.Artifact = &sourcev1.Artifact{
- Revision: "xxx",
- Checksum: "yyy",
- Metadata: map[string]string{
- oci.SourceAnnotation: "https://github.com/stefanprodan/podinfo",
- oci.RevisionAnnotation: "6.1.8/b3b00fe35424a45d373bf4c7214178bc36fd7872",
- },
- }
- },
- wantEvent: "Normal NewArtifact stored artifact with digest 'xxx' from 'oci://newurl.io', origin source 'https://github.com/stefanprodan/podinfo', origin revision '6.1.8/b3b00fe35424a45d373bf4c7214178bc36fd7872'",
- },
- {
- name: "recovery from failure",
- res: sreconcile.ResultSuccess,
- resErr: nil,
- oldObjBeforeFunc: func(obj *sourcev1.OCIRepository) {
- obj.Status.Artifact = &sourcev1.Artifact{Revision: "xxx", Checksum: "yyy"}
- conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, sourcev1.ReadOperationFailedReason, "fail")
- conditions.MarkFalse(obj, meta.ReadyCondition, meta.FailedReason, "foo")
- },
- newObjBeforeFunc: func(obj *sourcev1.OCIRepository) {
- obj.Spec.URL = "oci://newurl.io"
- obj.Status.Artifact = &sourcev1.Artifact{Revision: "xxx", Checksum: "yyy"}
- conditions.MarkTrue(obj, meta.ReadyCondition, meta.SucceededReason, "ready")
- },
- wantEvent: "Normal Succeeded stored artifact with digest 'xxx' from 'oci://newurl.io'",
- },
- {
- name: "recovery and new artifact",
- res: sreconcile.ResultSuccess,
- resErr: nil,
- oldObjBeforeFunc: func(obj *sourcev1.OCIRepository) {
- obj.Status.Artifact = &sourcev1.Artifact{Revision: "xxx", Checksum: "yyy"}
- conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, sourcev1.ReadOperationFailedReason, "fail")
- conditions.MarkFalse(obj, meta.ReadyCondition, meta.FailedReason, "foo")
- },
- newObjBeforeFunc: func(obj *sourcev1.OCIRepository) {
- obj.Spec.URL = "oci://newurl.io"
- obj.Status.Artifact = &sourcev1.Artifact{Revision: "aaa", Checksum: "bbb"}
- conditions.MarkTrue(obj, meta.ReadyCondition, meta.SucceededReason, "ready")
- },
- wantEvent: "Normal NewArtifact stored artifact with digest 'aaa' from 'oci://newurl.io'",
- },
- {
- name: "no updates",
- res: sreconcile.ResultSuccess,
- resErr: nil,
- oldObjBeforeFunc: func(obj *sourcev1.OCIRepository) {
- obj.Status.Artifact = &sourcev1.Artifact{Revision: "xxx", Checksum: "yyy"}
- conditions.MarkTrue(obj, meta.ReadyCondition, meta.SucceededReason, "ready")
- },
- newObjBeforeFunc: func(obj *sourcev1.OCIRepository) {
- obj.Status.Artifact = &sourcev1.Artifact{Revision: "xxx", Checksum: "yyy"}
- conditions.MarkTrue(obj, meta.ReadyCondition, meta.SucceededReason, "ready")
- },
- },
- {
- name: "no updates on requeue",
- res: sreconcile.ResultRequeue,
- resErr: nil,
- oldObjBeforeFunc: func(obj *sourcev1.OCIRepository) {
- obj.Status.Artifact = &sourcev1.Artifact{Revision: "xxx", Checksum: "yyy"}
- conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, sourcev1.URLInvalidReason, "ready")
- },
- },
- }
-
- for _, tt := range tests {
- t.Run(tt.name, func(t *testing.T) {
- g := NewWithT(t)
- recorder := record.NewFakeRecorder(32)
-
- oldObj := &sourcev1.OCIRepository{}
- newObj := oldObj.DeepCopy()
-
- if tt.oldObjBeforeFunc != nil {
- tt.oldObjBeforeFunc(oldObj)
- }
- if tt.newObjBeforeFunc != nil {
- tt.newObjBeforeFunc(newObj)
- }
-
- reconciler := &OCIRepositoryReconciler{
- EventRecorder: recorder,
- }
- reconciler.notify(ctx, oldObj, newObj, tt.res, tt.resErr)
-
- select {
- case x, ok := <-recorder.Events:
- g.Expect(ok).To(Equal(tt.wantEvent != ""), "unexpected event received")
- if tt.wantEvent != "" {
- g.Expect(x).To(ContainSubstring(tt.wantEvent))
- }
- default:
- if tt.wantEvent != "" {
- t.Errorf("expected some event to be emitted")
- }
- }
- })
- }
-}
-
-type artifactFixture struct {
- expectedPath string
- expectedChecksum string
-}
-
-type podinfoImage struct {
- url string
- tag string
- digest gcrv1.Hash
-}
-
-func createPodinfoImageFromTar(tarFileName, tag, registryURL string, opts ...crane.Option) (*podinfoImage, error) {
- // Create Image
- image, err := crane.Load(path.Join("testdata", "podinfo", tarFileName))
- if err != nil {
- return nil, err
- }
-
- image = setPodinfoImageAnnotations(image, tag)
-
- // url.Parse doesn't handle urls with no scheme well e.g localhost:
- if !(strings.HasPrefix(registryURL, "http://") || strings.HasPrefix(registryURL, "https://")) {
- registryURL = fmt.Sprintf("http://%s", registryURL)
- }
-
- myURL, err := url.Parse(registryURL)
- if err != nil {
- return nil, err
- }
- repositoryURL := fmt.Sprintf("%s/podinfo", myURL.Host)
-
- // Image digest
- podinfoImageDigest, err := image.Digest()
- if err != nil {
- return nil, err
- }
-
- // Push image
- err = crane.Push(image, repositoryURL, opts...)
- if err != nil {
- return nil, err
- }
-
- // Tag the image
- err = crane.Tag(repositoryURL, tag, opts...)
- if err != nil {
- return nil, err
- }
-
- return &podinfoImage{
- url: "oci://" + repositoryURL,
- tag: tag,
- digest: podinfoImageDigest,
- }, nil
-}
-
-func pushMultiplePodinfoImages(serverURL string, versions ...string) (map[string]podinfoImage, error) {
- podinfoVersions := make(map[string]podinfoImage)
-
- for i := 0; i < len(versions); i++ {
- pi, err := createPodinfoImageFromTar(fmt.Sprintf("podinfo-%s.tar", versions[i]), versions[i], serverURL)
- if err != nil {
- return nil, err
- }
-
- podinfoVersions[versions[i]] = *pi
-
- }
-
- return podinfoVersions, nil
-}
-
-func setPodinfoImageAnnotations(img gcrv1.Image, tag string) gcrv1.Image {
- metadata := map[string]string{
- oci.SourceAnnotation: "https://github.com/stefanprodan/podinfo",
- oci.RevisionAnnotation: fmt.Sprintf("%s/SHA", tag),
- }
- return mutate.Annotations(img, metadata).(gcrv1.Image)
-}
-
-// These two taken verbatim from https://ericchiang.github.io/post/go-tls/
-func certTemplate() (*x509.Certificate, error) {
- // generate a random serial number (a real cert authority would
- // have some logic behind this)
- serialNumberLimit := new(big.Int).Lsh(big.NewInt(1), 128)
- serialNumber, err := rand.Int(rand.Reader, serialNumberLimit)
- if err != nil {
- return nil, errors.New("failed to generate serial number: " + err.Error())
- }
-
- tmpl := x509.Certificate{
- SerialNumber: serialNumber,
- Subject: pkix.Name{Organization: []string{"Flux project"}},
- SignatureAlgorithm: x509.SHA256WithRSA,
- NotBefore: time.Now(),
- NotAfter: time.Now().Add(time.Hour), // valid for an hour
- BasicConstraintsValid: true,
- }
- return &tmpl, nil
-}
-
-func createCert(template, parent *x509.Certificate, pub interface{}, parentPriv interface{}) (
- cert *x509.Certificate, certPEM []byte, err error) {
-
- certDER, err := x509.CreateCertificate(rand.Reader, template, parent, pub, parentPriv)
- if err != nil {
- return
- }
- // parse the resulting certificate so we can use it again
- cert, err = x509.ParseCertificate(certDER)
- if err != nil {
- return
- }
- // PEM encode the certificate (this is a standard TLS encoding)
- b := pem.Block{Type: "CERTIFICATE", Bytes: certDER}
- certPEM = pem.EncodeToMemory(&b)
- return
-}
-
-func createTLSServer() (*httptest.Server, []byte, []byte, []byte, tls.Certificate, error) {
- var clientTLSCert tls.Certificate
- var rootCertPEM, clientCertPEM, clientKeyPEM []byte
-
- srv := httptest.NewUnstartedServer(registry.New())
-
- // Create a self-signed cert to use as the CA and server cert.
- rootKey, err := rsa.GenerateKey(rand.Reader, 2048)
- if err != nil {
- return srv, rootCertPEM, clientCertPEM, clientKeyPEM, clientTLSCert, err
- }
- rootCertTmpl, err := certTemplate()
- if err != nil {
- return srv, rootCertPEM, clientCertPEM, clientKeyPEM, clientTLSCert, err
- }
- rootCertTmpl.IsCA = true
- rootCertTmpl.KeyUsage = x509.KeyUsageCertSign | x509.KeyUsageDigitalSignature
- rootCertTmpl.ExtKeyUsage = []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth, x509.ExtKeyUsageClientAuth}
- rootCertTmpl.IPAddresses = []net.IP{net.ParseIP("127.0.0.1")}
- var rootCert *x509.Certificate
- rootCert, rootCertPEM, err = createCert(rootCertTmpl, rootCertTmpl, &rootKey.PublicKey, rootKey)
- if err != nil {
- return srv, rootCertPEM, clientCertPEM, clientKeyPEM, clientTLSCert, err
- }
-
- rootKeyPEM := pem.EncodeToMemory(&pem.Block{
- Type: "RSA PRIVATE KEY", Bytes: x509.MarshalPKCS1PrivateKey(rootKey),
- })
-
- // Create a TLS cert using the private key and certificate.
- rootTLSCert, err := tls.X509KeyPair(rootCertPEM, rootKeyPEM)
- if err != nil {
- return srv, rootCertPEM, clientCertPEM, clientKeyPEM, clientTLSCert, err
- }
-
- // To trust a client certificate, the server must be given a
- // CA cert pool.
- pool := x509.NewCertPool()
- pool.AddCert(rootCert)
-
- srv.TLS = &tls.Config{
- ClientAuth: tls.RequireAndVerifyClientCert,
- Certificates: []tls.Certificate{rootTLSCert},
- ClientCAs: pool,
- }
-
- // Create a client cert, signed by the "CA".
- clientKey, err := rsa.GenerateKey(rand.Reader, 2048)
- if err != nil {
- return srv, rootCertPEM, clientCertPEM, clientKeyPEM, clientTLSCert, err
- }
- clientCertTmpl, err := certTemplate()
- if err != nil {
- return srv, rootCertPEM, clientCertPEM, clientKeyPEM, clientTLSCert, err
- }
- clientCertTmpl.KeyUsage = x509.KeyUsageDigitalSignature
- clientCertTmpl.ExtKeyUsage = []x509.ExtKeyUsage{x509.ExtKeyUsageClientAuth}
- _, clientCertPEM, err = createCert(clientCertTmpl, rootCert, &clientKey.PublicKey, rootKey)
- if err != nil {
- return srv, rootCertPEM, clientCertPEM, clientKeyPEM, clientTLSCert, err
- }
- // Encode and load the cert and private key for the client.
- clientKeyPEM = pem.EncodeToMemory(&pem.Block{
- Type: "RSA PRIVATE KEY", Bytes: x509.MarshalPKCS1PrivateKey(clientKey),
- })
- clientTLSCert, err = tls.X509KeyPair(clientCertPEM, clientKeyPEM)
- return srv, rootCertPEM, clientCertPEM, clientKeyPEM, clientTLSCert, err
-}
diff --git a/controllers/storage.go b/controllers/storage.go
deleted file mode 100644
index c5fd586f0..000000000
--- a/controllers/storage.go
+++ /dev/null
@@ -1,649 +0,0 @@
-/*
-Copyright 2020 The Flux authors
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package controllers
-
-import (
- "archive/tar"
- "compress/gzip"
- "context"
- "crypto/sha256"
- "fmt"
- "hash"
- "io"
- "io/fs"
- "net/url"
- "os"
- "path/filepath"
- "sort"
- "strings"
- "time"
-
- securejoin "github.com/cyphar/filepath-securejoin"
- "github.com/fluxcd/pkg/lockedfile"
- "github.com/fluxcd/pkg/untar"
- "github.com/go-git/go-git/v5/plumbing/format/gitignore"
- metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
- kerrors "k8s.io/apimachinery/pkg/util/errors"
-
- sourcev1 "github.com/fluxcd/source-controller/api/v1beta2"
- sourcefs "github.com/fluxcd/source-controller/internal/fs"
- "github.com/fluxcd/source-controller/pkg/sourceignore"
-)
-
-const GarbageCountLimit = 1000
-
-// Storage manages artifacts
-type Storage struct {
- // BasePath is the local directory path where the source artifacts are stored.
- BasePath string `json:"basePath"`
-
- // Hostname is the file server host name used to compose the artifacts URIs.
- Hostname string `json:"hostname"`
-
- // ArtifactRetentionTTL is the duration of time that artifacts will be kept
- // in storage before being garbage collected.
- ArtifactRetentionTTL time.Duration `json:"artifactRetentionTTL"`
-
- // ArtifactRetentionRecords is the maximum number of artifacts to be kept in
- // storage after a garbage collection.
- ArtifactRetentionRecords int `json:"artifactRetentionRecords"`
-}
-
-// NewStorage creates the storage helper for a given path and hostname.
-func NewStorage(basePath string, hostname string, artifactRetentionTTL time.Duration, artifactRetentionRecords int) (*Storage, error) {
- if f, err := os.Stat(basePath); os.IsNotExist(err) || !f.IsDir() {
- return nil, fmt.Errorf("invalid dir path: %s", basePath)
- }
- return &Storage{
- BasePath: basePath,
- Hostname: hostname,
- ArtifactRetentionTTL: artifactRetentionTTL,
- ArtifactRetentionRecords: artifactRetentionRecords,
- }, nil
-}
-
-// NewArtifactFor returns a new v1beta1.Artifact.
-func (s *Storage) NewArtifactFor(kind string, metadata metav1.Object, revision, fileName string) sourcev1.Artifact {
- path := sourcev1.ArtifactPath(kind, metadata.GetNamespace(), metadata.GetName(), fileName)
- artifact := sourcev1.Artifact{
- Path: path,
- Revision: revision,
- }
- s.SetArtifactURL(&artifact)
- return artifact
-}
-
-// SetArtifactURL sets the URL on the given v1beta1.Artifact.
-func (s Storage) SetArtifactURL(artifact *sourcev1.Artifact) {
- if artifact.Path == "" {
- return
- }
- format := "http://%s/%s"
- if strings.HasPrefix(s.Hostname, "http://") || strings.HasPrefix(s.Hostname, "https://") {
- format = "%s/%s"
- }
- artifact.URL = fmt.Sprintf(format, s.Hostname, strings.TrimLeft(artifact.Path, "/"))
-}
-
-// SetHostname sets the hostname of the given URL string to the current Storage.Hostname and returns the result.
-func (s Storage) SetHostname(URL string) string {
- u, err := url.Parse(URL)
- if err != nil {
- return ""
- }
- u.Host = s.Hostname
- return u.String()
-}
-
-// MkdirAll calls os.MkdirAll for the given v1beta1.Artifact base dir.
-func (s *Storage) MkdirAll(artifact sourcev1.Artifact) error {
- dir := filepath.Dir(s.LocalPath(artifact))
- return os.MkdirAll(dir, 0o700)
-}
-
-// RemoveAll calls os.RemoveAll for the given v1beta1.Artifact base dir.
-func (s *Storage) RemoveAll(artifact sourcev1.Artifact) (string, error) {
- var deletedDir string
- dir := filepath.Dir(s.LocalPath(artifact))
- // Check if the dir exists.
- _, err := os.Stat(dir)
- if err == nil {
- deletedDir = dir
- }
- return deletedDir, os.RemoveAll(dir)
-}
-
-// RemoveAllButCurrent removes all files for the given v1beta1.Artifact base dir, excluding the current one.
-func (s *Storage) RemoveAllButCurrent(artifact sourcev1.Artifact) ([]string, error) {
- deletedFiles := []string{}
- localPath := s.LocalPath(artifact)
- dir := filepath.Dir(localPath)
- var errors []string
- _ = filepath.Walk(dir, func(path string, info os.FileInfo, err error) error {
- if err != nil {
- errors = append(errors, err.Error())
- return nil
- }
-
- if path != localPath && !info.IsDir() && info.Mode()&os.ModeSymlink != os.ModeSymlink {
- if err := os.Remove(path); err != nil {
- errors = append(errors, info.Name())
- } else {
- // Collect the successfully deleted file paths.
- deletedFiles = append(deletedFiles, path)
- }
- }
- return nil
- })
-
- if len(errors) > 0 {
- return deletedFiles, fmt.Errorf("failed to remove files: %s", strings.Join(errors, " "))
- }
- return deletedFiles, nil
-}
-
-// getGarbageFiles returns all files that need to be garbage collected for the given artifact.
-// Garbage files are determined based on the below flow:
-// 1. collect all files with an expired ttl
-// 2. if we satisfy maxItemsToBeRetained, then return
-// 3. else, remove all files till the latest n files remain, where n=maxItemsToBeRetained
-func (s *Storage) getGarbageFiles(artifact sourcev1.Artifact, totalCountLimit, maxItemsToBeRetained int, ttl time.Duration) ([]string, error) {
- localPath := s.LocalPath(artifact)
- dir := filepath.Dir(localPath)
- garbageFiles := []string{}
- filesWithCreatedTs := make(map[time.Time]string)
- // sortedPaths contain all files sorted according to their created ts.
- sortedPaths := []string{}
- now := time.Now().UTC()
- totalFiles := 0
- var errors []string
- creationTimestamps := []time.Time{}
- _ = filepath.WalkDir(dir, func(path string, d fs.DirEntry, err error) error {
- if err != nil {
- errors = append(errors, err.Error())
- return nil
- }
- if totalFiles >= totalCountLimit {
- return fmt.Errorf("reached file walking limit, already walked over: %d", totalFiles)
- }
- info, err := d.Info()
- if err != nil {
- errors = append(errors, err.Error())
- return nil
- }
- createdAt := info.ModTime().UTC()
- diff := now.Sub(createdAt)
- // Compare the time difference between now and the time at which the file was created
- // with the provided TTL. Delete if the difference is greater than the TTL.
- expired := diff > ttl
- if !info.IsDir() && info.Mode()&os.ModeSymlink != os.ModeSymlink {
- if path != localPath && expired {
- garbageFiles = append(garbageFiles, path)
- }
- totalFiles += 1
- filesWithCreatedTs[createdAt] = path
- creationTimestamps = append(creationTimestamps, createdAt)
- }
- return nil
-
- })
- if len(errors) > 0 {
- return nil, fmt.Errorf("can't walk over file: %s", strings.Join(errors, ","))
- }
-
- // We already collected enough garbage files to satisfy the no. of max
- // items that are supposed to be retained, so exit early.
- if totalFiles-len(garbageFiles) < maxItemsToBeRetained {
- return garbageFiles, nil
- }
-
- // sort all timestamps in an ascending order.
- sort.Slice(creationTimestamps, func(i, j int) bool { return creationTimestamps[i].Before(creationTimestamps[j]) })
- for _, ts := range creationTimestamps {
- path, ok := filesWithCreatedTs[ts]
- if !ok {
- return garbageFiles, fmt.Errorf("failed to fetch file for created ts: %v", ts)
- }
- sortedPaths = append(sortedPaths, path)
- }
-
- var collected int
- noOfGarbageFiles := len(garbageFiles)
- for _, path := range sortedPaths {
- if path != localPath && !stringInSlice(path, garbageFiles) {
- // If we previously collected a few garbage files with an expired ttl, then take that into account
- // when checking whether we need to remove more files to satisfy the max no. of items allowed
- // in the filesystem, along with the no. of files already removed in this loop.
- if noOfGarbageFiles > 0 {
- if (len(sortedPaths) - collected - len(garbageFiles)) > maxItemsToBeRetained {
- garbageFiles = append(garbageFiles, path)
- collected += 1
- }
- } else {
- if len(sortedPaths)-collected > maxItemsToBeRetained {
- garbageFiles = append(garbageFiles, path)
- collected += 1
- }
- }
- }
- }
-
- return garbageFiles, nil
-}
-
-// GarbageCollect removes all garabge files in the artifact dir according to the provided
-// retention options.
-func (s *Storage) GarbageCollect(ctx context.Context, artifact sourcev1.Artifact, timeout time.Duration) ([]string, error) {
- delFilesChan := make(chan []string)
- errChan := make(chan error)
- // Abort if it takes more than the provided timeout duration.
- ctx, cancel := context.WithTimeout(ctx, timeout)
- defer cancel()
-
- go func() {
- garbageFiles, err := s.getGarbageFiles(artifact, GarbageCountLimit, s.ArtifactRetentionRecords, s.ArtifactRetentionTTL)
- if err != nil {
- errChan <- err
- return
- }
- var errors []error
- var deleted []string
- if len(garbageFiles) > 0 {
- for _, file := range garbageFiles {
- err := os.Remove(file)
- if err != nil {
- errors = append(errors, err)
- } else {
- deleted = append(deleted, file)
- }
- }
- }
- if len(errors) > 0 {
- errChan <- kerrors.NewAggregate(errors)
- return
- }
- delFilesChan <- deleted
- }()
-
- for {
- select {
- case <-ctx.Done():
- return nil, ctx.Err()
- case delFiles := <-delFilesChan:
- return delFiles, nil
- case err := <-errChan:
- return nil, err
- }
- }
-}
-
-func stringInSlice(a string, list []string) bool {
- for _, b := range list {
- if b == a {
- return true
- }
- }
- return false
-}
-
-// ArtifactExist returns a boolean indicating whether the v1beta1.Artifact exists in storage and is a regular file.
-func (s *Storage) ArtifactExist(artifact sourcev1.Artifact) bool {
- fi, err := os.Lstat(s.LocalPath(artifact))
- if err != nil {
- return false
- }
- return fi.Mode().IsRegular()
-}
-
-// ArchiveFileFilter must return true if a file should not be included in the archive after inspecting the given path
-// and/or os.FileInfo.
-type ArchiveFileFilter func(p string, fi os.FileInfo) bool
-
-// SourceIgnoreFilter returns an ArchiveFileFilter that filters out files matching sourceignore.VCSPatterns and any of
-// the provided patterns.
-// If an empty gitignore.Pattern slice is given, the matcher is set to sourceignore.NewDefaultMatcher.
-func SourceIgnoreFilter(ps []gitignore.Pattern, domain []string) ArchiveFileFilter {
- matcher := sourceignore.NewDefaultMatcher(ps, domain)
- if len(ps) > 0 {
- ps = append(sourceignore.VCSPatterns(domain), ps...)
- matcher = sourceignore.NewMatcher(ps)
- }
- return func(p string, fi os.FileInfo) bool {
- return matcher.Match(strings.Split(p, string(filepath.Separator)), fi.IsDir())
- }
-}
-
-// Archive atomically archives the given directory as a tarball to the given v1beta1.Artifact path, excluding
-// directories and any ArchiveFileFilter matches. While archiving, any environment specific data (for example,
-// the user and group name) is stripped from file headers.
-// If successful, it sets the checksum and last update time on the artifact.
-func (s *Storage) Archive(artifact *sourcev1.Artifact, dir string, filter ArchiveFileFilter) (err error) {
- if f, err := os.Stat(dir); os.IsNotExist(err) || !f.IsDir() {
- return fmt.Errorf("invalid dir path: %s", dir)
- }
-
- localPath := s.LocalPath(*artifact)
- tf, err := os.CreateTemp(filepath.Split(localPath))
- if err != nil {
- return err
- }
- tmpName := tf.Name()
- defer func() {
- if err != nil {
- os.Remove(tmpName)
- }
- }()
-
- h := newHash()
- sz := &writeCounter{}
- mw := io.MultiWriter(h, tf, sz)
-
- gw := gzip.NewWriter(mw)
- tw := tar.NewWriter(gw)
- if err := filepath.Walk(dir, func(p string, fi os.FileInfo, err error) error {
- if err != nil {
- return err
- }
-
- // Ignore anything that is not a file or directories e.g. symlinks
- if m := fi.Mode(); !(m.IsRegular() || m.IsDir()) {
- return nil
- }
-
- // Skip filtered files
- if filter != nil && filter(p, fi) {
- return nil
- }
-
- header, err := tar.FileInfoHeader(fi, p)
- if err != nil {
- return err
- }
- // The name needs to be modified to maintain directory structure
- // as tar.FileInfoHeader only has access to the base name of the file.
- // Ref: https://golang.org/src/archive/tar/common.go?#L626
- relFilePath := p
- if filepath.IsAbs(dir) {
- relFilePath, err = filepath.Rel(dir, p)
- if err != nil {
- return err
- }
- }
- header.Name = relFilePath
-
- // We want to remove any environment specific data as well, this
- // ensures the checksum is purely content based.
- header.Gid = 0
- header.Uid = 0
- header.Uname = ""
- header.Gname = ""
- header.ModTime = time.Time{}
- header.AccessTime = time.Time{}
- header.ChangeTime = time.Time{}
-
- if err := tw.WriteHeader(header); err != nil {
- return err
- }
-
- if !fi.Mode().IsRegular() {
- return nil
- }
- f, err := os.Open(p)
- if err != nil {
- f.Close()
- return err
- }
- if _, err := io.Copy(tw, f); err != nil {
- f.Close()
- return err
- }
- return f.Close()
- }); err != nil {
- tw.Close()
- gw.Close()
- tf.Close()
- return err
- }
-
- if err := tw.Close(); err != nil {
- gw.Close()
- tf.Close()
- return err
- }
- if err := gw.Close(); err != nil {
- tf.Close()
- return err
- }
- if err := tf.Close(); err != nil {
- return err
- }
-
- if err := os.Chmod(tmpName, 0o600); err != nil {
- return err
- }
-
- if err := sourcefs.RenameWithFallback(tmpName, localPath); err != nil {
- return err
- }
-
- artifact.Checksum = fmt.Sprintf("%x", h.Sum(nil))
- artifact.LastUpdateTime = metav1.Now()
- artifact.Size = &sz.written
-
- return nil
-}
-
-// AtomicWriteFile atomically writes the io.Reader contents to the v1beta1.Artifact path.
-// If successful, it sets the checksum and last update time on the artifact.
-func (s *Storage) AtomicWriteFile(artifact *sourcev1.Artifact, reader io.Reader, mode os.FileMode) (err error) {
- localPath := s.LocalPath(*artifact)
- tf, err := os.CreateTemp(filepath.Split(localPath))
- if err != nil {
- return err
- }
- tfName := tf.Name()
- defer func() {
- if err != nil {
- os.Remove(tfName)
- }
- }()
-
- h := newHash()
- sz := &writeCounter{}
- mw := io.MultiWriter(h, tf, sz)
-
- if _, err := io.Copy(mw, reader); err != nil {
- tf.Close()
- return err
- }
- if err := tf.Close(); err != nil {
- return err
- }
-
- if err := os.Chmod(tfName, mode); err != nil {
- return err
- }
-
- if err := sourcefs.RenameWithFallback(tfName, localPath); err != nil {
- return err
- }
-
- artifact.Checksum = fmt.Sprintf("%x", h.Sum(nil))
- artifact.LastUpdateTime = metav1.Now()
- artifact.Size = &sz.written
-
- return nil
-}
-
-// Copy atomically copies the io.Reader contents to the v1beta1.Artifact path.
-// If successful, it sets the checksum and last update time on the artifact.
-func (s *Storage) Copy(artifact *sourcev1.Artifact, reader io.Reader) (err error) {
- localPath := s.LocalPath(*artifact)
- tf, err := os.CreateTemp(filepath.Split(localPath))
- if err != nil {
- return err
- }
- tfName := tf.Name()
- defer func() {
- if err != nil {
- os.Remove(tfName)
- }
- }()
-
- h := newHash()
- sz := &writeCounter{}
- mw := io.MultiWriter(h, tf, sz)
-
- if _, err := io.Copy(mw, reader); err != nil {
- tf.Close()
- return err
- }
- if err := tf.Close(); err != nil {
- return err
- }
-
- if err := sourcefs.RenameWithFallback(tfName, localPath); err != nil {
- return err
- }
-
- artifact.Checksum = fmt.Sprintf("%x", h.Sum(nil))
- artifact.LastUpdateTime = metav1.Now()
- artifact.Size = &sz.written
-
- return nil
-}
-
-// CopyFromPath atomically copies the contents of the given path to the path of the v1beta1.Artifact.
-// If successful, the checksum and last update time on the artifact is set.
-func (s *Storage) CopyFromPath(artifact *sourcev1.Artifact, path string) (err error) {
- f, err := os.Open(path)
- if err != nil {
- return err
- }
- defer func() {
- if cerr := f.Close(); cerr != nil && err == nil {
- err = cerr
- }
- }()
- err = s.Copy(artifact, f)
- return err
-}
-
-// CopyToPath copies the contents in the (sub)path of the given artifact to the given path.
-func (s *Storage) CopyToPath(artifact *sourcev1.Artifact, subPath, toPath string) error {
- // create a tmp directory to store artifact
- tmp, err := os.MkdirTemp("", "flux-include-")
- if err != nil {
- return err
- }
- defer os.RemoveAll(tmp)
-
- // read artifact file content
- localPath := s.LocalPath(*artifact)
- f, err := os.Open(localPath)
- if err != nil {
- return err
- }
- defer f.Close()
-
- // untar the artifact
- untarPath := filepath.Join(tmp, "unpack")
- if _, err = untar.Untar(f, untarPath); err != nil {
- return err
- }
-
- // create the destination parent dir
- if err = os.MkdirAll(filepath.Dir(toPath), os.ModePerm); err != nil {
- return err
- }
-
- // copy the artifact content to the destination dir
- fromPath, err := securejoin.SecureJoin(untarPath, subPath)
- if err != nil {
- return err
- }
- if err := sourcefs.RenameWithFallback(fromPath, toPath); err != nil {
- return err
- }
- return nil
-}
-
-// Symlink creates or updates a symbolic link for the given v1beta1.Artifact and returns the URL for the symlink.
-func (s *Storage) Symlink(artifact sourcev1.Artifact, linkName string) (string, error) {
- localPath := s.LocalPath(artifact)
- dir := filepath.Dir(localPath)
- link := filepath.Join(dir, linkName)
- tmpLink := link + ".tmp"
-
- if err := os.Remove(tmpLink); err != nil && !os.IsNotExist(err) {
- return "", err
- }
-
- if err := os.Symlink(localPath, tmpLink); err != nil {
- return "", err
- }
-
- if err := os.Rename(tmpLink, link); err != nil {
- return "", err
- }
-
- url := fmt.Sprintf("http://%s/%s", s.Hostname, filepath.Join(filepath.Dir(artifact.Path), linkName))
- return url, nil
-}
-
-// Checksum returns the SHA256 checksum for the data of the given io.Reader as a string.
-func (s *Storage) Checksum(reader io.Reader) string {
- h := newHash()
- _, _ = io.Copy(h, reader)
- return fmt.Sprintf("%x", h.Sum(nil))
-}
-
-// Lock creates a file lock for the given v1beta1.Artifact.
-func (s *Storage) Lock(artifact sourcev1.Artifact) (unlock func(), err error) {
- lockFile := s.LocalPath(artifact) + ".lock"
- mutex := lockedfile.MutexAt(lockFile)
- return mutex.Lock()
-}
-
-// LocalPath returns the secure local path of the given artifact (that is: relative to the Storage.BasePath).
-func (s *Storage) LocalPath(artifact sourcev1.Artifact) string {
- if artifact.Path == "" {
- return ""
- }
- path, err := securejoin.SecureJoin(s.BasePath, artifact.Path)
- if err != nil {
- return ""
- }
- return path
-}
-
-// newHash returns a new SHA256 hash.
-func newHash() hash.Hash {
- return sha256.New()
-}
-
-// writecounter is an implementation of io.Writer that only records the number
-// of bytes written.
-type writeCounter struct {
- written int64
-}
-
-func (wc *writeCounter) Write(p []byte) (int, error) {
- n := len(p)
- wc.written += int64(n)
- return n, nil
-}
diff --git a/controllers/storage_test.go b/controllers/storage_test.go
deleted file mode 100644
index 8e0e599a6..000000000
--- a/controllers/storage_test.go
+++ /dev/null
@@ -1,660 +0,0 @@
-/*
-Copyright 2020, 2021 The Flux authors
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package controllers
-
-import (
- "archive/tar"
- "compress/gzip"
- "context"
- "fmt"
- "io"
- "os"
- "path"
- "path/filepath"
- "strings"
- "testing"
- "time"
-
- "github.com/go-git/go-git/v5/plumbing/format/gitignore"
- . "github.com/onsi/gomega"
-
- sourcev1 "github.com/fluxcd/source-controller/api/v1beta2"
-)
-
-func TestStorageConstructor(t *testing.T) {
- dir := t.TempDir()
-
- if _, err := NewStorage("/nonexistent", "hostname", time.Minute, 2); err == nil {
- t.Fatal("nonexistent path was allowable in storage constructor")
- }
-
- f, err := os.CreateTemp(dir, "")
- if err != nil {
- t.Fatalf("while creating temporary file: %v", err)
- }
- f.Close()
-
- if _, err := NewStorage(f.Name(), "hostname", time.Minute, 2); err == nil {
- os.Remove(f.Name())
- t.Fatal("file path was accepted as basedir")
- }
- os.Remove(f.Name())
-
- if _, err := NewStorage(dir, "hostname", time.Minute, 2); err != nil {
- t.Fatalf("Valid path did not successfully return: %v", err)
- }
-}
-
-// walks a tar.gz and looks for paths with the basename. It does not match
-// symlinks properly at this time because that's painful.
-func walkTar(tarFile string, match string, dir bool) (int64, bool, error) {
- f, err := os.Open(tarFile)
- if err != nil {
- return 0, false, fmt.Errorf("could not open file: %w", err)
- }
- defer f.Close()
-
- gzr, err := gzip.NewReader(f)
- if err != nil {
- return 0, false, fmt.Errorf("could not unzip file: %w", err)
- }
- defer gzr.Close()
-
- tr := tar.NewReader(gzr)
- for {
- header, err := tr.Next()
- if err == io.EOF {
- break
- } else if err != nil {
- return 0, false, fmt.Errorf("corrupt tarball reading header: %w", err)
- }
-
- switch header.Typeflag {
- case tar.TypeDir:
- if header.Name == match && dir {
- return 0, true, nil
- }
- case tar.TypeReg:
- if header.Name == match {
- return header.Size, true, nil
- }
- default:
- // skip
- }
- }
-
- return 0, false, nil
-}
-
-func TestStorage_Archive(t *testing.T) {
- dir := t.TempDir()
-
- storage, err := NewStorage(dir, "hostname", time.Minute, 2)
- if err != nil {
- t.Fatalf("error while bootstrapping storage: %v", err)
- }
-
- createFiles := func(files map[string][]byte) (dir string, err error) {
- dir = t.TempDir()
- for name, b := range files {
- absPath := filepath.Join(dir, name)
- if err = os.MkdirAll(filepath.Dir(absPath), 0o750); err != nil {
- return
- }
- f, err := os.Create(absPath)
- if err != nil {
- return "", fmt.Errorf("could not create file %q: %w", absPath, err)
- }
- if n, err := f.Write(b); err != nil {
- f.Close()
- return "", fmt.Errorf("could not write %d bytes to file %q: %w", n, f.Name(), err)
- }
- f.Close()
- }
- return
- }
-
- matchFiles := func(t *testing.T, storage *Storage, artifact sourcev1.Artifact, files map[string][]byte, dirs []string) {
- t.Helper()
- for name, b := range files {
- mustExist := !(name[0:1] == "!")
- if !mustExist {
- name = name[1:]
- }
- s, exist, err := walkTar(storage.LocalPath(artifact), name, false)
- if err != nil {
- t.Fatalf("failed reading tarball: %v", err)
- }
- if bs := int64(len(b)); s != bs {
- t.Fatalf("%q size %v != %v", name, s, bs)
- }
- if exist != mustExist {
- if mustExist {
- t.Errorf("could not find file %q in tarball", name)
- } else {
- t.Errorf("tarball contained excluded file %q", name)
- }
- }
- }
- for _, name := range dirs {
- mustExist := !(name[0:1] == "!")
- if !mustExist {
- name = name[1:]
- }
- _, exist, err := walkTar(storage.LocalPath(artifact), name, true)
- if err != nil {
- t.Fatalf("failed reading tarball: %v", err)
- }
- if exist != mustExist {
- if mustExist {
- t.Errorf("could not find dir %q in tarball", name)
- } else {
- t.Errorf("tarball contained excluded file %q", name)
- }
- }
- }
- }
-
- tests := []struct {
- name string
- files map[string][]byte
- filter ArchiveFileFilter
- want map[string][]byte
- wantDirs []string
- wantErr bool
- }{
- {
- name: "no filter",
- files: map[string][]byte{
- ".git/config": nil,
- "file.jpg": []byte(`contents`),
- "manifest.yaml": nil,
- },
- filter: nil,
- want: map[string][]byte{
- ".git/config": nil,
- "file.jpg": []byte(`contents`),
- "manifest.yaml": nil,
- },
- },
- {
- name: "exclude VCS",
- files: map[string][]byte{
- ".git/config": nil,
- "manifest.yaml": nil,
- },
- wantDirs: []string{
- "!.git",
- },
- filter: SourceIgnoreFilter(nil, nil),
- want: map[string][]byte{
- "!.git/config": nil,
- "manifest.yaml": nil,
- },
- },
- {
- name: "custom",
- files: map[string][]byte{
- ".git/config": nil,
- "custom": nil,
- "horse.jpg": nil,
- },
- filter: SourceIgnoreFilter([]gitignore.Pattern{
- gitignore.ParsePattern("custom", nil),
- }, nil),
- want: map[string][]byte{
- "!git/config": nil,
- "!custom": nil,
- "horse.jpg": nil,
- },
- wantErr: false,
- },
- {
- name: "including directories",
- files: map[string][]byte{
- "test/.gitkeep": nil,
- },
- filter: SourceIgnoreFilter([]gitignore.Pattern{
- gitignore.ParsePattern("custom", nil),
- }, nil),
- wantDirs: []string{
- "test",
- },
- wantErr: false,
- },
- }
- for _, tt := range tests {
- t.Run(tt.name, func(t *testing.T) {
- dir, err := createFiles(tt.files)
- if err != nil {
- t.Error(err)
- return
- }
- defer os.RemoveAll(dir)
- artifact := sourcev1.Artifact{
- Path: filepath.Join(randStringRunes(10), randStringRunes(10), randStringRunes(10)+".tar.gz"),
- }
- if err := storage.MkdirAll(artifact); err != nil {
- t.Fatalf("artifact directory creation failed: %v", err)
- }
- if err := storage.Archive(&artifact, dir, tt.filter); (err != nil) != tt.wantErr {
- t.Errorf("Archive() error = %v, wantErr %v", err, tt.wantErr)
- }
- matchFiles(t, storage, artifact, tt.want, tt.wantDirs)
- })
- }
-}
-
-func TestStorageRemoveAllButCurrent(t *testing.T) {
- t.Run("bad directory in archive", func(t *testing.T) {
- dir := t.TempDir()
-
- s, err := NewStorage(dir, "hostname", time.Minute, 2)
- if err != nil {
- t.Fatalf("Valid path did not successfully return: %v", err)
- }
-
- if _, err := s.RemoveAllButCurrent(sourcev1.Artifact{Path: path.Join(dir, "really", "nonexistent")}); err == nil {
- t.Fatal("Did not error while pruning non-existent path")
- }
- })
-
- t.Run("collect names of deleted items", func(t *testing.T) {
- g := NewWithT(t)
- dir := t.TempDir()
-
- s, err := NewStorage(dir, "hostname", time.Minute, 2)
- g.Expect(err).ToNot(HaveOccurred(), "failed to create new storage")
-
- artifact := sourcev1.Artifact{
- Path: path.Join("foo", "bar", "artifact1.tar.gz"),
- }
-
- // Create artifact dir and artifacts.
- artifactDir := path.Join(dir, "foo", "bar")
- g.Expect(os.MkdirAll(artifactDir, 0o750)).NotTo(HaveOccurred())
- current := []string{
- path.Join(artifactDir, "artifact1.tar.gz"),
- }
- wantDeleted := []string{
- path.Join(artifactDir, "file1.txt"),
- path.Join(artifactDir, "file2.txt"),
- }
- createFile := func(files []string) {
- for _, c := range files {
- f, err := os.Create(c)
- g.Expect(err).ToNot(HaveOccurred())
- g.Expect(f.Close()).ToNot(HaveOccurred())
- }
- }
- createFile(current)
- createFile(wantDeleted)
- _, err = s.Symlink(artifact, "latest.tar.gz")
- g.Expect(err).ToNot(HaveOccurred(), "failed to create symlink")
-
- deleted, err := s.RemoveAllButCurrent(artifact)
- g.Expect(err).ToNot(HaveOccurred(), "failed to remove all but current")
- g.Expect(deleted).To(Equal(wantDeleted))
- })
-}
-
-func TestStorageRemoveAll(t *testing.T) {
- tests := []struct {
- name string
- artifactPath string
- createArtifactPath bool
- wantDeleted string
- }{
- {
- name: "delete non-existent path",
- artifactPath: path.Join("foo", "bar", "artifact1.tar.gz"),
- createArtifactPath: false,
- wantDeleted: "",
- },
- {
- name: "delete existing path",
- artifactPath: path.Join("foo", "bar", "artifact1.tar.gz"),
- createArtifactPath: true,
- wantDeleted: path.Join("foo", "bar"),
- },
- }
-
- for _, tt := range tests {
- t.Run(tt.name, func(t *testing.T) {
- g := NewWithT(t)
- dir := t.TempDir()
-
- s, err := NewStorage(dir, "hostname", time.Minute, 2)
- g.Expect(err).ToNot(HaveOccurred(), "failed to create new storage")
-
- artifact := sourcev1.Artifact{
- Path: tt.artifactPath,
- }
-
- if tt.createArtifactPath {
- g.Expect(os.MkdirAll(path.Join(dir, tt.artifactPath), 0o750)).ToNot(HaveOccurred())
- }
-
- deleted, err := s.RemoveAll(artifact)
- g.Expect(err).ToNot(HaveOccurred())
- g.Expect(deleted).To(ContainSubstring(tt.wantDeleted), "unexpected deleted path")
- })
- }
-}
-
-func TestStorageCopyFromPath(t *testing.T) {
- type File struct {
- Name string
- Content []byte
- }
-
- dir := t.TempDir()
-
- storage, err := NewStorage(dir, "hostname", time.Minute, 2)
- if err != nil {
- t.Fatalf("error while bootstrapping storage: %v", err)
- }
-
- createFile := func(file *File) (absPath string, err error) {
- dir = t.TempDir()
- absPath = filepath.Join(dir, file.Name)
- if err = os.MkdirAll(filepath.Dir(absPath), 0o750); err != nil {
- return
- }
- f, err := os.Create(absPath)
- if err != nil {
- return "", fmt.Errorf("could not create file %q: %w", absPath, err)
- }
- if n, err := f.Write(file.Content); err != nil {
- f.Close()
- return "", fmt.Errorf("could not write %d bytes to file %q: %w", n, f.Name(), err)
- }
- f.Close()
- return
- }
-
- matchFile := func(t *testing.T, storage *Storage, artifact sourcev1.Artifact, file *File, expectMismatch bool) {
- c, err := os.ReadFile(storage.LocalPath(artifact))
- if err != nil {
- t.Fatalf("failed reading file: %v", err)
- }
- if (string(c) != string(file.Content)) != expectMismatch {
- t.Errorf("artifact content does not match and not expecting mismatch, got: %q, want: %q", string(c), string(file.Content))
- }
- }
-
- tests := []struct {
- name string
- file *File
- want *File
- expectMismatch bool
- }{
- {
- name: "content match",
- file: &File{
- Name: "manifest.yaml",
- Content: []byte(`contents`),
- },
- want: &File{
- Name: "manifest.yaml",
- Content: []byte(`contents`),
- },
- },
- {
- name: "content not match",
- file: &File{
- Name: "manifest.yaml",
- Content: []byte(`contents`),
- },
- want: &File{
- Name: "manifest.yaml",
- Content: []byte(`mismatch contents`),
- },
- expectMismatch: true,
- },
- }
- for _, tt := range tests {
- t.Run(tt.name, func(t *testing.T) {
- absPath, err := createFile(tt.file)
- if err != nil {
- t.Error(err)
- return
- }
- artifact := sourcev1.Artifact{
- Path: filepath.Join(randStringRunes(10), randStringRunes(10), randStringRunes(10)),
- }
- if err := storage.MkdirAll(artifact); err != nil {
- t.Fatalf("artifact directory creation failed: %v", err)
- }
- if err := storage.CopyFromPath(&artifact, absPath); err != nil {
- t.Errorf("CopyFromPath() error = %v", err)
- }
- matchFile(t, storage, artifact, tt.want, tt.expectMismatch)
- })
- }
-}
-
-func TestStorage_getGarbageFiles(t *testing.T) {
- artifactFolder := path.Join("foo", "bar")
- tests := []struct {
- name string
- artifactPaths []string
- createPause time.Duration
- ttl time.Duration
- maxItemsToBeRetained int
- totalCountLimit int
- wantDeleted []string
- }{
- {
- name: "delete files based on maxItemsToBeRetained",
- artifactPaths: []string{
- path.Join(artifactFolder, "artifact1.tar.gz"),
- path.Join(artifactFolder, "artifact2.tar.gz"),
- path.Join(artifactFolder, "artifact3.tar.gz"),
- path.Join(artifactFolder, "artifact4.tar.gz"),
- path.Join(artifactFolder, "artifact5.tar.gz"),
- },
- createPause: time.Millisecond * 10,
- ttl: time.Minute * 2,
- totalCountLimit: 10,
- maxItemsToBeRetained: 2,
- wantDeleted: []string{
- path.Join(artifactFolder, "artifact1.tar.gz"),
- path.Join(artifactFolder, "artifact2.tar.gz"),
- path.Join(artifactFolder, "artifact3.tar.gz"),
- },
- },
- {
- name: "delete files based on ttl",
- artifactPaths: []string{
- path.Join(artifactFolder, "artifact1.tar.gz"),
- path.Join(artifactFolder, "artifact2.tar.gz"),
- path.Join(artifactFolder, "artifact3.tar.gz"),
- path.Join(artifactFolder, "artifact4.tar.gz"),
- path.Join(artifactFolder, "artifact5.tar.gz"),
- },
- createPause: time.Second * 1,
- ttl: time.Second*3 + time.Millisecond*500,
- totalCountLimit: 10,
- maxItemsToBeRetained: 4,
- wantDeleted: []string{
- path.Join(artifactFolder, "artifact1.tar.gz"),
- path.Join(artifactFolder, "artifact2.tar.gz"),
- },
- },
- {
- name: "delete files based on ttl and maxItemsToBeRetained",
- artifactPaths: []string{
- path.Join(artifactFolder, "artifact1.tar.gz"),
- path.Join(artifactFolder, "artifact2.tar.gz"),
- path.Join(artifactFolder, "artifact3.tar.gz"),
- path.Join(artifactFolder, "artifact4.tar.gz"),
- path.Join(artifactFolder, "artifact5.tar.gz"),
- path.Join(artifactFolder, "artifact6.tar.gz"),
- },
- createPause: time.Second * 1,
- ttl: time.Second*5 + time.Millisecond*500,
- totalCountLimit: 10,
- maxItemsToBeRetained: 4,
- wantDeleted: []string{
- path.Join(artifactFolder, "artifact1.tar.gz"),
- path.Join(artifactFolder, "artifact2.tar.gz"),
- },
- },
- {
- name: "delete files based on ttl and maxItemsToBeRetained and totalCountLimit",
- artifactPaths: []string{
- path.Join(artifactFolder, "artifact1.tar.gz"),
- path.Join(artifactFolder, "artifact2.tar.gz"),
- path.Join(artifactFolder, "artifact3.tar.gz"),
- path.Join(artifactFolder, "artifact4.tar.gz"),
- path.Join(artifactFolder, "artifact5.tar.gz"),
- path.Join(artifactFolder, "artifact6.tar.gz"),
- },
- createPause: time.Millisecond * 500,
- ttl: time.Millisecond * 500,
- totalCountLimit: 3,
- maxItemsToBeRetained: 2,
- wantDeleted: []string{
- path.Join(artifactFolder, "artifact1.tar.gz"),
- path.Join(artifactFolder, "artifact2.tar.gz"),
- path.Join(artifactFolder, "artifact3.tar.gz"),
- },
- },
- }
-
- for _, tt := range tests {
- t.Run(tt.name, func(t *testing.T) {
- g := NewWithT(t)
- dir := t.TempDir()
-
- s, err := NewStorage(dir, "hostname", tt.ttl, tt.maxItemsToBeRetained)
- g.Expect(err).ToNot(HaveOccurred(), "failed to create new storage")
-
- artifact := sourcev1.Artifact{
- Path: tt.artifactPaths[len(tt.artifactPaths)-1],
- }
- g.Expect(os.MkdirAll(path.Join(dir, artifactFolder), 0o750)).ToNot(HaveOccurred())
- for _, artifactPath := range tt.artifactPaths {
- f, err := os.Create(path.Join(dir, artifactPath))
- g.Expect(err).ToNot(HaveOccurred())
- g.Expect(f.Close()).ToNot(HaveOccurred())
- time.Sleep(tt.createPause)
- }
-
- deletedPaths, err := s.getGarbageFiles(artifact, tt.totalCountLimit, tt.maxItemsToBeRetained, tt.ttl)
- g.Expect(err).ToNot(HaveOccurred(), "failed to collect garbage files")
- g.Expect(len(tt.wantDeleted)).To(Equal(len(deletedPaths)))
- for _, wantDeletedPath := range tt.wantDeleted {
- present := false
- for _, deletedPath := range deletedPaths {
- if strings.Contains(deletedPath, wantDeletedPath) {
- present = true
- break
- }
- }
- if !present {
- g.Fail(fmt.Sprintf("expected file to be deleted, still exists: %s", wantDeletedPath))
- }
- }
- })
- }
-}
-
-func TestStorage_GarbageCollect(t *testing.T) {
- artifactFolder := path.Join("foo", "bar")
- tests := []struct {
- name string
- artifactPaths []string
- wantDeleted []string
- wantErr string
- ctxTimeout time.Duration
- }{
- {
- name: "garbage collects",
- artifactPaths: []string{
- path.Join(artifactFolder, "artifact1.tar.gz"),
- path.Join(artifactFolder, "artifact2.tar.gz"),
- path.Join(artifactFolder, "artifact3.tar.gz"),
- path.Join(artifactFolder, "artifact4.tar.gz"),
- },
- wantDeleted: []string{
- path.Join(artifactFolder, "artifact1.tar.gz"),
- path.Join(artifactFolder, "artifact2.tar.gz"),
- },
- ctxTimeout: time.Second * 1,
- },
- {
- name: "garbage collection fails with context timeout",
- artifactPaths: []string{
- path.Join(artifactFolder, "artifact1.tar.gz"),
- path.Join(artifactFolder, "artifact2.tar.gz"),
- path.Join(artifactFolder, "artifact3.tar.gz"),
- path.Join(artifactFolder, "artifact4.tar.gz"),
- },
- wantErr: "context deadline exceeded",
- ctxTimeout: time.Nanosecond * 1,
- },
- }
-
- for _, tt := range tests {
- t.Run(tt.name, func(t *testing.T) {
- g := NewWithT(t)
- dir := t.TempDir()
-
- s, err := NewStorage(dir, "hostname", time.Second*2, 2)
- g.Expect(err).ToNot(HaveOccurred(), "failed to create new storage")
-
- artifact := sourcev1.Artifact{
- Path: tt.artifactPaths[len(tt.artifactPaths)-1],
- }
- g.Expect(os.MkdirAll(path.Join(dir, artifactFolder), 0o750)).ToNot(HaveOccurred())
- for i, artifactPath := range tt.artifactPaths {
- f, err := os.Create(path.Join(dir, artifactPath))
- g.Expect(err).ToNot(HaveOccurred())
- g.Expect(f.Close()).ToNot(HaveOccurred())
- if i != len(tt.artifactPaths)-1 {
- time.Sleep(time.Second * 1)
- }
- }
-
- deletedPaths, err := s.GarbageCollect(context.TODO(), artifact, tt.ctxTimeout)
- if tt.wantErr == "" {
- g.Expect(err).ToNot(HaveOccurred(), "failed to collect garbage files")
- } else {
- g.Expect(err).To(HaveOccurred())
- g.Expect(err.Error()).To(ContainSubstring(tt.wantErr))
- }
- if len(tt.wantDeleted) > 0 {
- g.Expect(len(tt.wantDeleted)).To(Equal(len(deletedPaths)))
- for _, wantDeletedPath := range tt.wantDeleted {
- present := false
- for _, deletedPath := range deletedPaths {
- if strings.Contains(deletedPath, wantDeletedPath) {
- g.Expect(deletedPath).ToNot(BeAnExistingFile())
- present = true
- break
- }
- }
- if present == false {
- g.Fail(fmt.Sprintf("expected file to be deleted, still exists: %s", wantDeletedPath))
- }
- }
- }
- })
- }
-}
diff --git a/controllers/testdata/certs/ca-key.pem b/controllers/testdata/certs/ca-key.pem
deleted file mode 100644
index b69de5ab5..000000000
--- a/controllers/testdata/certs/ca-key.pem
+++ /dev/null
@@ -1,5 +0,0 @@
------BEGIN EC PRIVATE KEY-----
-MHcCAQEEIOH/u9dMcpVcZ0+X9Fc78dCTj8SHuXawhLjhu/ej64WToAoGCCqGSM49
-AwEHoUQDQgAEruH/kPxtX3cyYR2G7TYmxLq6AHyzo/NGXc9XjGzdJutE2SQzn37H
-dvSJbH+Lvqo9ik0uiJVRVdCYD1j7gNszGA==
------END EC PRIVATE KEY-----
diff --git a/controllers/testdata/certs/ca.csr b/controllers/testdata/certs/ca.csr
deleted file mode 100644
index baa8aeb26..000000000
--- a/controllers/testdata/certs/ca.csr
+++ /dev/null
@@ -1,9 +0,0 @@
------BEGIN CERTIFICATE REQUEST-----
-MIIBIDCBxgIBADAZMRcwFQYDVQQDEw5leGFtcGxlLmNvbSBDQTBZMBMGByqGSM49
-AgEGCCqGSM49AwEHA0IABK7h/5D8bV93MmEdhu02JsS6ugB8s6PzRl3PV4xs3Sbr
-RNkkM59+x3b0iWx/i76qPYpNLoiVUVXQmA9Y+4DbMxigSzBJBgkqhkiG9w0BCQ4x
-PDA6MDgGA1UdEQQxMC+CCWxvY2FsaG9zdIILZXhhbXBsZS5jb22CD3d3dy5leGFt
-cGxlLmNvbYcEfwAAATAKBggqhkjOPQQDAgNJADBGAiEAkw85nyLhJssyCYsaFvRU
-EErhu66xHPJug/nG50uV5OoCIQCUorrflOSxfChPeCe4xfwcPv7FpcCYbKVYtGzz
-b34Wow==
------END CERTIFICATE REQUEST-----
diff --git a/controllers/testdata/certs/ca.pem b/controllers/testdata/certs/ca.pem
deleted file mode 100644
index 080bd24e6..000000000
--- a/controllers/testdata/certs/ca.pem
+++ /dev/null
@@ -1,11 +0,0 @@
------BEGIN CERTIFICATE-----
-MIIBhzCCAS2gAwIBAgIUdsAtiX3gN0uk7ddxASWYE/tdv0wwCgYIKoZIzj0EAwIw
-GTEXMBUGA1UEAxMOZXhhbXBsZS5jb20gQ0EwHhcNMjAwNDE3MDgxODAwWhcNMjUw
-NDE2MDgxODAwWjAZMRcwFQYDVQQDEw5leGFtcGxlLmNvbSBDQTBZMBMGByqGSM49
-AgEGCCqGSM49AwEHA0IABK7h/5D8bV93MmEdhu02JsS6ugB8s6PzRl3PV4xs3Sbr
-RNkkM59+x3b0iWx/i76qPYpNLoiVUVXQmA9Y+4DbMxijUzBRMA4GA1UdDwEB/wQE
-AwIBBjAPBgNVHRMBAf8EBTADAQH/MB0GA1UdDgQWBBQGyUiU1QEZiMAqjsnIYTwZ
-4yp5wzAPBgNVHREECDAGhwR/AAABMAoGCCqGSM49BAMCA0gAMEUCIQDzdtvKdE8O
-1+WRTZ9MuSiFYcrEz7Zne7VXouDEKqKEigIgM4WlbDeuNCKbqhqj+xZV0pa3rweb
-OD8EjjCMY69RMO0=
------END CERTIFICATE-----
diff --git a/controllers/testdata/certs/server-key.pem b/controllers/testdata/certs/server-key.pem
deleted file mode 100644
index 5054ff39f..000000000
--- a/controllers/testdata/certs/server-key.pem
+++ /dev/null
@@ -1,5 +0,0 @@
------BEGIN EC PRIVATE KEY-----
-MHcCAQEEIKQbEXV6nljOHMmPrWVWQ+JrAE5wsbE9iMhfY7wlJgXOoAoGCCqGSM49
-AwEHoUQDQgAE+53oBGlrvVUTelSGYji8GNHVhVg8jOs1PeeLuXCIZjQmctHLFEq3
-fE+mGxCL93MtpYzlwIWBf0m7pEGQre6bzg==
------END EC PRIVATE KEY-----
diff --git a/controllers/testdata/certs/server.csr b/controllers/testdata/certs/server.csr
deleted file mode 100644
index 5caf7b39c..000000000
--- a/controllers/testdata/certs/server.csr
+++ /dev/null
@@ -1,8 +0,0 @@
------BEGIN CERTIFICATE REQUEST-----
-MIIBHDCBwwIBADAWMRQwEgYDVQQDEwtleGFtcGxlLmNvbTBZMBMGByqGSM49AgEG
-CCqGSM49AwEHA0IABPud6ARpa71VE3pUhmI4vBjR1YVYPIzrNT3ni7lwiGY0JnLR
-yxRKt3xPphsQi/dzLaWM5cCFgX9Ju6RBkK3um86gSzBJBgkqhkiG9w0BCQ4xPDA6
-MDgGA1UdEQQxMC+CCWxvY2FsaG9zdIILZXhhbXBsZS5jb22CD3d3dy5leGFtcGxl
-LmNvbYcEfwAAATAKBggqhkjOPQQDAgNIADBFAiB5A6wvQ5x6g/zhiyn+wLzXsOaB
-Gb/F25p/zTHHQqZbkwIhAPUgWzy/2bs6eZEi97bSlaRdmrqHwqT842t5sEwGyXNV
------END CERTIFICATE REQUEST-----
diff --git a/controllers/testdata/certs/server.pem b/controllers/testdata/certs/server.pem
deleted file mode 100644
index 11c655a0b..000000000
--- a/controllers/testdata/certs/server.pem
+++ /dev/null
@@ -1,13 +0,0 @@
------BEGIN CERTIFICATE-----
-MIIB7TCCAZKgAwIBAgIUB+17B8PU05wVTzRHLeG+S+ybZK4wCgYIKoZIzj0EAwIw
-GTEXMBUGA1UEAxMOZXhhbXBsZS5jb20gQ0EwHhcNMjAwNDE3MDgxODAwWhcNMzAw
-NDE1MDgxODAwWjAWMRQwEgYDVQQDEwtleGFtcGxlLmNvbTBZMBMGByqGSM49AgEG
-CCqGSM49AwEHA0IABPud6ARpa71VE3pUhmI4vBjR1YVYPIzrNT3ni7lwiGY0JnLR
-yxRKt3xPphsQi/dzLaWM5cCFgX9Ju6RBkK3um86jgbowgbcwDgYDVR0PAQH/BAQD
-AgWgMB0GA1UdJQQWMBQGCCsGAQUFBwMBBggrBgEFBQcDAjAMBgNVHRMBAf8EAjAA
-MB0GA1UdDgQWBBTM8HS5EIlVMBYv/300jN8PEArUgDAfBgNVHSMEGDAWgBQGyUiU
-1QEZiMAqjsnIYTwZ4yp5wzA4BgNVHREEMTAvgglsb2NhbGhvc3SCC2V4YW1wbGUu
-Y29tgg93d3cuZXhhbXBsZS5jb22HBH8AAAEwCgYIKoZIzj0EAwIDSQAwRgIhAOgB
-5W82FEgiTTOmsNRekkK5jUPbj4D4eHtb2/BI7ph4AiEA2AxHASIFBdv5b7Qf5prb
-bdNmUCzAvVuCAKuMjg2OPrE=
------END CERTIFICATE-----
diff --git a/docs/api/v1/source.md b/docs/api/v1/source.md
new file mode 100644
index 000000000..935d74275
--- /dev/null
+++ b/docs/api/v1/source.md
@@ -0,0 +1,3724 @@
+Source API reference v1
+Packages:
+
+
+Package v1 contains API Schema definitions for the source v1 API group
+Resource Types:
+
+
+Bucket is the Schema for the buckets API.
+
+
+GitRepository is the Schema for the gitrepositories API.
+
+
+HelmChart is the Schema for the helmcharts API.
+
+
+HelmRepository is the Schema for the helmrepositories API.
+
+
+OCIRepository is the Schema for the ocirepositories API
+
+
+
+(Appears on:
+BucketSpec)
+
+BucketSTSSpec specifies the required configuration to use a Security Token
+Service for fetching temporary credentials to authenticate in a Bucket
+provider.
+
+
+
+(Appears on:
+Bucket)
+
+BucketSpec specifies the required configuration to produce an Artifact for
+an object storage bucket.
+
+
+
+(Appears on:
+Bucket)
+
+BucketStatus records the observed state of a Bucket.
+
+
+ExternalArtifact is the Schema for the external artifacts API
+
+
+
+(Appears on:
+ExternalArtifact)
+
+ExternalArtifactSpec defines the desired state of ExternalArtifact
+
+
+
+(Appears on:
+ExternalArtifact)
+
+ExternalArtifactStatus defines the observed state of ExternalArtifact
+
+
+
+(Appears on:
+GitRepositorySpec,
+GitRepositoryStatus)
+
+GitRepositoryInclude specifies a local reference to a GitRepository which
+Artifact (sub-)contents must be included, and where they should be placed.
+
+
+
+(Appears on:
+GitRepositorySpec)
+
+GitRepositoryRef specifies the Git reference to resolve and checkout.
+
+
+
+(Appears on:
+GitRepository)
+
+GitRepositorySpec specifies the required configuration to produce an
+Artifact for a Git repository.
+
+
+
+(Appears on:
+GitRepository)
+
+GitRepositoryStatus records the observed state of a Git repository.
+
+
+
+(Appears on:
+GitRepositorySpec)
+
+GitRepositoryVerification specifies the Git commit signature verification
+strategy.
+
+
+
+(Appears on:
+GitRepositoryStatus,
+GitRepositoryVerification)
+
+GitVerificationMode specifies the verification mode for a Git repository.
+
+
+(Appears on:
+HelmChart)
+
+HelmChartSpec specifies the desired state of a Helm chart.
+
+
+
+(Appears on:
+HelmChart)
+
+HelmChartStatus records the observed state of the HelmChart.
+
+
+
+(Appears on:
+HelmRepository)
+
+HelmRepositorySpec specifies the required configuration to produce an
+Artifact for a Helm repository index YAML.
+
+
+
+(Appears on:
+HelmRepository)
+
+HelmRepositoryStatus records the observed state of the HelmRepository.
+
+
+
+(Appears on:
+HelmChartSpec)
+
+LocalHelmChartSourceReference contains enough information to let you locate
+the typed referenced object at namespace level.
+
+
+
+(Appears on:
+OCIRepositorySpec,
+OCIRepositoryStatus)
+
+OCILayerSelector specifies which layer should be extracted from an OCI Artifact
+
+
+
+(Appears on:
+OCIRepositorySpec)
+
+OCIRepositoryRef defines the image reference for the OCIRepository’s URL
+
+
+
+(Appears on:
+OCIRepository)
+
+OCIRepositorySpec defines the desired state of OCIRepository
+
+
+
+(Appears on:
+OCIRepository)
+
+OCIRepositoryStatus defines the observed state of OCIRepository
+
+
+
+(Appears on:
+HelmChartSpec,
+OCIRepositorySpec)
+
+OCIRepositoryVerification verifies the authenticity of an OCI Artifact
+
+
+
+(Appears on:
+OCIRepositoryVerification)
+
+OIDCIdentityMatch specifies options for verifying the certificate identity,
+i.e. the issuer and the subject of the certificate.
+
+
+Source interface must be supported by all API types.
+Source is the interface that provides generic access to the Artifact and
+interval. It must be supported by all kinds of the source.toolkit.fluxcd.io
+API group.
+
+
This page was automatically generated with gen-crd-api-reference-docs
+
diff --git a/docs/api/source.md b/docs/api/v1beta2/source.md
similarity index 66%
rename from docs/api/source.md
rename to docs/api/v1beta2/source.md
index 09f072743..8234f7014 100644
--- a/docs/api/source.md
+++ b/docs/api/v1beta2/source.md
@@ -1,4 +1,4 @@
-Source API reference
+Source API reference v1beta2
Packages:
-
@@ -114,6 +114,23 @@ string
+sts
+
+
+BucketSTSSpec
+
+
+ |
+
+(Optional)
+ STS specifies the required configuration to use a Security Token
+Service for fetching temporary credentials to authenticate in a
+Bucket provider.
+This field is only supported for the aws and generic providers.
+ |
+
+
+
insecure
bool
@@ -138,9 +155,21 @@ string
|
+prefix
+
+string
+
+ |
+
+(Optional)
+ Prefix to use for server-side filtering of files in the Bucket.
+ |
+
+
+
secretRef
-
+
github.com/fluxcd/pkg/apis/meta.LocalObjectReference
@@ -153,22 +182,65 @@ for the Bucket.
|
+certSecretRef
+
+
+github.com/fluxcd/pkg/apis/meta.LocalObjectReference
+
+
+ |
+
+(Optional)
+ CertSecretRef can be given the name of a Secret containing
+either or both of
+
+- a PEM-encoded client certificate (
tls.crt ) and private
+key (tls.key );
+- a PEM-encoded CA certificate (
ca.crt )
+
+and whichever are supplied, will be used for connecting to the
+bucket. The client cert and key are useful if you are
+authenticating with a certificate; the CA cert is useful if
+you are using a self-signed server certificate. The Secret must
+be of type Opaque or kubernetes.io/tls .
+This field is only supported for the generic provider.
+ |
+
+
+
+proxySecretRef
+
+
+github.com/fluxcd/pkg/apis/meta.LocalObjectReference
+
+
+ |
+
+(Optional)
+ ProxySecretRef specifies the Secret containing the proxy configuration
+to use while communicating with the Bucket server.
+ |
+
+
+
interval
-
+
Kubernetes meta/v1.Duration
|
- Interval at which to check the Endpoint for updates.
+Interval at which the Bucket Endpoint is checked for updates.
+This interval is approximate and may be subject to jitter to ensure
+efficient use of resources.
|
timeout
-
+
Kubernetes meta/v1.Duration
@@ -209,7 +281,7 @@ Bucket.
|
accessFrom
-
+
github.com/fluxcd/pkg/apis/acl.AccessFrom
@@ -312,7 +384,7 @@ string
|
secretRef
-
+
github.com/fluxcd/pkg/apis/meta.LocalObjectReference
@@ -322,7 +394,7 @@ github.com/fluxcd/pkg/apis/meta.LocalObjectReference
SecretRef specifies the Secret containing authentication credentials for
the GitRepository.
For HTTPS repositories the Secret must contain ‘username’ and ‘password’
-fields.
+fields for basic auth or ‘bearerToken’ field for token auth.
For SSH repositories the Secret must contain ‘identity’
and ‘known_hosts’ fields.
|
@@ -331,7 +403,7 @@ and ‘known_hosts’ fields.
interval
-
+
Kubernetes meta/v1.Duration
@@ -344,7 +416,7 @@ Kubernetes meta/v1.Duration
|
timeout
-
+
Kubernetes meta/v1.Duration
@@ -421,7 +493,9 @@ string
|
(Optional)
GitImplementation specifies which Git client library implementation to
-use. Defaults to ‘go-git’, valid values are (‘go-git’, ‘libgit2’).
+use. Defaults to ‘go-git’, valid values are (‘go-git’, ‘libgit2’).
+Deprecated: gitImplementation is deprecated now that ‘go-git’ is the
+only supported implementation.
|
@@ -434,8 +508,7 @@ bool
(Optional)
RecurseSubmodules enables the initialization of all submodules within
-the GitRepository as cloned from the URL, using their default settings.
-This option is available only when using the ‘go-git’ GitImplementation.
+the GitRepository as cloned from the URL, using their default settings.
|
@@ -456,7 +529,7 @@ should be included in the Artifact produced for this GitRepository.
accessFrom
-
+
github.com/fluxcd/pkg/apis/acl.AccessFrom
@@ -586,13 +659,15 @@ LocalHelmChartSourceReference
|
interval
-
+
Kubernetes meta/v1.Duration
|
- Interval is the interval at which to check the Source for updates.
+Interval at which the HelmChart SourceRef is checked for updates.
+This interval is approximate and may be subject to jitter to ensure
+efficient use of resources.
|
@@ -643,6 +718,19 @@ is merged before the ValuesFiles items. Ignored when omitted.
+ignoreMissingValuesFiles
+
+bool
+
+ |
+
+(Optional)
+ IgnoreMissingValuesFiles controls whether to silently ignore missing values
+files rather than failing.
+ |
+
+
+
suspend
bool
@@ -658,7 +746,7 @@ source.
accessFrom
-
+
github.com/fluxcd/pkg/apis/acl.AccessFrom
@@ -670,6 +758,24 @@ references to this object.
NOTE: Not implemented, provisional as of https://github.com/fluxcd/flux2/pull/2092
|
|
+
+
+verify
+
+
+github.com/fluxcd/source-controller/api/v1.OCIRepositoryVerification
+
+
+ |
+
+(Optional)
+ Verify contains the secret name containing the trusted public keys
+used to verify the signature and specifies which provider to use to check
+whether OCI image is authentic.
+This field is only supported when using HelmRepository source with spec.type ‘oci’.
+Chart dependencies, which are not bundled in the umbrella chart artifact, are not verified.
+ |
+
@@ -762,7 +868,7 @@ host.
secretRef
-
+
github.com/fluxcd/pkg/apis/meta.LocalObjectReference
@@ -773,8 +879,35 @@ github.com/fluxcd/pkg/apis/meta.LocalObjectReference
for the HelmRepository.
For HTTP/S basic auth the secret must contain ‘username’ and ‘password’
fields.
-For TLS the secret must contain a ‘certFile’ and ‘keyFile’, and/or
-‘caCert’ fields.
+Support for TLS auth using the ‘certFile’ and ‘keyFile’, and/or ‘caFile’
+keys is deprecated. Please use .spec.certSecretRef instead.
+ |
+
+
+
+certSecretRef
+
+
+github.com/fluxcd/pkg/apis/meta.LocalObjectReference
+
+
+ |
+
+(Optional)
+ CertSecretRef can be given the name of a Secret containing
+either or both of
+
+- a PEM-encoded client certificate (
tls.crt ) and private
+key (tls.key );
+- a PEM-encoded CA certificate (
ca.crt )
+
+and whichever are supplied, will be used for connecting to the
+registry. The client cert and key are useful if you are
+authenticating with a certificate; the CA cert is useful if
+you are using a self-signed server certificate. The Secret must
+be of type Opaque or kubernetes.io/tls .
+It takes precedence over the values specified in the Secret referred
+to by .spec.secretRef .
|
@@ -798,27 +931,46 @@ in credentials getting stolen in a MITM-attack.
interval
-
+
Kubernetes meta/v1.Duration
|
- Interval at which to check the URL for updates.
+(Optional)
+Interval at which the HelmRepository URL is checked for updates.
+This interval is approximate and may be subject to jitter to ensure
+efficient use of resources.
+ |
+
+
+
+insecure
+
+bool
+
+ |
+
+(Optional)
+ Insecure allows connecting to a non-TLS HTTP container registry.
+This field is only taken into account if the .spec.type field is set to ‘oci’.
|
timeout
-
+
Kubernetes meta/v1.Duration
|
(Optional)
- Timeout of the index fetch operation, defaults to 60s.
+Timeout is used for the index fetch operation for an HTTPS helm repository,
+and for remote OCI Repository operations like pulling for an OCI helm
+chart by the associated HelmChart.
+Its default value is 60s.
|
@@ -838,7 +990,7 @@ HelmRepository.
accessFrom
-
+
github.com/fluxcd/pkg/apis/acl.AccessFrom
@@ -863,6 +1015,20 @@ string
When this field is set to “oci”, the URL field value must be prefixed with “oci://”.
|
+
+
+provider
+
+string
+
+ |
+
+(Optional)
+ Provider used for authentication, can be ‘aws’, ‘azure’, ‘gcp’ or ‘generic’.
+This field is optional, and only taken into account if the .spec.type field is set to ‘oci’.
+When not specified, defaults to ‘generic’.
+ |
+
@@ -968,6 +1134,21 @@ defaults to the latest tag.
+layerSelector
+
+
+OCILayerSelector
+
+
+ |
+
+(Optional)
+ LayerSelector specifies which layer should be extracted from the OCI artifact.
+When not specified, the first layer found in the artifact is selected.
+ |
+
+
+
provider
string
@@ -983,7 +1164,7 @@ When not specified, defaults to ‘generic’.
secretRef
-
+
github.com/fluxcd/pkg/apis/meta.LocalObjectReference
@@ -997,6 +1178,22 @@ The secret must be of type kubernetes.io/dockerconfigjson.
| |
+verify
+
+
+github.com/fluxcd/source-controller/api/v1.OCIRepositoryVerification
+
+
+ |
+
+(Optional)
+ Verify contains the secret name containing the trusted public keys
+used to verify the signature and specifies which provider to use to check
+whether OCI image is authentic.
+ |
+
+
+
serviceAccountName
string
@@ -1013,44 +1210,64 @@ the image pull if the service account has attached pull secrets. For more inform
certSecretRef
-
+
github.com/fluxcd/pkg/apis/meta.LocalObjectReference
|
(Optional)
- CertSecretRef can be given the name of a secret containing
+ CertSecretRef can be given the name of a Secret containing
either or both of
-- a PEM-encoded client certificate (
certFile ) and private
-key (keyFile );
-- a PEM-encoded CA certificate (
caFile )
+- a PEM-encoded client certificate (
tls.crt ) and private
+key (tls.key );
+- a PEM-encoded CA certificate (
ca.crt )
and whichever are supplied, will be used for connecting to the
registry. The client cert and key are useful if you are
authenticating with a certificate; the CA cert is useful if
-you are using a self-signed server certificate.
+you are using a self-signed server certificate. The Secret must
+be of type Opaque or kubernetes.io/tls .
+Note: Support for the caFile , certFile and keyFile keys have
+been deprecated.
+ |
+ |
+
+
+proxySecretRef
+
+
+github.com/fluxcd/pkg/apis/meta.LocalObjectReference
+
+
+ |
+
+(Optional)
+ ProxySecretRef specifies the Secret containing the proxy configuration
+to use while communicating with the container registry.
|
interval
-
+
Kubernetes meta/v1.Duration
|
- The interval at which to check for image updates.
+Interval at which the OCIRepository URL is checked for updates.
+This interval is approximate and may be subject to jitter to ensure
+efficient use of resources.
|
timeout
-
+
Kubernetes meta/v1.Duration
@@ -1076,6 +1293,18 @@ consult the documentation for your version to find out what those are.
|
+insecure
+
+bool
+
+ |
+
+(Optional)
+ Insecure allows connecting to a non-TLS HTTP container registry.
+ |
+
+
+
suspend
bool
@@ -1107,15 +1336,9 @@ OCIRepositoryStatus
-
-(Appears on:
-BucketStatus,
-GitRepositoryStatus,
-HelmChartStatus,
-HelmRepositoryStatus,
-OCIRepositoryStatus)
-
Artifact represents the output of a Source reconciliation.
+Deprecated: use Artifact from api/v1 instead. This type will be removed in
+a future release.
+
+
+(Appears on:
+BucketSpec)
+
+BucketSTSSpec specifies the required configuration to use a Security Token
+Service for fetching temporary credentials to authenticate in a Bucket
+provider.
+
@@ -1275,6 +1599,23 @@ string
|
+sts
+
+
+BucketSTSSpec
+
+
+ |
+
+(Optional)
+ STS specifies the required configuration to use a Security Token
+Service for fetching temporary credentials to authenticate in a
+Bucket provider.
+This field is only supported for the aws and generic providers.
+ |
+
+
+
insecure
bool
@@ -1299,9 +1640,21 @@ string
|
+prefix
+
+string
+
+ |
+
+(Optional)
+ Prefix to use for server-side filtering of files in the Bucket.
+ |
+
+
+
secretRef
-
+
github.com/fluxcd/pkg/apis/meta.LocalObjectReference
@@ -1314,22 +1667,65 @@ for the Bucket.
|
+certSecretRef
+
+
+github.com/fluxcd/pkg/apis/meta.LocalObjectReference
+
+
+ |
+
+(Optional)
+ CertSecretRef can be given the name of a Secret containing
+either or both of
+
+- a PEM-encoded client certificate (
tls.crt ) and private
+key (tls.key );
+- a PEM-encoded CA certificate (
ca.crt )
+
+and whichever are supplied, will be used for connecting to the
+bucket. The client cert and key are useful if you are
+authenticating with a certificate; the CA cert is useful if
+you are using a self-signed server certificate. The Secret must
+be of type Opaque or kubernetes.io/tls .
+This field is only supported for the generic provider.
+ |
+
+
+
+proxySecretRef
+
+
+github.com/fluxcd/pkg/apis/meta.LocalObjectReference
+
+
+ |
+
+(Optional)
+ ProxySecretRef specifies the Secret containing the proxy configuration
+to use while communicating with the Bucket server.
+ |
+
+
+
interval
-
+
Kubernetes meta/v1.Duration
|
- Interval at which to check the Endpoint for updates.
+Interval at which the Bucket Endpoint is checked for updates.
+This interval is approximate and may be subject to jitter to ensure
+efficient use of resources.
|
timeout
-
+
Kubernetes meta/v1.Duration
@@ -1370,7 +1766,7 @@ Bucket.
|
accessFrom
-
+
github.com/fluxcd/pkg/apis/acl.AccessFrom
@@ -1419,7 +1815,7 @@ int64
|
conditions
-
+
[]Kubernetes meta/v1.Condition
@@ -1447,8 +1843,8 @@ BucketStatus.Artifact data is recommended.
|
artifact
-
-Artifact
+
+github.com/fluxcd/source-controller/api/v1.Artifact
|
@@ -1459,9 +1855,22 @@ Artifact
+observedIgnore
+
+string
+
+ |
+
+(Optional)
+ ObservedIgnore is the observed exclusion patterns used for constructing
+the source artifact.
+ |
+
+
+
ReconcileRequestStatus
-
+
github.com/fluxcd/pkg/apis/meta.ReconcileRequestStatus
@@ -1480,7 +1889,8 @@ github.com/fluxcd/pkg/apis/meta.ReconcileRequestStatus
(Appears on:
-GitRepositorySpec)
+GitRepositorySpec,
+GitRepositoryStatus)
GitRepositoryInclude specifies a local reference to a GitRepository which
Artifact (sub-)contents must be included, and where they should be placed.
@@ -1498,7 +1908,7 @@ Artifact (sub-)contents must be included, and where they should be placed.
|
repository
-
+
github.com/fluxcd/pkg/apis/meta.LocalObjectReference
@@ -1565,8 +1975,6 @@ string
|
(Optional)
Branch to check out, defaults to ‘master’ if no other field is defined.
-When GitRepositorySpec.GitImplementation is set to ‘go-git’, a shallow
-clone of the specified branch is performed.
|
@@ -1595,6 +2003,20 @@ string
+name
+
+string
+
+ |
+
+(Optional)
+ Name of the reference to check out; takes precedence over Branch, Tag and SemVer.
+It must be a valid Git reference: https://git-scm.com/docs/git-check-ref-format#_description
+Examples: “refs/heads/main”, “refs/tags/v0.1.0”, “refs/pull/420/head”, “refs/merge-requests/1/head”
+ |
+
+
+
commit
string
@@ -1603,9 +2025,8 @@ string
(Optional)
Commit SHA to check out, takes precedence over all reference fields.
-When GitRepositorySpec.GitImplementation is set to ‘go-git’, this can be
-combined with Branch to shallow clone the branch, in which the commit is
-expected to exist.
+This can be combined with Branch to shallow clone the branch, in which
+the commit is expected to exist.
|
|
@@ -1645,7 +2066,7 @@ string
secretRef
-
+
github.com/fluxcd/pkg/apis/meta.LocalObjectReference
@@ -1655,7 +2076,7 @@ github.com/fluxcd/pkg/apis/meta.LocalObjectReference
SecretRef specifies the Secret containing authentication credentials for
the GitRepository.
For HTTPS repositories the Secret must contain ‘username’ and ‘password’
-fields.
+fields for basic auth or ‘bearerToken’ field for token auth.
For SSH repositories the Secret must contain ‘identity’
and ‘known_hosts’ fields.
|
@@ -1664,7 +2085,7 @@ and ‘known_hosts’ fields.
interval
-
+
Kubernetes meta/v1.Duration
@@ -1677,7 +2098,7 @@ Kubernetes meta/v1.Duration
|
timeout
-
+
Kubernetes meta/v1.Duration
@@ -1754,7 +2175,9 @@ string
|
(Optional)
GitImplementation specifies which Git client library implementation to
-use. Defaults to ‘go-git’, valid values are (‘go-git’, ‘libgit2’).
+use. Defaults to ‘go-git’, valid values are (‘go-git’, ‘libgit2’).
+Deprecated: gitImplementation is deprecated now that ‘go-git’ is the
+only supported implementation.
|
@@ -1767,8 +2190,7 @@ bool
(Optional)
RecurseSubmodules enables the initialization of all submodules within
-the GitRepository as cloned from the URL, using their default settings.
-This option is available only when using the ‘go-git’ GitImplementation.
+the GitRepository as cloned from the URL, using their default settings.
|
@@ -1789,7 +2211,7 @@ should be included in the Artifact produced for this GitRepository.
accessFrom
-
+
github.com/fluxcd/pkg/apis/acl.AccessFrom
@@ -1839,7 +2261,7 @@ object.
|
conditions
-
+
[]Kubernetes meta/v1.Condition
@@ -1867,8 +2289,8 @@ GitRepositoryStatus.Artifact data is recommended.
|
artifact
-
-Artifact
+
+github.com/fluxcd/source-controller/api/v1.Artifact
|
@@ -1881,8 +2303,8 @@ Artifact
includedArtifacts
-
-[]Artifact
+
+[]github.com/fluxcd/source-controller/api/v1.Artifact
|
@@ -1910,13 +2332,56 @@ observed in .status.observedGeneration version of the object. This can
be used to determine if the content of the included repository has
changed.
It has the format of <algo>:<checksum>
, for example: sha256:<checksum>
.
+Deprecated: Replaced with explicit fields for observed artifact content
+config in the status.
+
+
+
+
+observedIgnore
+
+string
+
+ |
+
+(Optional)
+ ObservedIgnore is the observed exclusion patterns used for constructing
+the source artifact.
+ |
+
+
+
+observedRecurseSubmodules
+
+bool
+
+ |
+
+(Optional)
+ ObservedRecurseSubmodules is the observed resource submodules
+configuration used to produce the current Artifact.
+ |
+
+
+
+observedInclude
+
+
+[]GitRepositoryInclude
+
+
+ |
+
+(Optional)
+ ObservedInclude is the observed list of GitRepository resources used to
+to produce the current Artifact.
|
ReconcileRequestStatus
-
+
github.com/fluxcd/pkg/apis/meta.ReconcileRequestStatus
@@ -1964,7 +2429,7 @@ string
|
secretRef
-
+
github.com/fluxcd/pkg/apis/meta.LocalObjectReference
@@ -2037,13 +2502,15 @@ LocalHelmChartSourceReference
|
interval
-
+
Kubernetes meta/v1.Duration
|
- Interval is the interval at which to check the Source for updates.
+Interval at which the HelmChart SourceRef is checked for updates.
+This interval is approximate and may be subject to jitter to ensure
+efficient use of resources.
|
@@ -2094,6 +2561,19 @@ is merged before the ValuesFiles items. Ignored when omitted.
+ignoreMissingValuesFiles
+
+bool
+
+ |
+
+(Optional)
+ IgnoreMissingValuesFiles controls whether to silently ignore missing values
+files rather than failing.
+ |
+
+
+
suspend
bool
@@ -2109,7 +2589,7 @@ source.
accessFrom
-
+
github.com/fluxcd/pkg/apis/acl.AccessFrom
@@ -2121,6 +2601,24 @@ references to this object.
NOTE: Not implemented, provisional as of https://github.com/fluxcd/flux2/pull/2092
|
|
+
+
+verify
+
+
+github.com/fluxcd/source-controller/api/v1.OCIRepositoryVerification
+
+
+ |
+
+(Optional)
+ Verify contains the secret name containing the trusted public keys
+used to verify the signature and specifies which provider to use to check
+whether OCI image is authentic.
+This field is only supported when using HelmRepository source with spec.type ‘oci’.
+Chart dependencies, which are not bundled in the umbrella chart artifact, are not verified.
+ |
+
@@ -2183,9 +2681,23 @@ resolved chart reference.
+observedValuesFiles
+
+[]string
+
+ |
+
+(Optional)
+ ObservedValuesFiles are the observed value files of the last successful
+reconciliation.
+It matches the chart in the last successfully reconciled artifact.
+ |
+
+
+
conditions
-
+
[]Kubernetes meta/v1.Condition
@@ -2213,8 +2725,8 @@ BucketStatus.Artifact data is recommended.
|
artifact
-
-Artifact
+
+github.com/fluxcd/source-controller/api/v1.Artifact
|
@@ -2227,7 +2739,7 @@ Artifact
ReconcileRequestStatus
-
+
github.com/fluxcd/pkg/apis/meta.ReconcileRequestStatus
@@ -2276,7 +2788,7 @@ host.
|
secretRef
-
+
github.com/fluxcd/pkg/apis/meta.LocalObjectReference
@@ -2287,8 +2799,35 @@ github.com/fluxcd/pkg/apis/meta.LocalObjectReference
for the HelmRepository.
For HTTP/S basic auth the secret must contain ‘username’ and ‘password’
fields.
-For TLS the secret must contain a ‘certFile’ and ‘keyFile’, and/or
-‘caCert’ fields.
+Support for TLS auth using the ‘certFile’ and ‘keyFile’, and/or ‘caFile’
+keys is deprecated. Please use .spec.certSecretRef instead.
+ |
+
+
+
+certSecretRef
+
+
+github.com/fluxcd/pkg/apis/meta.LocalObjectReference
+
+
+ |
+
+(Optional)
+ CertSecretRef can be given the name of a Secret containing
+either or both of
+
+- a PEM-encoded client certificate (
tls.crt ) and private
+key (tls.key );
+- a PEM-encoded CA certificate (
ca.crt )
+
+and whichever are supplied, will be used for connecting to the
+registry. The client cert and key are useful if you are
+authenticating with a certificate; the CA cert is useful if
+you are using a self-signed server certificate. The Secret must
+be of type Opaque or kubernetes.io/tls .
+It takes precedence over the values specified in the Secret referred
+to by .spec.secretRef .
|
@@ -2312,27 +2851,46 @@ in credentials getting stolen in a MITM-attack.
interval
-
+
Kubernetes meta/v1.Duration
|
- Interval at which to check the URL for updates.
+(Optional)
+Interval at which the HelmRepository URL is checked for updates.
+This interval is approximate and may be subject to jitter to ensure
+efficient use of resources.
+ |
+
+
+
+insecure
+
+bool
+
+ |
+
+(Optional)
+ Insecure allows connecting to a non-TLS HTTP container registry.
+This field is only taken into account if the .spec.type field is set to ‘oci’.
|
timeout
-
+
Kubernetes meta/v1.Duration
|
(Optional)
- Timeout of the index fetch operation, defaults to 60s.
+Timeout is used for the index fetch operation for an HTTPS helm repository,
+and for remote OCI Repository operations like pulling for an OCI helm
+chart by the associated HelmChart.
+Its default value is 60s.
|
@@ -2352,7 +2910,7 @@ HelmRepository.
accessFrom
-
+
github.com/fluxcd/pkg/apis/acl.AccessFrom
@@ -2377,6 +2935,20 @@ string
When this field is set to “oci”, the URL field value must be prefixed with “oci://”.
|
+
+
+provider
+
+string
+
+ |
+
+(Optional)
+ Provider used for authentication, can be ‘aws’, ‘azure’, ‘gcp’ or ‘generic’.
+This field is optional, and only taken into account if the .spec.type field is set to ‘oci’.
+When not specified, defaults to ‘generic’.
+ |
+
@@ -2415,7 +2987,7 @@ object.
conditions
-
+
[]Kubernetes meta/v1.Condition
@@ -2443,8 +3015,8 @@ HelmRepositoryStatus.Artifact data is recommended.
|
artifact
-
-Artifact
+
+github.com/fluxcd/source-controller/api/v1.Artifact
|
@@ -2457,7 +3029,7 @@ Artifact
ReconcileRequestStatus
-
+
github.com/fluxcd/pkg/apis/meta.ReconcileRequestStatus
@@ -2529,6 +3101,57 @@ string
+
+
+(Appears on:
+OCIRepositorySpec,
+OCIRepositoryStatus)
+
+OCILayerSelector specifies which layer should be extracted from an OCI Artifact
+
@@ -2574,6 +3197,18 @@ the range, takes precedence over Tag.
|
+semverFilter
+
+string
+
+ |
+
+(Optional)
+ SemverFilter is a regex pattern to filter the tags within the SemVer range.
+ |
+
+
+
tag
string
@@ -2634,6 +3269,21 @@ defaults to the latest tag.
|
+layerSelector
+
+
+OCILayerSelector
+
+
+ |
+
+(Optional)
+ LayerSelector specifies which layer should be extracted from the OCI artifact.
+When not specified, the first layer found in the artifact is selected.
+ |
+
+
+
provider
string
@@ -2649,7 +3299,7 @@ When not specified, defaults to ‘generic’.
secretRef
-
+
github.com/fluxcd/pkg/apis/meta.LocalObjectReference
@@ -2663,6 +3313,22 @@ The secret must be of type kubernetes.io/dockerconfigjson.
| |
+verify
+
+
+github.com/fluxcd/source-controller/api/v1.OCIRepositoryVerification
+
+
+ |
+
+(Optional)
+ Verify contains the secret name containing the trusted public keys
+used to verify the signature and specifies which provider to use to check
+whether OCI image is authentic.
+ |
+
+
+
serviceAccountName
string
@@ -2679,44 +3345,64 @@ the image pull if the service account has attached pull secrets. For more inform
certSecretRef
-
+
github.com/fluxcd/pkg/apis/meta.LocalObjectReference
|
(Optional)
- CertSecretRef can be given the name of a secret containing
+ CertSecretRef can be given the name of a Secret containing
either or both of
-- a PEM-encoded client certificate (
certFile ) and private
-key (keyFile );
-- a PEM-encoded CA certificate (
caFile )
+- a PEM-encoded client certificate (
tls.crt ) and private
+key (tls.key );
+- a PEM-encoded CA certificate (
ca.crt )
and whichever are supplied, will be used for connecting to the
registry. The client cert and key are useful if you are
authenticating with a certificate; the CA cert is useful if
-you are using a self-signed server certificate.
+you are using a self-signed server certificate. The Secret must
+be of type Opaque or kubernetes.io/tls .
+Note: Support for the caFile , certFile and keyFile keys have
+been deprecated.
+ |
+ |
+
+
+proxySecretRef
+
+
+github.com/fluxcd/pkg/apis/meta.LocalObjectReference
+
+
+ |
+
+(Optional)
+ ProxySecretRef specifies the Secret containing the proxy configuration
+to use while communicating with the container registry.
|
interval
-
+
Kubernetes meta/v1.Duration
|
- The interval at which to check for image updates.
+Interval at which the OCIRepository URL is checked for updates.
+This interval is approximate and may be subject to jitter to ensure
+efficient use of resources.
|
timeout
-
+
Kubernetes meta/v1.Duration
@@ -2742,6 +3428,18 @@ consult the documentation for your version to find out what those are.
|
+insecure
+
+bool
+
+ |
+
+(Optional)
+ Insecure allows connecting to a non-TLS HTTP container registry.
+ |
+
+
+
suspend
bool
@@ -2789,7 +3487,7 @@ int64
conditions
-
+
[]Kubernetes meta/v1.Condition
@@ -2815,8 +3513,8 @@ string
|
artifact
-
-Artifact
+
+github.com/fluxcd/source-controller/api/v1.Artifact
|
@@ -2827,59 +3525,66 @@ Artifact
|
-ReconcileRequestStatus
+contentConfigChecksum
-
-github.com/fluxcd/pkg/apis/meta.ReconcileRequestStatus
-
+string
|
-
-(Members of ReconcileRequestStatus are embedded into this type.)
-
+(Optional)
+ContentConfigChecksum is a checksum of all the configurations related to
+the content of the source artifact:
+- .spec.ignore
+- .spec.layerSelector
+observed in .status.observedGeneration version of the object. This can
+be used to determine if the content configuration has changed and the
+artifact needs to be rebuilt.
+It has the format of <algo>:<checksum> , for example: sha256:<checksum> .
+Deprecated: Replaced with explicit fields for observed artifact content
+config in the status.
|
-
-
-
-
-
-OCIRepositoryVerification verifies the authenticity of an OCI Artifact
-