Compare commits


No commits in common. "main" and "v0.24.1" have entirely different histories.

331 changed files with 22935 additions and 56311 deletions


@@ -1 +1 @@
-build/
+build/libgit2/


@@ -1,40 +0,0 @@
-version: 2
-updates:
-  - package-ecosystem: "gomod"
-    directory: "/"
-    labels: ["dependencies"]
-    schedule:
-      interval: "monthly"
-    groups:
-      go-deps:
-        patterns:
-          - "*"
-    allow:
-      - dependency-type: "direct"
-    ignore:
-      # Cloud SDK are updated manually
-      - dependency-name: "cloud.google.com/*"
-      - dependency-name: "github.com/Azure/azure-sdk-for-go/*"
-      # Kubernetes deps are updated by fluxcd/pkg/runtime
-      - dependency-name: "k8s.io/*"
-      - dependency-name: "sigs.k8s.io/*"
-      - dependency-name: "github.com/go-logr/*"
-      # OCI deps are updated by fluxcd/pkg/oci
-      - dependency-name: "github.com/docker/*"
-      - dependency-name: "github.com/distribution/*"
-      - dependency-name: "github.com/google/go-containerregistry*"
-      - dependency-name: "github.com/opencontainers/*"
-      # Helm deps are updated by fluxcd/pkg/helmtestserver
-      - dependency-name: "helm.sh/helm/*"
-      # Flux APIs are updated at release time
-      - dependency-name: "github.com/fluxcd/source-controller/api"
-  - package-ecosystem: "github-actions"
-    directory: "/"
-    labels: ["area/ci", "dependencies"]
-    groups:
-      ci:
-        patterns:
-          - "*"
-    schedule:
-      interval: "monthly"

.github/labels.yaml

@@ -1,39 +0,0 @@
-# Configuration file to declaratively configure labels
-# Ref: https://github.com/EndBug/label-sync#Config-files
-- name: area/bucket
-  description: Bucket related issues and pull requests
-  color: '#00b140'
-- name: area/git
-  description: Git related issues and pull requests
-  color: '#863faf'
-- name: area/helm
-  description: Helm related issues and pull requests
-  color: '#1673b6'
-- name: area/oci
-  description: OCI related issues and pull requests
-  color: '#c739ff'
-- name: area/storage
-  description: Storage related issues and pull requests
-  color: '#4b0082'
-- name: backport:release/v1.0.x
-  description: To be backported to release/v1.0.x
-  color: '#ffd700'
-- name: backport:release/v1.1.x
-  description: To be backported to release/v1.1.x
-  color: '#ffd700'
-- name: backport:release/v1.2.x
-  description: To be backported to release/v1.2.x
-  color: '#ffd700'
-- name: backport:release/v1.3.x
-  description: To be backported to release/v1.3.x
-  color: '#ffd700'
-- name: backport:release/v1.4.x
-  description: To be backported to release/v1.4.x
-  color: '#ffd700'
-- name: backport:release/v1.5.x
-  description: To be backported to release/v1.5.x
-  color: '#ffd700'
-- name: backport:release/v1.6.x
-  description: To be backported to release/v1.6.x
-  color: '#ffd700'


@@ -1,34 +0,0 @@
-name: backport
-on:
-  pull_request_target:
-    types: [closed, labeled]
-permissions:
-  contents: read
-jobs:
-  pull-request:
-    runs-on: ubuntu-latest
-    permissions:
-      contents: write
-      pull-requests: write
-    if: github.event.pull_request.state == 'closed' && github.event.pull_request.merged && (github.event_name != 'labeled' || startsWith('backport:', github.event.label.name))
-    steps:
-      - name: Checkout
-        uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
-        with:
-          ref: ${{ github.event.pull_request.head.sha }}
-      - name: Create backport PRs
-        uses: korthout/backport-action@436145e922f9561fc5ea157ff406f21af2d6b363 # v3.2.0
-        # xref: https://github.com/korthout/backport-action#inputs
-        with:
-          # Use token to allow workflows to be triggered for the created PR
-          github_token: ${{ secrets.BOT_GITHUB_TOKEN }}
-          # Match labels with a pattern `backport:<target-branch>`
-          label_pattern: '^backport:([^ ]+)$'
-          # A bit shorter pull-request title than the default
-          pull_title: '[${target_branch}] ${pull_title}'
-          # Simpler PR description than default
-          pull_description: |-
-            Automated backport to `${target_branch}`, triggered by a label in #${pull_number}.
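The `label_pattern` input above works together with the `backport:<target-branch>` labels declared in `.github/labels.yaml`: the capture group becomes the branch the backport PR targets. A minimal sketch of that mapping (the label value is only an example):

```sh
# A label such as "backport:release/v1.6.x" matches '^backport:([^ ]+)$';
# the capture group is the branch the backport PR will target.
echo "backport:release/v1.6.x" | sed -E 's/^backport:([^ ]+)$/\1/'
# -> release/v1.6.x
```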


@@ -1,31 +1,24 @@
-name: fuzz
+name: CIFuzz
 on:
   pull_request:
     branches:
-      - 'main'
-      - 'release/**'
-    paths-ignore:
-      - 'CHANGELOG.md'
-      - 'README.md'
-      - 'MAINTAINERS'
+      - main
 permissions:
   contents: read
 jobs:
-  smoketest:
+  Fuzzing:
     runs-on: ubuntu-latest
     steps:
       - name: Checkout
-        uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
-      - name: Setup Go
-        uses: actions/setup-go@d35c59abb061a4a6fb18e82ac0862c26744d6ab5 # v5.5.0
+        uses: actions/checkout@v2
+      - name: Restore Go cache
+        uses: actions/cache@v1
         with:
-          go-version: 1.24.x
-          cache-dependency-path: |
-            **/go.sum
-            **/go.mod
+          path: /home/runner/work/_temp/_github_home/go/pkg/mod
+          key: ${{ runner.os }}-go-${{ hashFiles('**/go.sum') }}
+          restore-keys: |
+            ${{ runner.os }}-go-
       - name: Smoke test Fuzzers
         run: make fuzz-smoketest
-        env:
-          SKIP_COSIGN_VERIFICATION: true


@@ -1,15 +1,10 @@
 name: e2e
 on:
-  workflow_dispatch:
   pull_request:
-    branches:
-      - 'main'
-      - 'release/**'
   push:
     branches:
-      - 'main'
-      - 'release/**'
+      - main
 permissions:
   contents: read # for actions/checkout to fetch code
@@ -20,34 +15,104 @@ jobs:
     runs-on: ubuntu-latest
     steps:
       - name: Checkout
-        uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
+        uses: actions/checkout@v2
       - name: Setup Go
-        uses: actions/setup-go@d35c59abb061a4a6fb18e82ac0862c26744d6ab5 # v5.5.0
+        uses: actions/setup-go@v2
         with:
-          go-version: 1.24.x
-          cache-dependency-path: |
-            **/go.sum
-            **/go.mod
+          go-version: 1.17.x
+      - name: Restore Go cache
+        uses: actions/cache@v1
+        with:
+          path: /home/runner/work/_temp/_github_home/go/pkg/mod
+          key: ${{ runner.os }}-go-${{ hashFiles('**/go.sum') }}
+          restore-keys: |
+            ${{ runner.os }}-go-
+      - name: Verify
+        run: make verify
       - name: Enable integration tests
-        # Only run integration tests for main and release branches
-        if: github.ref == 'refs/heads/main' || startsWith(github.ref, 'refs/heads/release/')
+        # Only run integration tests for main branch
+        if: github.ref == 'refs/heads/main'
         run: |
           echo 'GO_TAGS=integration' >> $GITHUB_ENV
+      - name: Run tests
+        env:
+          TEST_AZURE_ACCOUNT_NAME: ${{ secrets.TEST_AZURE_ACCOUNT_NAME }}
+          TEST_AZURE_ACCOUNT_KEY: ${{ secrets.TEST_AZURE_ACCOUNT_KEY }}
+        run: make test
       - name: Setup Kubernetes
-        uses: helm/kind-action@a1b0e391336a6ee6713a0583f8c6240d70863de3 # v1.12.0
+        uses: engineerd/setup-kind@v0.5.0
         with:
-          cluster_name: kind
+          version: v0.11.1
+          image: kindest/node:v1.21.1@sha256:69860bda5563ac81e3c0057d654b5253219618a22ec3a346306239bba8cfa1a6
       - name: Setup Kustomize
         uses: fluxcd/pkg/actions/kustomize@main
       - name: Setup Helm
         uses: fluxcd/pkg/actions/helm@main
       - name: Run E2E tests
         env:
-          SKIP_COSIGN_VERIFICATION: true
           CREATE_CLUSTER: false
         run: make e2e
-      - name: Print controller logs
-        if: always()
-        continue-on-error: true
-        run: |
-          kubectl -n source-system logs -l app=source-controller
+  kind-linux-arm64:
+    # Hosted on Equinix
+    # Docs: https://github.com/fluxcd/flux2/tree/main/.github/runners
+    runs-on: [self-hosted, Linux, ARM64, equinix]
+    steps:
+      - name: Checkout
+        uses: actions/checkout@v2
+      - name: Setup Go
+        uses: actions/setup-go@v2
+        with:
+          go-version: 1.17.x
+      - name: Enable integration tests
+        # Only run integration tests for main branch
+        if: github.ref == 'refs/heads/main'
+        run: |
+          echo 'GO_TAGS=integration' >> $GITHUB_ENV
+      - name: Run tests
+        env:
+          TEST_AZURE_ACCOUNT_NAME: ${{ secrets.TEST_AZURE_ACCOUNT_NAME }}
+          TEST_AZURE_ACCOUNT_KEY: ${{ secrets.TEST_AZURE_ACCOUNT_KEY }}
+        run: make test
+      - name: Prepare
+        id: prep
+        run: |
+          echo ::set-output name=CLUSTER::arm64-${GITHUB_SHA:0:7}-$(date +%s)
+          echo ::set-output name=CONTEXT::kind-arm64-${GITHUB_SHA:0:7}-$(date +%s)
+      - name: Setup Kubernetes Kind
+        run: |
+          kind create cluster --name ${{ steps.prep.outputs.CLUSTER }} --kubeconfig=/tmp/${{ steps.prep.outputs.CLUSTER }}
+      - name: Run e2e tests
+        env:
+          KIND_CLUSTER_NAME: ${{ steps.prep.outputs.CLUSTER }}
+          KUBECONFIG: /tmp/${{ steps.prep.outputs.CLUSTER }}
+          CREATE_CLUSTER: false
+          BUILD_PLATFORM: linux/arm64
+          MINIO_TAG: RELEASE.2020-09-17T04-49-20Z-arm64
+        run: make e2e
+      - name: Cleanup
+        if: always()
+        run: |
+          kind delete cluster --name ${{ steps.prep.outputs.CLUSTER }}
+          rm /tmp/${{ steps.prep.outputs.CLUSTER }}
+  # Runs 'make test' on macos-10.15 to assure development environment for
+  # contributors using MacOS.
+  darwin-amd64:
+    runs-on: macos-10.15
+    steps:
+      - name: Checkout
+        uses: actions/checkout@v2
+      - name: Setup Go
+        uses: actions/setup-go@v2
+        with:
+          go-version: 1.17.x
+      - name: Restore Go cache
+        uses: actions/cache@v1
+        with:
+          path: /home/runner/work/_temp/_github_home/go/pkg/mod
+          key: ${{ runner.os }}-go-${{ hashFiles('**/go.sum') }}
+          restore-keys: |
+            ${{ runner.os }}-go-
+      - name: Run tests
+        run: make test


@@ -14,17 +14,18 @@ jobs:
   build:
     runs-on: ubuntu-latest
     steps:
-      - name: Checkout
-        uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
+      - uses: actions/checkout@v2
       - name: Setup QEMU
-        uses: docker/setup-qemu-action@29109295f81e9208d7d86ff1c6c12d2833863392 # v3.6.0
+        uses: docker/setup-qemu-action@v1
+        with:
+          platforms: all
       - name: Setup Docker Buildx
         id: buildx
-        uses: docker/setup-buildx-action@b5ca514318bd6ebac0fb2aedd5d36ec1b5c232a2 # v3.10.0
+        uses: docker/setup-buildx-action@v1
         with:
           buildkitd-flags: "--debug"
       - name: Build multi-arch container image
-        uses: docker/build-push-action@1dc73863535b631f98b2378be8619f83b136f4a0 # v6.17.0
+        uses: docker/build-push-action@v2
         with:
           push: false
           builder: ${{ steps.buildx.outputs.name }}


@@ -7,29 +7,22 @@ on:
     inputs:
       tag:
         description: 'image tag prefix'
-        default: 'preview'
+        default: 'rc'
         required: true
 permissions:
-  contents: read
+  contents: write # needed to write releases
+  id-token: write # needed for keyless signing
+  packages: write # needed for ghcr access
 env:
   CONTROLLER: ${{ github.event.repository.name }}
 jobs:
-  release:
-    outputs:
-      hashes: ${{ steps.slsa.outputs.hashes }}
-      image_url: ${{ steps.slsa.outputs.image_url }}
-      image_digest: ${{ steps.slsa.outputs.image_digest }}
+  build-push:
     runs-on: ubuntu-latest
-    permissions:
-      contents: write # for creating the GitHub release.
-      id-token: write # for creating OIDC tokens for signing.
-      packages: write # for pushing and signing container images.
     steps:
-      - name: Checkout
-        uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
+      - uses: actions/checkout@v2
       - name: Setup Kustomize
         uses: fluxcd/pkg/actions/kustomize@main
       - name: Prepare
@@ -39,27 +32,27 @@ jobs:
           if [[ $GITHUB_REF == refs/tags/* ]]; then
            VERSION=${GITHUB_REF/refs\/tags\//}
           fi
-          echo "BUILD_DATE=$(date -u +'%Y-%m-%dT%H:%M:%SZ')" >> $GITHUB_OUTPUT
-          echo "VERSION=${VERSION}" >> $GITHUB_OUTPUT
+          echo ::set-output name=BUILD_DATE::$(date -u +'%Y-%m-%dT%H:%M:%SZ')
+          echo ::set-output name=VERSION::${VERSION}
       - name: Setup QEMU
-        uses: docker/setup-qemu-action@29109295f81e9208d7d86ff1c6c12d2833863392 # v3.6.0
+        uses: docker/setup-qemu-action@v1
       - name: Setup Docker Buildx
         id: buildx
-        uses: docker/setup-buildx-action@b5ca514318bd6ebac0fb2aedd5d36ec1b5c232a2 # v3.10.0
+        uses: docker/setup-buildx-action@v1
       - name: Login to GitHub Container Registry
-        uses: docker/login-action@74a5d142397b4f367a81961eba4e8cd7edddf772 # v3.4.0
+        uses: docker/login-action@v1
         with:
           registry: ghcr.io
           username: fluxcdbot
           password: ${{ secrets.GHCR_TOKEN }}
       - name: Login to Docker Hub
-        uses: docker/login-action@74a5d142397b4f367a81961eba4e8cd7edddf772 # v3.4.0
+        uses: docker/login-action@v1
         with:
           username: fluxcdbot
           password: ${{ secrets.DOCKER_FLUXCD_PASSWORD }}
       - name: Generate images meta
         id: meta
-        uses: docker/metadata-action@902fa8ec7d6ecbf8d84d538b9b233a880e428804 # v5.7.0
+        uses: docker/metadata-action@v3
         with:
           images: |
             fluxcd/${{ env.CONTROLLER }}
@@ -67,11 +60,8 @@ jobs:
           tags: |
             type=raw,value=${{ steps.prep.outputs.VERSION }}
       - name: Publish images
-        id: build-push
-        uses: docker/build-push-action@1dc73863535b631f98b2378be8619f83b136f4a0 # v6.17.0
+        uses: docker/build-push-action@v2
         with:
-          sbom: true
-          provenance: true
           push: true
           builder: ${{ steps.buildx.outputs.name }}
           context: .
@@ -79,82 +69,32 @@ jobs:
           platforms: linux/amd64,linux/arm/v7,linux/arm64
           tags: ${{ steps.meta.outputs.tags }}
           labels: ${{ steps.meta.outputs.labels }}
-      - uses: sigstore/cosign-installer@3454372f43399081ed03b604cb2d021dabca52bb # v3.8.2
+      - name: Check images
+        run: |
+          docker buildx imagetools inspect docker.io/fluxcd/${{ env.CONTROLLER }}:${{ steps.prep.outputs.VERSION }}
+          docker buildx imagetools inspect ghcr.io/fluxcd/${{ env.CONTROLLER }}:${{ steps.prep.outputs.VERSION }}
+          docker pull docker.io/fluxcd/${{ env.CONTROLLER }}:${{ steps.prep.outputs.VERSION }}
+          docker pull ghcr.io/fluxcd/${{ env.CONTROLLER }}:${{ steps.prep.outputs.VERSION }}
+      - uses: sigstore/cosign-installer@main
       - name: Sign images
         env:
           COSIGN_EXPERIMENTAL: 1
         run: |
-          cosign sign --yes fluxcd/${{ env.CONTROLLER }}@${{ steps.build-push.outputs.digest }}
-          cosign sign --yes ghcr.io/fluxcd/${{ env.CONTROLLER }}@${{ steps.build-push.outputs.digest }}
+          cosign sign fluxcd/${{ env.CONTROLLER }}:${{ steps.prep.outputs.VERSION }}
+          cosign sign ghcr.io/fluxcd/${{ env.CONTROLLER }}:${{ steps.prep.outputs.VERSION }}
       - name: Generate release artifacts
         if: startsWith(github.ref, 'refs/tags/v')
         run: |
           mkdir -p config/release
           kustomize build ./config/crd > ./config/release/${{ env.CONTROLLER }}.crds.yaml
           kustomize build ./config/manager > ./config/release/${{ env.CONTROLLER }}.deployment.yaml
-      - uses: anchore/sbom-action/download-syft@e11c554f704a0b820cbf8c51673f6945e0731532 # v0.20.0
+          echo '[CHANGELOG](https://github.com/fluxcd/${{ env.CONTROLLER }}/blob/main/CHANGELOG.md)' > ./config/release/notes.md
+      - uses: anchore/sbom-action/download-syft@v0
       - name: Create release and SBOM
-        id: run-goreleaser
         if: startsWith(github.ref, 'refs/tags/v')
-        uses: goreleaser/goreleaser-action@9c156ee8a17a598857849441385a2041ef570552 # v6.3.0
+        uses: goreleaser/goreleaser-action@v2
         with:
           version: latest
-          args: release --clean --skip=validate
+          args: release --release-notes=config/release/notes.md --rm-dist --skip-validate
         env:
           GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
-      - name: Generate SLSA metadata
-        id: slsa
-        env:
-          ARTIFACTS: "${{ steps.run-goreleaser.outputs.artifacts }}"
-        run: |
-          hashes=$(echo -E $ARTIFACTS | jq --raw-output '.[] | {name, "digest": (.extra.Digest // .extra.Checksum)} | select(.digest) | {digest} + {name} | join(" ") | sub("^sha256:";"")' | base64 -w0)
-          echo "hashes=$hashes" >> $GITHUB_OUTPUT
-          image_url=fluxcd/${{ env.CONTROLLER }}:${{ steps.prep.outputs.version }}
-          echo "image_url=$image_url" >> $GITHUB_OUTPUT
-          image_digest=${{ steps.build-push.outputs.digest }}
-          echo "image_digest=$image_digest" >> $GITHUB_OUTPUT
-  release-provenance:
-    needs: [release]
-    permissions:
-      actions: read # for detecting the Github Actions environment.
-      id-token: write # for creating OIDC tokens for signing.
-      contents: write # for uploading attestations to GitHub releases.
-    if: startsWith(github.ref, 'refs/tags/v')
-    uses: slsa-framework/slsa-github-generator/.github/workflows/generator_generic_slsa3.yml@v2.1.0
-    with:
-      provenance-name: "provenance.intoto.jsonl"
-      base64-subjects: "${{ needs.release.outputs.hashes }}"
-      upload-assets: true
-  dockerhub-provenance:
-    needs: [release]
-    permissions:
-      actions: read # for detecting the Github Actions environment.
-      id-token: write # for creating OIDC tokens for signing.
-      packages: write # for uploading attestations.
-    if: startsWith(github.ref, 'refs/tags/v')
-    uses: slsa-framework/slsa-github-generator/.github/workflows/generator_container_slsa3.yml@v2.1.0
-    with:
-      image: ${{ needs.release.outputs.image_url }}
-      digest: ${{ needs.release.outputs.image_digest }}
-      registry-username: fluxcdbot
-    secrets:
-      registry-password: ${{ secrets.DOCKER_FLUXCD_PASSWORD }}
-  ghcr-provenance:
-    needs: [release]
-    permissions:
-      actions: read # for detecting the Github Actions environment.
-      id-token: write # for creating OIDC tokens for signing.
-      packages: write # for uploading attestations.
-    if: startsWith(github.ref, 'refs/tags/v')
-    uses: slsa-framework/slsa-github-generator/.github/workflows/generator_container_slsa3.yml@v2.1.0
-    with:
-      image: ghcr.io/${{ needs.release.outputs.image_url }}
-      digest: ${{ needs.release.outputs.image_digest }}
-      registry-username: fluxcdbot
-    secrets:
-      registry-password: ${{ secrets.GHCR_TOKEN }}
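Because the images are signed keylessly with cosign and GitHub OIDC, consumers can verify a published tag. A hedged sketch with cosign v2.x syntax; the tag and identity regexp below are illustrative assumptions, not values taken from this workflow:

```sh
# Verify the keyless signature on a released image (tag is an example).
cosign verify ghcr.io/fluxcd/source-controller:v1.0.0 \
  --certificate-oidc-issuer=https://token.actions.githubusercontent.com \
  --certificate-identity-regexp='^https://github\.com/fluxcd/'
```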


@@ -1,10 +1,10 @@
-name: scan
+name: Scan
 on:
   push:
-    branches: [ 'main', 'release/**' ]
+    branches: [ main ]
   pull_request:
-    branches: [ 'main', 'release/**' ]
+    branches: [ main ]
   schedule:
     - cron: '18 10 * * 3'
@@ -17,10 +16,9 @@ jobs:
     name: FOSSA
     runs-on: ubuntu-latest
     steps:
-      - name: Checkout
-        uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
+      - uses: actions/checkout@v2
       - name: Run FOSSA scan and upload build data
-        uses: fossa-contrib/fossa-action@3d2ef181b1820d6dcd1972f86a767d18167fa19b # v3.0.1
+        uses: fossa-contrib/fossa-action@v1
         with:
           # FOSSA Push-Only API Token
           fossa-api-key: 5ee8bf422db1471e0bcf2bcb289185de
@@ -30,23 +29,13 @@ jobs:
     name: CodeQL
     runs-on: ubuntu-latest
     steps:
-      - name: Checkout
-        uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
-      - name: Setup Go
-        uses: actions/setup-go@d35c59abb061a4a6fb18e82ac0862c26744d6ab5 # v5.5.0
-        with:
-          go-version: 1.24.x
-          cache-dependency-path: |
-            **/go.sum
-            **/go.mod
+      - name: Checkout repository
+        uses: actions/checkout@v2
       - name: Initialize CodeQL
-        uses: github/codeql-action/init@ff0a06e83cb2de871e5a09832bc6a81e7276941f # v3.28.18
+        uses: github/codeql-action/init@v1
         with:
           languages: go
-          # xref: https://docs.github.com/en/code-security/code-scanning/automatically-scanning-your-code-for-vulnerabilities-and-errors/configuring-code-scanning#using-queries-in-ql-packs
-          # xref: https://codeql.github.com/codeql-query-help/go/
-          queries: security-and-quality
       - name: Autobuild
-        uses: github/codeql-action/autobuild@ff0a06e83cb2de871e5a09832bc6a81e7276941f # v3.28.18
+        uses: github/codeql-action/autobuild@v1
       - name: Perform CodeQL Analysis
-        uses: github/codeql-action/analyze@ff0a06e83cb2de871e5a09832bc6a81e7276941f # v3.28.18
+        uses: github/codeql-action/analyze@v1


@ -1,28 +0,0 @@
name: sync-labels
on:
workflow_dispatch:
push:
branches:
- main
paths:
- .github/labels.yaml
permissions:
contents: read
jobs:
labels:
name: Run sync
runs-on: ubuntu-latest
permissions:
issues: write
steps:
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
- uses: EndBug/label-sync@52074158190acb45f3077f9099fea818aa43f97a # v2.3.3
with:
# Configuration file
config-file: |
https://raw.githubusercontent.com/fluxcd/community/main/.github/standard-labels.yaml
.github/labels.yaml
# Strictly declarative
delete-other-labels: true


@@ -1,57 +0,0 @@
-name: tests
-on:
-  workflow_dispatch:
-  pull_request:
-    branches:
-      - 'main'
-      - 'release/**'
-  push:
-    branches:
-      - 'main'
-      - 'release/**'
-permissions:
-  contents: read # for actions/checkout to fetch code
-jobs:
-  test-linux-amd64:
-    runs-on: ubuntu-latest
-    steps:
-      - name: Checkout
-        uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
-      - name: Setup Go
-        uses: actions/setup-go@d35c59abb061a4a6fb18e82ac0862c26744d6ab5 # v5.5.0
-        with:
-          go-version: 1.24.x
-          cache-dependency-path: |
-            **/go.sum
-            **/go.mod
-      - name: Run tests
-        env:
-          SKIP_COSIGN_VERIFICATION: true
-          TEST_AZURE_ACCOUNT_NAME: ${{ secrets.TEST_AZURE_ACCOUNT_NAME }}
-          TEST_AZURE_ACCOUNT_KEY: ${{ secrets.TEST_AZURE_ACCOUNT_KEY }}
-        run: make test
-  test-linux-arm64:
-    runs-on:
-      group: "ARM64"
-    if: github.actor != 'dependabot[bot]'
-    steps:
-      - name: Checkout
-        uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
-      - name: Setup Go
-        uses: actions/setup-go@d35c59abb061a4a6fb18e82ac0862c26744d6ab5 # v5.5.0
-        with:
-          go-version: 1.24.x
-          cache-dependency-path: |
-            **/go.sum
-            **/go.mod
-      - name: Run tests
-        env:
-          SKIP_COSIGN_VERIFICATION: true
-          TEST_AZURE_ACCOUNT_NAME: ${{ secrets.TEST_AZURE_ACCOUNT_NAME }}
-          TEST_AZURE_ACCOUNT_KEY: ${{ secrets.TEST_AZURE_ACCOUNT_KEY }}
-        run: make test


@@ -1,31 +0,0 @@
-name: verify
-on:
-  pull_request:
-    branches:
-      - 'main'
-      - 'release/**'
-  push:
-    branches:
-      - 'main'
-      - 'release/**'
-permissions:
-  contents: read # for actions/checkout to fetch code
-jobs:
-  verify-linux-amd64:
-    runs-on: ubuntu-latest
-    steps:
-      - name: Checkout
-        uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
-      - name: Setup Go
-        uses: actions/setup-go@d35c59abb061a4a6fb18e82ac0862c26744d6ab5 # v5.5.0
-        with:
-          go-version: 1.24.x
-          cache-dependency-path: |
-            **/go.sum
-            **/go.mod
-      - name: Verify
-        run: make verify


@@ -4,26 +4,9 @@ builds:
   - skip: true
 release:
+  prerelease: "true"
   extra_files:
     - glob: config/release/*.yaml
-  prerelease: "auto"
-  header: |
-    ## Changelog
-    [{{.Tag}} changelog](https://github.com/fluxcd/{{.ProjectName}}/blob/{{.Tag}}/CHANGELOG.md)
-  footer: |
-    ## Container images
-    - `docker.io/fluxcd/{{.ProjectName}}:{{.Tag}}`
-    - `ghcr.io/fluxcd/{{.ProjectName}}:{{.Tag}}`
-    Supported architectures: `linux/amd64`, `linux/arm64` and `linux/arm/v7`.
-    The container images are built on GitHub hosted runners and are signed with cosign and GitHub OIDC.
-    To verify the images and their provenance (SLSA level 3), please see the [security documentation](https://fluxcd.io/flux/security/).
-changelog:
-  disable: true
 checksum:
   extra_files:
@@ -49,7 +32,6 @@ signs:
     certificate: "${artifact}.pem"
     args:
       - sign-blob
-      - "--yes"
       - "--output-certificate=${certificate}"
      - "--output-signature=${signature}"
      - "${artifact}"

ATTRIBUTIONS.md

File diff suppressed because it is too large

File diff suppressed because it is too large


@@ -1,6 +1,6 @@
 # Development
-> **Note:** Please take a look at <https://fluxcd.io/contributing/flux/>
+> **Note:** Please take a look at <https://fluxcd.io/docs/contributing/flux/>
 > to find out about how to contribute to Flux and how to interact with the
 > Flux Development team.
@@ -13,9 +13,30 @@ There are a number of dependencies required to be able to run the controller and
 - [Install Docker](https://docs.docker.com/engine/install/)
 - (Optional) [Install Kubebuilder](https://book.kubebuilder.io/quick-start.html#installation)
+The [libgit2](https://libgit2.org/) dependency is now automatically managed by the Makefile logic.
+However, it depends on [pkg-config](https://freedesktop.org/wiki/Software/pkg-config/) being installed:
+
+### macOS
+
+```console
+$ # Ensure pkg-config is installed
+$ brew install pkg-config
+```
+
+### Linux
+
+```console
+$ # Ensure pkg-config is installed
+$ pacman -S pkgconf
+```
+
+**Note:** Example shown is for Arch Linux, but likewise procedure can be
+followed using any other package manager. Some distributions may have slight
+variation of package names (e.g. `apt install -y pkg-config`).
+
 In addition to the above, the following dependencies are also used by some of the `make` targets:
-- `controller-gen` (v0.12.0)
+- `controller-gen` (v0.7.0)
 - `gen-crd-api-reference-docs` (v0.3.0)
 - `setup-envtest` (latest)
@@ -24,7 +45,7 @@ If any of the above dependencies are not present on your system, the first invoc
 ## How to run the test suite
 Prerequisites:
-* Go >= 1.24
+* Go >= 1.17
 You can run the test suite by simply doing
@@ -32,14 +53,6 @@ You can run the test suite by simply doing
 make test
 ```
-
-### Additional test configuration
-
-By setting the `GO_TEST_ARGS` environment variable you can pass additional flags to [`go test`](https://pkg.go.dev/cmd/go#hdr-Test_packages):
-
-```sh
-make test GO_TEST_ARGS="-v -run=TestReadIgnoreFile/with_domain"
-```
 ## How to run the controller locally
 Install the controller's CRDs on your test cluster:
@@ -58,7 +71,7 @@ make run
 ### Building the container image
 Set the name of the container image to be created from the source code. This will be used
 when building, pushing and referring to the image on YAML files:
 ```sh
@@ -79,7 +92,7 @@ make docker-push
 ```
 Alternatively, the three steps above can be done in a single line:
 ```sh
 IMG=registry-path/source-controller TAG=latest BUILD_ARGS=--push \
   make docker-build
@@ -128,12 +141,18 @@ Create a `.vscode/launch.json` file:
       "type": "go",
       "request": "launch",
       "mode": "auto",
-      "program": "${workspaceFolder}/main.go",
-      "args": ["--storage-adv-addr=:0", "--storage-path=${workspaceFolder}/bin/data"]
+      "envFile": "${workspaceFolder}/build/.env",
+      "program": "${workspaceFolder}/main.go"
     }
   ]
 }
 ```
+
+Create the environment file containing details on how to load
+`libgit2` dependencies:
+
+```bash
+make env
+```
+
 Start debugging by either clicking `Run` > `Start Debugging` or using
 the relevant shortcut.
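Outside of VS Code, the same `build/.env` file can be loaded into a plain shell before ad-hoc test or debug sessions; a minimal sketch, assuming `make env` has already been run from the repository root:

```sh
# Export all variables from build/.env, then run the tests with the
# libgit2-related CGO settings in effect.
set -a
. build/.env
set +a
go test ./...
```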


@@ -1,15 +1,28 @@
-ARG GO_VERSION=1.24
-ARG XX_VERSION=1.6.1
+ARG BASE_VARIANT=alpine
+ARG GO_VERSION=1.17
+ARG XX_VERSION=1.1.0
+
+ARG LIBGIT2_IMG=ghcr.io/fluxcd/golang-with-libgit2
+ARG LIBGIT2_TAG=libgit2-1.3.1
+
+FROM ${LIBGIT2_IMG}:${LIBGIT2_TAG} AS libgit2-libs
 FROM --platform=$BUILDPLATFORM tonistiigi/xx:${XX_VERSION} AS xx
-# Docker buildkit multi-arch build requires golang alpine
-FROM --platform=$BUILDPLATFORM golang:${GO_VERSION}-alpine AS builder
+FROM --platform=$BUILDPLATFORM golang:${GO_VERSION}-${BASE_VARIANT} as gostable
+
+FROM gostable AS go-linux
+
+# Build-base consists of build platform dependencies and xx.
+# These will be used at current arch to yield execute the cross compilations.
+FROM go-${TARGETOS} AS build-base
+
+RUN apk add --no-cache clang lld pkgconfig
 # Copy the build utilities.
 COPY --from=xx / /
-ARG TARGETPLATFORM
+# build-go-mod can still be cached at build platform architecture.
+FROM build-base as build-go-mod
 # Configure workspace
 WORKDIR /workspace
@@ -24,25 +37,76 @@ COPY go.sum go.sum
 # Cache modules
 RUN go mod download
-# Copy source code
-COPY main.go main.go
-COPY pkg/ pkg/
-COPY internal/ internal/
+# The musl-tool-chain layer is an adhoc solution
+# for the problem in which xx gets confused during compilation
+# and a) looks for gold linker and then b) cannot find musl's dynamic linker.
+FROM --platform=$BUILDPLATFORM alpine as musl-tool-chain
+COPY --from=xx / /
+RUN apk add bash curl tar
+WORKDIR /workspace
+COPY hack/download-musl.sh .
 ARG TARGETPLATFORM
 ARG TARGETARCH
+RUN ROOT_DIR="$(pwd)" TARGET_ARCH="$(xx-info alpine-arch)" ENV_FILE=true \
+    ./download-musl.sh
-# build without specifing the arch
-ENV CGO_ENABLED=0
-RUN xx-go build -trimpath -a -o source-controller main.go
-FROM alpine:3.21
+# Build stage install per target platform
+# dependency and effectively cross compile the application.
+FROM build-go-mod as build
+ARG TARGETPLATFORM
+COPY --from=libgit2-libs /usr/local/ /usr/local/
+# Some dependencies have to installed
+# for the target platform: https://github.com/tonistiigi/xx#go--cgo
+RUN xx-apk add musl-dev gcc lld
+WORKDIR /workspace
+# Copy source code
+COPY main.go main.go
+COPY controllers/ controllers/
+COPY pkg/ pkg/
+COPY internal/ internal/
+COPY --from=musl-tool-chain /workspace/build /workspace/build
+ARG TARGETPLATFORM
+ARG TARGETARCH
+ENV CGO_ENABLED=1
+# Instead of using xx-go, (cross) compile with vanilla go leveraging musl tool chain.
+RUN export $(cat build/musl/$(xx-info alpine-arch).env | xargs) && \
+    export LIBRARY_PATH="/usr/local/$(xx-info triple):/usr/local/$(xx-info triple)/lib64" && \
+    export PKG_CONFIG_PATH="/usr/local/$(xx-info triple)/lib/pkgconfig:/usr/local/$(xx-info triple)/lib64/pkgconfig" && \
+    export CGO_LDFLAGS="$(pkg-config --static --libs --cflags libssh2 openssl libgit2) -static" && \
+    GOARCH=$TARGETARCH go build \
+        -ldflags "-s -w" \
+        -tags 'netgo,osusergo,static_build' \
+        -o /source-controller -trimpath main.go;
+# Ensure that the binary was cross-compiled correctly to the target platform.
+RUN xx-verify --static /source-controller
+FROM alpine:3.15
 ARG TARGETPLATFORM
 RUN apk --no-cache add ca-certificates \
   && update-ca-certificates
-COPY --from=builder /workspace/source-controller /usr/local/bin/
+# Create minimal nsswitch.conf file to prioritize the usage of /etc/hosts over DNS queries.
+# https://github.com/gliderlabs/docker-alpine/issues/367#issuecomment-354316460
+RUN [ ! -e /etc/nsswitch.conf ] && echo 'hosts: files dns' > /etc/nsswitch.conf
+# Copy over binary from build
+COPY --from=build /source-controller /usr/local/bin/
+COPY ATTRIBUTIONS.md /
 USER 65534:65534
 ENTRYPOINT [ "source-controller" ]
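These stages are exercised through Docker Buildx; a sketch of the invocation, mirroring the `docker-build` target in the Makefile shown below (image name and tag are placeholders):

```sh
# Cross-build the controller image for the supported platforms.
# LIBGIT2_IMG and LIBGIT2_TAG fall back to the ARG defaults above.
docker buildx build \
  --platform=linux/amd64,linux/arm64,linux/arm/v7 \
  -t fluxcd/source-controller:latest .
```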


@@ -7,4 +7,6 @@ from the main Flux v2 git repository, as listed in
 https://github.com/fluxcd/flux2/blob/main/MAINTAINERS
-Dipti Pai, Microsoft <diptipai@microsoft.com> (github: @dipti-pai, slack: Dipti Pai)
+In alphabetical order:
+
+Paulo Gomes, Weaveworks <paulo.gomes@weave.works> (github: @pjbgf, slack: pjbgf)

Makefile

@@ -2,14 +2,9 @@
 IMG ?= fluxcd/source-controller
 TAG ?= latest
-# Allows for defining additional Go test args, e.g. '-tags integration'.
-GO_TEST_ARGS ?= -race
-# Allows for filtering tests based on the specified prefix
-GO_TEST_PREFIX ?=
-# Defines whether cosign verification should be skipped.
-SKIP_COSIGN_VERIFICATION ?= false
+# Base image used to build the Go binary
+LIBGIT2_IMG ?= ghcr.io/fluxcd/golang-with-libgit2
+LIBGIT2_TAG ?= libgit2-1.3.1
 # Allows for defining additional Docker buildx arguments,
 # e.g. '--push'.
@@ -17,8 +12,7 @@ BUILD_ARGS ?=
 # Architectures to build images for
 BUILD_PLATFORMS ?= linux/amd64,linux/arm64,linux/arm/v7
-# Go additional tag arguments, e.g. 'integration',
-# this is append to the tag arguments required for static builds
+# Go additional tag arguments, e.g. 'integration'
 GO_TAGS ?=
@@ -29,17 +23,50 @@ REPOSITORY_ROOT := $(shell git rev-parse --show-toplevel)
 BUILD_DIR := $(REPOSITORY_ROOT)/build
 # Other dependency versions
-ENVTEST_BIN_VERSION ?= 1.24.0
+ENVTEST_BIN_VERSION ?= 1.19.2
-# FUZZ_TIME defines the max amount of time, in Go Duration,
-# each fuzzer should run for.
-FUZZ_TIME ?= 1m
+# Caches libgit2 versions per tag, "forcing" rebuild only when needed.
+LIBGIT2_PATH := $(BUILD_DIR)/libgit2/$(LIBGIT2_TAG)
+LIBGIT2_LIB_PATH := $(LIBGIT2_PATH)/lib
+LIBGIT2_LIB64_PATH := $(LIBGIT2_PATH)/lib64
+LIBGIT2 := $(LIBGIT2_LIB_PATH)/libgit2.a
+MUSL-CC =
+export CGO_ENABLED=1
+export PKG_CONFIG_PATH=$(LIBGIT2_LIB_PATH)/pkgconfig
+export LIBRARY_PATH=$(LIBGIT2_LIB_PATH)
+export CGO_CFLAGS=-I$(LIBGIT2_PATH)/include -I$(LIBGIT2_PATH)/include/openssl
+# The pkg-config command will yield warning messages until libgit2 is downloaded.
+ifeq ($(shell uname -s),Darwin)
+export CGO_LDFLAGS=$(shell PKG_CONFIG_PATH=$(PKG_CONFIG_PATH) pkg-config --libs --static --cflags libssh2 openssl libgit2 2>/dev/null)
 GO_STATIC_FLAGS=-ldflags "-s -w" -tags 'netgo,osusergo,static_build$(addprefix ,,$(GO_TAGS))'
+else
+export PKG_CONFIG_PATH:=$(PKG_CONFIG_PATH):$(LIBGIT2_LIB64_PATH)/pkgconfig
+export LIBRARY_PATH:=$(LIBRARY_PATH):$(LIBGIT2_LIB64_PATH)
+export CGO_LDFLAGS=$(shell PKG_CONFIG_PATH=$(PKG_CONFIG_PATH) pkg-config --libs --static --cflags libssh2 openssl libgit2 2>/dev/null)
+endif
+ifeq ($(shell uname -s),Linux)
+ifeq ($(shell uname -m),x86_64)
+# Linux x86_64 seem to be able to cope with the static libraries
+# by having only musl-dev installed, without the need of using musl toolchain.
+GO_STATIC_FLAGS=-ldflags "-s -w" -tags 'netgo,osusergo,static_build$(addprefix ,,$(GO_TAGS))'
+else
+MUSL-PREFIX=$(BUILD_DIR)/musl/$(shell uname -m)-linux-musl-native/bin/$(shell uname -m)-linux-musl
+MUSL-CC=$(MUSL-PREFIX)-gcc
+export CC=$(MUSL-PREFIX)-gcc
+export CXX=$(MUSL-PREFIX)-g++
+export AR=$(MUSL-PREFIX)-ar
+GO_STATIC_FLAGS=-ldflags "-s -w -extldflags \"-static\"" -tags 'netgo,osusergo,static_build$(addprefix ,,$(GO_TAGS))'
+endif
+endif
 # API (doc) generation utilities
-CONTROLLER_GEN_VERSION ?= v0.16.1
-GEN_API_REF_DOCS_VERSION ?= e327d0730470cbd61b06300f81c5fcf91c23c113
+CONTROLLER_GEN_VERSION ?= v0.7.0
+GEN_API_REF_DOCS_VERSION ?= v0.3.0
 # If gobin not set, create one on ./build and add to path.
 ifeq (,$(shell go env GOBIN))
@@ -61,38 +88,26 @@ ifeq ($(shell uname -s),Darwin)
 ENVTEST_ARCH=amd64
 endif
-all: manager
+all: build
-# Build manager binary
-manager: generate fmt vet
+build: check-deps $(LIBGIT2) ## Build manager binary
 	go build $(GO_STATIC_FLAGS) -o $(BUILD_DIR)/bin/manager main.go
 KUBEBUILDER_ASSETS?="$(shell $(ENVTEST) --arch=$(ENVTEST_ARCH) use -i $(ENVTEST_KUBERNETES_VERSION) --bin-dir=$(ENVTEST_ASSETS_DIR) -p path)"
-test: install-envtest test-api ## Run all tests
-	HTTPS_PROXY="" HTTP_PROXY="" \
+test: $(LIBGIT2) install-envtest test-api check-deps ## Run tests
 	KUBEBUILDER_ASSETS=$(KUBEBUILDER_ASSETS) \
-	GIT_CONFIG_GLOBAL=/dev/null \
-	GIT_CONFIG_NOSYSTEM=true \
-	go test $(GO_STATIC_FLAGS) \
-	./... \
-	$(GO_TEST_ARGS) \
-	-coverprofile cover.out
+	go test $(GO_STATIC_FLAGS) ./... -coverprofile cover.out
-test-ctrl: install-envtest test-api ## Run controller tests
-	HTTPS_PROXY="" HTTP_PROXY="" \
-	KUBEBUILDER_ASSETS=$(KUBEBUILDER_ASSETS) \
-	GIT_CONFIG_GLOBAL=/dev/null \
-	go test $(GO_STATIC_FLAGS) \
-	-run "^$(GO_TEST_PREFIX).*" \
-	-v ./internal/controller \
-	-coverprofile cover.out
+check-deps:
+ifeq ($(shell uname -s),Darwin)
+	if ! command -v pkg-config &> /dev/null; then echo "pkg-config is required"; exit 1; fi
+endif
 test-api: ## Run api tests
-	cd api; go test $(GO_TEST_ARGS) ./... -coverprofile cover.out
+	cd api; go test ./... -coverprofile cover.out
-run: generate fmt vet manifests ## Run against the configured Kubernetes cluster in ~/.kube/config
-	@mkdir -p $(PWD)/bin/data
-	go run $(GO_STATIC_FLAGS) ./main.go --storage-adv-addr=:0 --storage-path=$(PWD)/bin/data
+run: $(LIBGIT2) generate fmt vet manifests ## Run against the configured Kubernetes cluster in ~/.kube/config
+	go run $(GO_STATIC_FLAGS) ./main.go
 install: manifests ## Install CRDs into a cluster
 	kustomize build config/crd | kubectl apply -f -
@@ -115,17 +130,18 @@ manifests: controller-gen ## Generate manifests, e.g. CRD, RBAC, etc.
 	cd api; $(CONTROLLER_GEN) $(CRD_OPTIONS) rbac:roleName=manager-role paths="./..." output:crd:artifacts:config="../config/crd/bases"
 api-docs: gen-crd-api-reference-docs ## Generate API reference documentation
-	$(GEN_CRD_API_REFERENCE_DOCS) -api-dir=./api/v1 -config=./hack/api-docs/config.json -template-dir=./hack/api-docs/template -out-file=./docs/api/v1/source.md
+	$(GEN_CRD_API_REFERENCE_DOCS) -api-dir=./api/v1beta2 -config=./hack/api-docs/config.json -template-dir=./hack/api-docs/template -out-file=./docs/api/source.md
 tidy: ## Run go mod tidy
-	cd api; rm -f go.sum; go mod tidy -compat=1.24
-	rm -f go.sum; go mod tidy -compat=1.24
+	go mod tidy
+	cd api; go mod tidy
 fmt: ## Run go fmt against code
 	go fmt ./...
 	cd api; go fmt ./...
-	cd tests/fuzz; go fmt .
-vet: ## Run go vet against code
+vet: $(LIBGIT2) ## Run go vet against code
 	go vet ./...
 	cd api; go vet ./...
@@ -134,6 +150,8 @@ generate: controller-gen ## Generate API code
 docker-build: ## Build the Docker image
 	docker buildx build \
+		--build-arg LIBGIT2_IMG=$(LIBGIT2_IMG) \
+		--build-arg LIBGIT2_TAG=$(LIBGIT2_TAG) \
 		--platform=$(BUILD_PLATFORMS) \
 		-t $(IMG):$(TAG) \
 		$(BUILD_ARGS) .
@@ -145,13 +163,13 @@ docker-push: ## Push Docker image
 CONTROLLER_GEN = $(GOBIN)/controller-gen
 .PHONY: controller-gen
 controller-gen: ## Download controller-gen locally if necessary.
-	$(call go-install-tool,$(CONTROLLER_GEN),sigs.k8s.io/controller-tools/cmd/controller-gen@$(CONTROLLER_GEN_VERSION))
+	$(call go-install-tool,$(CONTROLLER_GEN),sigs.k8s.io/controller-tools/cmd/controller-gen@v0.7.0)
 # Find or download gen-crd-api-reference-docs
 GEN_CRD_API_REFERENCE_DOCS = $(GOBIN)/gen-crd-api-reference-docs
 .PHONY: gen-crd-api-reference-docs
 gen-crd-api-reference-docs: ## Download gen-crd-api-reference-docs locally if necessary
-	$(call go-install-tool,$(GEN_CRD_API_REFERENCE_DOCS),github.com/ahmetb/gen-crd-api-reference-docs@$(GEN_API_REF_DOCS_VERSION))
+	$(call go-install-tool,$(GEN_CRD_API_REFERENCE_DOCS),github.com/ahmetb/gen-crd-api-reference-docs@v0.3.0)
 ENVTEST = $(GOBIN)/setup-envtest
 .PHONY: envtest
@@ -166,19 +184,47 @@ install-envtest: setup-envtest ## Download envtest binaries locally.
 	# setup-envtest sets anything below k8s to 0555
 	chmod -R u+w $(BUILD_DIR)/testbin
+libgit2: $(LIBGIT2) ## Detect or download libgit2 library
+$(LIBGIT2): $(MUSL-CC)
+	IMG=$(LIBGIT2_IMG) TAG=$(LIBGIT2_TAG) ./hack/install-libraries.sh
+$(MUSL-CC):
+ifneq ($(shell uname -s),Darwin)
+	./hack/download-musl.sh
+endif
 .PHONY: help
 help: ## Display this help menu
 	@awk 'BEGIN {FS = ":.*##"; printf "\nUsage:\n make \033[36m<target>\033[0m\n"} /^[a-zA-Z_0-9-]+:.*?##/ { printf " \033[36m%-20s\033[0m %s\n", $$1, $$2 } /^##@/ { printf "\n\033[1m%s\033[0m\n", substr($$0, 5) } ' $(MAKEFILE_LIST)
+update-attributions:
+	./hack/update-attributions.sh
 e2e:
 	./hack/ci/e2e.sh
-verify: fmt vet manifests api-docs tidy
-	@if [ ! "$$(git status --porcelain --untracked-files=no)" = "" ]; then \
-		echo "working directory is dirty:"; \
-		git --no-pager diff; \
-		exit 1; \
-	fi
+verify: update-attributions fmt vet manifests api-docs
+ifneq ($(shell grep -o 'LIBGIT2_IMG ?= \w.*' Makefile | cut -d ' ' -f 3):$(shell grep -o 'LIBGIT2_TAG ?= \w.*' Makefile | cut -d ' ' -f 3), \
+	$(shell grep -o "LIBGIT2_IMG=\w.*" Dockerfile | cut -d'=' -f2):$(shell grep -o "LIBGIT2_TAG=\w.*" Dockerfile | cut -d'=' -f2))
+	@{ \
+	echo "LIBGIT2_IMG and LIBGIT2_TAG must match in both Makefile and Dockerfile"; \
+	exit 1; \
+	}
+endif
+ifneq ($(shell grep -o 'LIBGIT2_TAG ?= \w.*' Makefile | cut -d ' ' -f 3), $(shell grep -o "LIBGIT2_TAG=.*" tests/fuzz/oss_fuzz_build.sh | sed 's;LIBGIT2_TAG="$${LIBGIT2_TAG:-;;g' | sed 's;}";;g'))
+	@{ \
+	echo "LIBGIT2_TAG must match in both Makefile and tests/fuzz/oss_fuzz_build.sh"; \
+	exit 1; \
+	}
+endif
+ifneq (, $(shell git status --porcelain --untracked-files=no))
+	@{ \
+	echo "working directory is dirty:"; \
+	git --no-pager diff; \
+	exit 1; \
+	}
+endif
@@ -188,33 +234,37 @@ TMP_DIR=$$(mktemp -d) ;\
 cd $$TMP_DIR ;\
 go mod init tmp ;\
 echo "Downloading $(2)" ;\
-env -i bash -c "GOBIN=$(GOBIN) PATH=\"$(PATH)\" GOPATH=$(shell go env GOPATH) GOCACHE=$(shell go env GOCACHE) go install $(2)" ;\
+env -i bash -c "GOBIN=$(GOBIN) PATH=$(PATH) GOPATH=$(shell go env GOPATH) GOCACHE=$(shell go env GOCACHE) go install $(2)" ;\
 rm -rf $$TMP_DIR ;\
 }
 endef
-# Build fuzzers used by oss-fuzz.
-fuzz-build:
-	rm -rf $(shell pwd)/build/fuzz/
-	mkdir -p $(shell pwd)/build/fuzz/out/
+# Build fuzzers
+fuzz-build: $(LIBGIT2)
+	rm -rf $(BUILD_DIR)/fuzz/
+	mkdir -p $(BUILD_DIR)/fuzz/out/
 	docker build . --tag local-fuzzing:latest -f tests/fuzz/Dockerfile.builder
 	docker run --rm \
 		-e FUZZING_LANGUAGE=go -e SANITIZER=address \
 		-e CIFUZZ_DEBUG='True' -e OSS_FUZZ_PROJECT_NAME=fluxcd \
-		-v "$(shell pwd)/build/fuzz/out":/out \
+		-v "$(BUILD_DIR)/fuzz/out":/out \
 		local-fuzzing:latest
-# Run each fuzzer once to ensure they will work when executed by oss-fuzz.
 fuzz-smoketest: fuzz-build
 	docker run --rm \
-		-v "$(shell pwd)/build/fuzz/out":/out \
+		-v "$(BUILD_DIR)/fuzz/out":/out \
 		-v "$(shell pwd)/tests/fuzz/oss_fuzz_run.sh":/runner.sh \
 		local-fuzzing:latest \
 		bash -c "/runner.sh"
-# Run fuzz tests for the duration set in FUZZ_TIME.
-fuzz-native:
-	KUBEBUILDER_ASSETS=$(KUBEBUILDER_ASSETS) \
-	FUZZ_TIME=$(FUZZ_TIME) \
-	./tests/fuzz/native_go_run.sh
+# Creates an env file that can be used to load all source-controller's dependencies
+# this is handy when you want to run adhoc debug sessions on tests or start the
+# controller in a new debug session.
+env: $(LIBGIT2)
+	echo 'GO_ENABLED="1"' > $(BUILD_DIR)/.env
+	echo 'PKG_CONFIG_PATH="$(PKG_CONFIG_PATH)"' >> $(BUILD_DIR)/.env
+	echo 'LIBRARY_PATH="$(LIBRARY_PATH)"' >> $(BUILD_DIR)/.env
+	echo 'CGO_CFLAGS="$(CGO_CFLAGS)"' >> $(BUILD_DIR)/.env
+	echo 'CGO_LDFLAGS="$(CGO_LDFLAGS)"' >> $(BUILD_DIR)/.env
+	echo 'KUBEBUILDER_ASSETS=$(KUBEBUILDER_ASSETS)' >> $(BUILD_DIR)/.env
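The `GO_STATIC_FLAGS` plumbing above exists so the manager links statically against libgit2. A quick way to check the result, assuming a Linux host and the v0.24.1 target names (the exact `ldd` message varies by libc):

```sh
# Build the manager, then confirm it carries no dynamic dependencies;
# "not a dynamic executable" indicates a fully static binary.
make build
ldd build/bin/manager || true
```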

PROJECT

@@ -1,21 +1,12 @@
 domain: toolkit.fluxcd.io
 repo: github.com/fluxcd/source-controller
 resources:
-- group: source
-  kind: GitRepository
-  version: v1
 - group: source
   kind: GitRepository
   version: v1beta2
-- group: source
-  kind: HelmRepository
-  version: v1
 - group: source
   kind: HelmRepository
   version: v1beta2
-- group: source
-  kind: HelmChart
-  version: v1
 - group: source
   kind: HelmChart
   version: v1beta2
@@ -34,13 +25,4 @@ resources:
 - group: source
   kind: Bucket
   version: v1beta1
-- group: source
-  kind: OCIRepository
-  version: v1beta2
-- group: source
-  kind: Bucket
-  version: v1
-- group: source
-  kind: OCIRepository
-  version: v1
 version: "2"


@@ -5,49 +5,23 @@
 [![report](https://goreportcard.com/badge/github.com/fluxcd/source-controller)](https://goreportcard.com/report/github.com/fluxcd/source-controller)
 [![license](https://img.shields.io/github/license/fluxcd/source-controller.svg)](https://github.com/fluxcd/source-controller/blob/main/LICENSE)
 [![release](https://img.shields.io/github/release/fluxcd/source-controller/all.svg)](https://github.com/fluxcd/source-controller/releases)
 The source-controller is a Kubernetes operator, specialised in artifacts acquisition
-from external sources such as Git, OCI, Helm repositories and S3-compatible buckets.
+from external sources such as Git, Helm repositories and S3 buckets.
 The source-controller implements the
-[source.toolkit.fluxcd.io](docs/spec/README.md) API
-and is a core component of the [GitOps toolkit](https://fluxcd.io/flux/components/).
+[source.toolkit.fluxcd.io](https://github.com/fluxcd/source-controller/tree/master/docs/spec/v1beta1) API
+and is a core component of the [GitOps toolkit](https://fluxcd.io/docs/components/).
 ![overview](docs/diagrams/source-controller-overview.png)
-## APIs
-| Kind | API Version |
-|----------------------------------------------------|-------------------------------|
-| [GitRepository](docs/spec/v1/gitrepositories.md) | `source.toolkit.fluxcd.io/v1` |
-| [OCIRepository](docs/spec/v1/ocirepositories.md) | `source.toolkit.fluxcd.io/v1` |
-| [HelmRepository](docs/spec/v1/helmrepositories.md) | `source.toolkit.fluxcd.io/v1` |
-| [HelmChart](docs/spec/v1/helmcharts.md) | `source.toolkit.fluxcd.io/v1` |
-| [Bucket](docs/spec/v1/buckets.md) | `source.toolkit.fluxcd.io/v1` |
-## Features
-* authenticates to sources (SSH, user/password, API token, Workload Identity)
-* validates source authenticity (PGP, Cosign, Notation)
+Features:
+* authenticates to sources (SSH, user/password, API token)
+* validates source authenticity (PGP)
 * detects source changes based on update policies (semver)
 * fetches resources on-demand and on-a-schedule
 * packages the fetched resources into a well-known format (tar.gz, yaml)
 * makes the artifacts addressable by their source identifier (sha, version, ts)
 * makes the artifacts available in-cluster to interested 3rd parties
 * notifies interested 3rd parties of source changes and availability (status conditions, events, hooks)
-* reacts to Git, Helm and OCI artifacts push events (via [notification-controller](https://github.com/fluxcd/notification-controller))
+* reacts to Git push and Helm chart upload events (via [notification-controller](https://github.com/fluxcd/notification-controller))
-## Guides
-* [Get started with Flux](https://fluxcd.io/flux/get-started/)
-* [Setup Webhook Receivers](https://fluxcd.io/flux/guides/webhook-receivers/)
-* [Setup Notifications](https://fluxcd.io/flux/guides/notifications/)
-* [How to build, publish and consume OCI Artifacts with Flux](https://fluxcd.io/flux/cheatsheets/oci-artifacts/)
-## Roadmap
-The roadmap for the Flux family of projects can be found at <https://fluxcd.io/roadmap/>.
-## Contributing
-This project is Apache 2.0 licensed and accepts contributions via GitHub pull requests.
-To start contributing please see the [development guide](DEVELOPMENT.md).


@@ -1,35 +1,28 @@
 module github.com/fluxcd/source-controller/api
-go 1.24.0
+go 1.17
 require (
-	github.com/fluxcd/pkg/apis/acl v0.7.0
-	github.com/fluxcd/pkg/apis/meta v1.12.0
-	k8s.io/apimachinery v0.33.0
-	sigs.k8s.io/controller-runtime v0.21.0
+	github.com/fluxcd/pkg/apis/acl v0.0.3
+	github.com/fluxcd/pkg/apis/meta v0.12.2
+	k8s.io/apimachinery v0.23.5
+	sigs.k8s.io/controller-runtime v0.11.2
 )
+// Fix CVE-2022-28948
+replace gopkg.in/yaml.v3 => gopkg.in/yaml.v3 v3.0.1
 require (
-	github.com/fxamacker/cbor/v2 v2.8.0 // indirect
-	github.com/go-logr/logr v1.4.2 // indirect
+	github.com/go-logr/logr v1.2.2 // indirect
 	github.com/gogo/protobuf v1.3.2 // indirect
+	github.com/google/go-cmp v0.5.6 // indirect
+	github.com/google/gofuzz v1.2.0 // indirect
 	github.com/json-iterator/go v1.1.12 // indirect
-	github.com/kr/pretty v0.3.1 // indirect
 	github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
 	github.com/modern-go/reflect2 v1.0.2 // indirect
-	github.com/spf13/pflag v1.0.6 // indirect
-	github.com/x448/float16 v0.8.4 // indirect
-	golang.org/x/net v0.40.0 // indirect
-	golang.org/x/text v0.25.0 // indirect
-	gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c // indirect
+	golang.org/x/net v0.0.0-20211215060638-4ddde0e984e9 // indirect
+	golang.org/x/text v0.3.7 // indirect
 	gopkg.in/inf.v0 v0.9.1 // indirect
-	k8s.io/klog/v2 v2.130.1 // indirect
-	k8s.io/utils v0.0.0-20250321185631-1f6e0b77f77e // indirect
-	sigs.k8s.io/json v0.0.0-20241014173422-cfa47c3a1cc8 // indirect
-	sigs.k8s.io/randfill v1.0.0 // indirect
-	sigs.k8s.io/structured-merge-diff/v4 v4.7.0 // indirect
-	sigs.k8s.io/yaml v1.4.0 // indirect
+	gopkg.in/yaml.v2 v2.4.0 // indirect
+	k8s.io/klog/v2 v2.30.0 // indirect
+	k8s.io/utils v0.0.0-20211208161948-7d6a63dca704 // indirect
+	sigs.k8s.io/json v0.0.0-20211208200746-9f7c6b3444d2 // indirect
+	sigs.k8s.io/structured-merge-diff/v4 v4.2.1 // indirect
 )
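The `replace` directive pins `gopkg.in/yaml.v3` across the api module's dependency graph; to see which version is actually selected one might run (a sketch, assuming yaml.v3 appears in the module graph):

```sh
# Print the effective gopkg.in/yaml.v3 version after the replace.
cd api && go list -m gopkg.in/yaml.v3
```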


@@ -1,117 +1,929 @@
cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU=
cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU=
cloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY=
cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc=
cloud.google.com/go v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0=
cloud.google.com/go v0.50.0/go.mod h1:r9sluTvynVuxRIOHXQEHMFffphuXHOMZMycpNR5e6To=
cloud.google.com/go v0.52.0/go.mod h1:pXajvRH/6o3+F9jDHZWQ5PbGhn+o8w9qiu/CffaVdO4=
cloud.google.com/go v0.53.0/go.mod h1:fp/UouUEsRkN6ryDKNW/Upv/JBKnv6WDthjR6+vze6M=
cloud.google.com/go v0.54.0/go.mod h1:1rq2OEkV3YMf6n/9ZvGWI3GWw0VoqH/1x2nd8Is/bPc=
cloud.google.com/go v0.56.0/go.mod h1:jr7tqZxxKOVYizybht9+26Z/gUq7tiRzu+ACVAMbKVk=
cloud.google.com/go v0.57.0/go.mod h1:oXiQ6Rzq3RAkkY7N6t3TcE6jE+CIBBbA36lwQ1JyzZs=
cloud.google.com/go v0.62.0/go.mod h1:jmCYTdRCQuc1PHIIJ/maLInMho30T/Y0M4hTdTShOYc=
cloud.google.com/go v0.65.0/go.mod h1:O5N8zS7uWy9vkA9vayVHs65eM1ubvY4h553ofrNHObY=
cloud.google.com/go v0.72.0/go.mod h1:M+5Vjvlc2wnp6tjzE102Dw08nGShTscUx2nZMufOKPI=
cloud.google.com/go v0.74.0/go.mod h1:VV1xSbzvo+9QJOxLDaJfTjx5e+MePCpCWwvftOeQmWk=
cloud.google.com/go v0.78.0/go.mod h1:QjdrLG0uq+YwhjoVOLsS1t7TW8fs36kLs4XO5R5ECHg=
cloud.google.com/go v0.79.0/go.mod h1:3bzgcEeQlzbuEAYu4mrWhKqWjmpprinYgKJLgKHnbb8=
cloud.google.com/go v0.81.0/go.mod h1:mk/AM35KwGk/Nm2YSeZbxXdrNK3KZOYHmLkOqC2V6E0=
cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o=
cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE=
cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc=
cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg=
cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc=
cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ=
cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE=
cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk=
cloud.google.com/go/firestore v1.1.0/go.mod h1:ulACoGHTpvq5r8rxGJ4ddJZBZqakUQqClKRT5SZwBmk=
cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I=
cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw=
cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA=
cloud.google.com/go/pubsub v1.3.1/go.mod h1:i+ucay31+CNRpDW4Lu78I4xXG+O1r/MAHgjpRVR+TSU=
cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw=
cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos=
cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk=
cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs=
cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0=
dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU=
github.com/Azure/go-ansiterm v0.0.0-20210608223527-2377c96fe795/go.mod h1:LmzpDX56iTiv29bbRTIsUNlaFfuhWRQBWjQdVyAevI8=
github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E=
github.com/Azure/go-autorest v14.2.0+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24=
github.com/Azure/go-autorest/autorest v0.11.18/go.mod h1:dSiJPy22c3u0OtOKDNttNgqpNFY/GeWa7GH/Pz56QRA=
github.com/Azure/go-autorest/autorest/adal v0.9.13/go.mod h1:W/MM4U6nLxnIskrw4UwWzlHfGjwUS50aOsc/I3yuU8M=
github.com/Azure/go-autorest/autorest/date v0.3.0/go.mod h1:BI0uouVdmngYNUzGWeSYnokU+TrmwEsOqdt8Y6sso74=
github.com/Azure/go-autorest/autorest/mocks v0.4.1/go.mod h1:LTp+uSrOhSkaKrUy935gNZuuIPPVsHlr9DSOxSayd+k=
github.com/Azure/go-autorest/logger v0.2.1/go.mod h1:T9E3cAhj2VqvPOtCYAvby9aBXkZmbF5NWuPV8+WeEW8=
github.com/Azure/go-autorest/tracing v0.6.0/go.mod h1:+vhtPC754Xsa23ID7GlGsrdKBpUA79WCAKPPZVC2DeU=
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo=
github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46/go.mod h1:3wb06e3pkSAbeQ52E9H9iFoQsEEwGN64994WTCIhntQ=
github.com/NYTimes/gziphandler v1.1.1/go.mod h1:n/CVRwUEOgIxrgPvAQhUUr9oeUtvrhMomdKFjzJNB0c=
github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU=
github.com/PuerkitoBio/purell v1.1.1/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0=
github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE=
github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk541a8SKzHPHnH3zbiI+7dagKZ0cgpgrD7Fyho=
github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY=
github.com/antlr/antlr4/runtime/Go/antlr v0.0.0-20210826220005-b48c857c3a0e/go.mod h1:F7bn7fEU90QkQ3tnmaTx3LTKLEDqnwWODIYppRQ5hnY=
github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o=
github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY=
github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8=
github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY=
github.com/benbjohnson/clock v1.0.3/go.mod h1:bGMdMPoPVvcYyt1gHDf4J2KE153Yf9BuiUKYMaxlTDM=
github.com/benbjohnson/clock v1.1.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA=
github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8=
github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs=
github.com/bketelsen/crypt v0.0.3-0.20200106085610-5cbc8cc4026c/go.mod h1:MKsuJmJgSg28kpZDP6UIiPt0e0Oz0kqKNGyRaWEPv84=
github.com/bketelsen/crypt v0.0.4/go.mod h1:aI6NrJ0pMGgvZKL1iVgXLnfIFJtfV+bKCoqOes/6LfM=
github.com/blang/semver v3.5.1+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk=
github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
github.com/certifi/gocertifi v0.0.0-20191021191039-0944d244cd40/go.mod h1:sGbDF6GwGcLpkNXPUTkMRoywsNa/ol15pxFe6ERfguA=
github.com/certifi/gocertifi v0.0.0-20200922220541-2c3bb06c6054/go.mod h1:sGbDF6GwGcLpkNXPUTkMRoywsNa/ol15pxFe6ERfguA=
github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc=
github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI=
github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI=
github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU=
github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc=
github.com/cncf/udpa/go v0.0.0-20200629203442-efcf912fb354/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk=
github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk=
github.com/cncf/xds/go v0.0.0-20210312221358-fbca930ec8ed/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
github.com/cockroachdb/datadriven v0.0.0-20200714090401-bf6692d28da5/go.mod h1:h6jFvWxBdQXxjopDMZyH2UVceIRfR84bdzbkoKrsWNo=
github.com/cockroachdb/errors v1.2.4/go.mod h1:rQD95gz6FARkaKkQXUksEje/d9a6wBJoCr5oaCLELYA=
github.com/cockroachdb/logtags v0.0.0-20190617123548-eb05cc24525f/go.mod h1:i/u985jwjWRlyHXQbwatDASoW0RMlZ/3i9yJHE2xLkI=
github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk=
github.com/coreos/etcd v3.3.13+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE=
github.com/coreos/go-oidc v2.1.0+incompatible/go.mod h1:CgnwVTmzoESiwO9qyAFEMiHoZ1nMCKZlZ9V6mm3/LKc=
github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk=
github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
github.com/coreos/go-systemd/v22 v22.3.2/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc=
github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA=
github.com/cpuguy83/go-md2man/v2 v2.0.0/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU=
github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
github.com/creack/pty v1.1.11/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/fluxcd/pkg/apis/acl v0.7.0 h1:dMhZJH+g6ZRPjs4zVOAN9vHBd1DcavFgcIFkg5ooOE0=
github.com/fluxcd/pkg/apis/acl v0.7.0/go.mod h1:uv7pXXR/gydiX4MUwlQa7vS8JONEDztynnjTvY3JxKQ=
github.com/fluxcd/pkg/apis/meta v1.12.0 h1:XW15TKZieC2b7MN8VS85stqZJOx+/b8jATQ/xTUhVYg=
github.com/fluxcd/pkg/apis/meta v1.12.0/go.mod h1:+son1Va60x2eiDcTwd7lcctbI6C+K3gM7R+ULmEq1SI=
github.com/fxamacker/cbor/v2 v2.8.0 h1:fFtUGXUzXPHTIUdne5+zzMPTfffl3RD5qYnkY40vtxU=
github.com/fxamacker/cbor/v2 v2.8.0/go.mod h1:vM4b+DJCtHn+zz7h3FFp/hDAI9WNWCsZj23V5ytsSxQ=
github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY=
github.com/go-logr/logr v1.4.2/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
github.com/go-task/slim-sprig/v3 v3.0.0 h1:sUs3vkvUymDpBKi3qH1YSqBQk9+9D/8M2mN1vB6EwHI=
github.com/go-task/slim-sprig/v3 v3.0.0/go.mod h1:W848ghGpv3Qj3dhTPRyJypKRiqCdHZiAzKg9hl15HA8=
github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ=
github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no=
github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815/go.mod h1:WwZ+bS3ebgob9U8Nd0kOddGdZWjyMGR8Wziv+TBNwSE=
github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk=
github.com/elazarl/goproxy v0.0.0-20180725130230-947c36da3153/go.mod h1:/Zj4wYkgs4iZTTu3o/KG3Itv/qCCa8VVMlb3i9OVuzc=
github.com/emicklei/go-restful v0.0.0-20170410110728-ff4f55a20633/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs=
github.com/emicklei/go-restful v2.9.5+incompatible/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs=
github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98=
github.com/envoyproxy/go-control-plane v0.9.7/go.mod h1:cwu0lG7PUMfa9snN8LXBig5ynNVH9qI8YYLbd1fK2po=
github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk=
github.com/envoyproxy/go-control-plane v0.9.9-0.20210217033140-668b12f5399d/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk=
github.com/envoyproxy/go-control-plane v0.9.9-0.20210512163311-63b5d3c536b0/go.mod h1:hliV/p42l8fGbc6Y9bQ70uLwIvmJyVE5k4iMKlh8wCQ=
github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
github.com/evanphx/json-patch v0.5.2/go.mod h1:ZWS5hhDbVDyob71nXKNL0+PWn6ToqBHMikGIFbs31qQ=
github.com/evanphx/json-patch v4.12.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk=
github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4=
github.com/felixge/httpsnoop v1.0.1/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U=
github.com/fluxcd/pkg/apis/acl v0.0.3 h1:Lw0ZHdpnO4G7Zy9KjrzwwBmDZQuy4qEjaU/RvA6k1lc=
github.com/fluxcd/pkg/apis/acl v0.0.3/go.mod h1:XPts6lRJ9C9fIF9xVWofmQwftvhY25n1ps7W9xw0XLU=
github.com/fluxcd/pkg/apis/meta v0.12.2 h1:AiKAZxLyPtV150y63WC+mL1Qm4x5qWQmW6r4mLy1i8c=
github.com/fluxcd/pkg/apis/meta v0.12.2/go.mod h1:Z26X5uTU5LxAyWETGueRQY7TvdPaGfKU7Wye9bdUlho=
github.com/form3tech-oss/jwt-go v3.2.2+incompatible/go.mod h1:pbq4aXjuKjdthFRnoDwaVPLA+WlJuPGy+QneDUgJi2k=
github.com/form3tech-oss/jwt-go v3.2.3+incompatible/go.mod h1:pbq4aXjuKjdthFRnoDwaVPLA+WlJuPGy+QneDUgJi2k=
github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ=
github.com/fsnotify/fsnotify v1.5.1 h1:mZcQUHVQUQWoPXXtuf9yuEXKudkV2sx1E06UadKWpgI=
github.com/fsnotify/fsnotify v1.5.1/go.mod h1:T3375wBYaZdLLcVNkcVbzGHY7f1l/uK5T5Ai1i3InKU=
github.com/getkin/kin-openapi v0.76.0/go.mod h1:660oXbgy5JFMKreazJaQTw7o+X00qeSyhcnluiMv+Xg=
github.com/getsentry/raven-go v0.2.0/go.mod h1:KungGk8q33+aIAZUIVWZDr2OfAEBsO49PX4NzFV5kcQ=
github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU=
github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8=
github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8=
github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
github.com/go-kit/log v0.1.0/go.mod h1:zbhenjAZHb184qTLMA9ZjW7ThYL0H2mk7Q6pNt4vbaY=
github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE=
github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk=
github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A=
github.com/go-logr/logr v0.1.0/go.mod h1:ixOQHD9gLJUVQQ2ZOR7zLEifBX6tGkNJF4QyIY7sIas=
github.com/go-logr/logr v0.2.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU=
github.com/go-logr/logr v1.2.0/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
github.com/go-logr/logr v1.2.2 h1:ahHml/yUpnlb96Rp8HCvtYVPY8ZYpxq3g7UYchIYwbs=
github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
github.com/go-logr/zapr v1.2.0/go.mod h1:Qa4Bsj2Vb+FAVeAKsLD8RLQ+YRJB8YDmOAKxaBQf7Ro=
github.com/go-openapi/jsonpointer v0.19.3/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg=
github.com/go-openapi/jsonpointer v0.19.5/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg=
github.com/go-openapi/jsonreference v0.19.3/go.mod h1:rjx6GuL8TTa9VaixXglHmQmIL98+wF9xc8zWvFonSJ8=
github.com/go-openapi/jsonreference v0.19.5/go.mod h1:RdybgQwPxbL4UEjuAruzK1x3nE69AqPYEJeo/TWfEeg=
github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk=
github.com/go-openapi/swag v0.19.14/go.mod h1:QYRuS/SOXUCsnplDa677K7+DxSOj6IPNl/eQntq43wQ=
github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0/go.mod h1:fyg7847qk6SyHyPtNmDHnmrv/HOrqktSC+C9fM+CJOE=
github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4=
github.com/gogo/protobuf v1.3.1/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o=
github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q=
github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q=
github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8=
github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU=
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
github.com/golang/glog v1.0.0/go.mod h1:EWib/APOK0SL3dFbYqvxE3UYd8E6s1ouQ7iEp/0LWV4=
github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y=
github.com/golang/mock v1.4.0/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw=
github.com/golang/mock v1.4.1/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw=
github.com/golang/mock v1.4.3/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw=
github.com/golang/mock v1.4.4/go.mod h1:l3mdAwkq5BuhzHwde/uurv3sEJeZMXNpwsxVWU71h+4=
github.com/golang/mock v1.5.0/go.mod h1:CWnOUgYIOo4TcNZ0wHX3YZCqsaM1I1Jvs6v3mP3KVu8=
github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw=
github.com/golang/protobuf v1.3.4/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw=
github.com/golang/protobuf v1.3.5/go.mod h1:6O5/vntMXwX2lRkT1hjjk0nAC1IDOTvTlVgjlRvqsdk=
github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8=
github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA=
github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs=
github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w=
github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0=
github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8=
github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk=
github.com/golang/protobuf v1.5.1/go.mod h1:DopwsBzvsk0Fs44TXzsVbJyPhcCPeIwnvohx4u74HPM=
github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=
github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
github.com/google/btree v1.0.1/go.mod h1:xXMiIv4Fb/0kKde4SpL7qlzvu5cMJDRkFDxJfI9uaxA=
github.com/google/cel-go v0.9.0/go.mod h1:U7ayypeSkw23szu4GaQTPJGx66c20mx8JklMSxrmI1w=
github.com/google/cel-spec v0.6.0/go.mod h1:Nwjgxy5CbjlPrtCWjeDjUyKMl8w41YBYGjsyDdqk0xA=
github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.4.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.6 h1:BKbKCqvP6I+rmFHt06ZmyQtvB8xAkWdhFyr0ZUNZcxQ=
github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
github.com/google/pprof v0.0.0-20241029153458-d1b30febd7db h1:097atOisP2aRj7vFgYQBbFN4U4JNXUNYpxael3UzMyo=
github.com/google/pprof v0.0.0-20241029153458-d1b30febd7db/go.mod h1:vavhavw2zAxS5dIdcRluK6cSGGPlZynqzFM8NdvU144=
github.com/google/gofuzz v1.1.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0=
github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs=
github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0=
github.com/google/martian/v3 v3.1.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0=
github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
github.com/google/pprof v0.0.0-20200212024743-f11f1df84d12/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
github.com/google/pprof v0.0.0-20200430221834-fc25d7d30c6d/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
github.com/google/pprof v0.0.0-20201023163331-3e6fc7fc9c4c/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
github.com/google/pprof v0.0.0-20201203190320-1bf35d6f28c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
github.com/google/pprof v0.0.0-20210122040257-d980be63207e/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
github.com/google/pprof v0.0.0-20210226084205-cbba55b83ad5/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI=
github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg=
github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk=
github.com/googleapis/gnostic v0.5.1/go.mod h1:6U4PtQXGIEt/Z3h5MAT7FNofLnw9vXk2cUuW7uA/OeU=
github.com/googleapis/gnostic v0.5.5/go.mod h1:7+EbHbldMins07ALC74bsA81Ovc97DwqyJO1AENw9kA=
github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY=
github.com/gorilla/mux v1.8.0/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So=
github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA=
github.com/grpc-ecosystem/go-grpc-middleware v1.0.0/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs=
github.com/grpc-ecosystem/go-grpc-middleware v1.3.0/go.mod h1:z0ButlSOZa5vEBq9m2m2hlwIgKw+rp3sdCBRoJY+30Y=
github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk=
github.com/grpc-ecosystem/grpc-gateway v1.9.0/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY=
github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw=
github.com/hashicorp/consul/api v1.1.0/go.mod h1:VmuI/Lkw1nC05EYQWNKwWGbkg+FbDBtguAZLlVdkD9Q=
github.com/hashicorp/consul/sdk v0.1.1/go.mod h1:VKf9jXwCTEY1QZP2MOLRhb5i/I/ssyNV1vwHyQBF0x8=
github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80=
github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60=
github.com/hashicorp/go-msgpack v0.5.3/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM=
github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk=
github.com/hashicorp/go-rootcerts v1.0.0/go.mod h1:K6zTfqpRlCUIjkwsN4Z+hiSfzSTQa6eBIzfwKfwNnHU=
github.com/hashicorp/go-sockaddr v1.0.0/go.mod h1:7Xibr9yA9JjQq1JpNB2Vw7kxv8xerXegt+ozgdvDeDU=
github.com/hashicorp/go-syslog v1.0.0/go.mod h1:qPfqrKkXGihmCqbJM2mZgkZGvKG1dFdvsLplgctolz4=
github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro=
github.com/hashicorp/go-uuid v1.0.1/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro=
github.com/hashicorp/go.net v0.0.1/go.mod h1:hjKkEWcCURg++eb33jQU7oqQcI9XDCnUzHA0oac0k90=
github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ=
github.com/hashicorp/logutils v1.0.0/go.mod h1:QIAnNjmIWmVIIkWDTG1z5v++HQmx9WQRO+LraFDTW64=
github.com/hashicorp/mdns v1.0.0/go.mod h1:tL+uN++7HEJ6SQLQ2/p+z2pH24WQKWjBPkE0mNTz8vQ=
github.com/hashicorp/memberlist v0.1.3/go.mod h1:ajVTdAv/9Im8oMAAj5G31PhhMCZJV2pPBoIllUwCN7I=
github.com/hashicorp/serf v0.8.2/go.mod h1:6hOLApaqBFA1NXqRQAsxw9QxuDEvNxSQRwA/JwenrHc=
github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
github.com/imdario/mergo v0.3.5/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA=
github.com/imdario/mergo v0.3.12/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA=
github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8=
github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI=
github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo=
github.com/jonboulle/clockwork v0.2.2/go.mod h1:Pkfl5aHPm1nk2H9h0bjmnJD/BcgbGXUBGnn1kMkgxc8=
github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y=
github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4=
github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU=
github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
github.com/json-iterator/go v1.1.11/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM=
github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo=
github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU=
github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk=
github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU=
github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w=
github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM=
github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q=
github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00=
github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8=
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE=
github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk=
github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
github.com/kr/fs v0.1.0/go.mod h1:FFnZGqtBN9Gxj7eW1uZ42v5BccTP0vu6NEaFoC2HwRg=
github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc=
github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
github.com/magiconair/properties v1.8.1/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ=
github.com/magiconair/properties v1.8.5/go.mod h1:y3VJvCyxH9uVvJTWEGAELF3aiYNyPKd5NZ3oSwXrF60=
github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
github.com/mailru/easyjson v0.7.6/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc=
github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU=
github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4=
github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4=
github.com/miekg/dns v1.0.14/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg=
github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc=
github.com/mitchellh/go-homedir v1.0.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
github.com/mitchellh/go-testing-interface v1.0.0/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI=
github.com/mitchellh/gox v0.4.0/go.mod h1:Sd9lOJ0+aimLBi73mGofS1ycjY8lL3uZM3JPS42BGNg=
github.com/mitchellh/iochan v1.0.0/go.mod h1:JwYml1nuB7xOzsp52dPpHFffvOCDupsG0QubkSMEySY=
github.com/mitchellh/mapstructure v0.0.0-20160808181253-ca63d7c062ee/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
github.com/mitchellh/mapstructure v1.4.1/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
github.com/moby/spdystream v0.2.0/go.mod h1:f7i0iNDQJ059oMTcWxx8MA/zKFIuD/lY+0GqbN2Wy8c=
github.com/moby/term v0.0.0-20210610120745-9d4ed1856297/go.mod h1:vgPCkQMyxTZ7IDy8SXRufE172gr8+K/JE/7hHFxHW3A=
github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg=
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M=
github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk=
github.com/onsi/ginkgo/v2 v2.22.0 h1:Yed107/8DjTr0lKCNt7Dn8yQ6ybuDRQoMGrNFKzMfHg=
github.com/onsi/ginkgo/v2 v2.22.0/go.mod h1:7Du3c42kxCUegi0IImZ1wUQzMBVecgIHjR1C+NkhLQo=
github.com/onsi/gomega v1.36.1 h1:bJDPBO7ibjxcbHMgSCoo4Yj18UWbKDlLwX1x9sybDcw=
github.com/onsi/gomega v1.36.1/go.mod h1:PvZbdDc8J6XJEpDK4HCuRBm8a6Fzp9/DmhC9C7yFlog=
github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsKMEsNbx1VGcRFpLqf3715MtcvvzbA=
github.com/munnerz/goautoneg v0.0.0-20120707110453-a547fc61f48d/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ=
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ=
github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw=
github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e h1:fD57ERR4JtEqsWbfPhv4DMiApHyliiK5xCTNVSPiaAs=
github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno=
github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A=
github.com/nxadm/tail v1.4.8 h1:nPr65rt6Y5JFSKQO7qToXr7pePgD6Gwiw05lkbyAQTE=
github.com/nxadm/tail v1.4.8/go.mod h1:+ncqLTQzXmGhMZNUePPaPqPvBxHAIsmXswZKocGu+AU=
github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U=
github.com/onsi/ginkgo v0.0.0-20170829012221-11459a886d9c/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk=
github.com/onsi/ginkgo v1.14.0/go.mod h1:iSB4RoI2tjJc9BBv4NKIKWKya62Rps+oPG/Lv9klQyY=
github.com/onsi/ginkgo v1.16.4/go.mod h1:dX+/inL/fNMqNlz0e9LfyB9TswhZpCVdJM/Z6Vvnwo0=
github.com/onsi/ginkgo v1.16.5 h1:8xi0RTUf59SOSfEtZMvwTvXYMzG4gV23XVHOZiXNtnE=
github.com/onsi/ginkgo v1.16.5/go.mod h1:+E8gABHa3K6zRBolWtd+ROzc/U5bkGt0FwiG042wbpU=
github.com/onsi/gomega v0.0.0-20170829124025-dcabb60a477c/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA=
github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY=
github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo=
github.com/onsi/gomega v1.17.0 h1:9Luw4uT5HTjHTN8+aNcSThgH1vdXnmdJ8xIfZ4wyTRE=
github.com/onsi/gomega v1.17.0/go.mod h1:HnhC7FXeEQY45zxNK3PPoIUhzk/80Xly9PcubAlGdZY=
github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o=
github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc=
github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic=
github.com/pelletier/go-toml v1.9.3/go.mod h1:u1nR/EPcESfeI/szUZKdtJ0xRNbUoANCkoOuaOx1Y+c=
github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU=
github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pkg/sftp v1.10.1/go.mod h1:lYOWFsE0bwd1+KfKJaKeuokY15vzFx25BLbzYYoAxZI=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/rogpeppe/go-internal v1.9.0/go.mod h1:WtVeX8xhTBvf0smdhujwtBcq4Qrzq/fJaraNFVN+nFs=
github.com/rogpeppe/go-internal v1.13.1 h1:KvO1DLK/DRN07sQ1LQKScxyZJuNnedQ5/wKSR38lUII=
github.com/rogpeppe/go-internal v1.13.1/go.mod h1:uMEvuHeurkdAXX61udpOXGD/AzZDWNMNyH2VO9fmH0o=
github.com/spf13/pflag v1.0.6 h1:jFzHGLGAlb3ruxLB8MhbI6A8+AQX/2eW4qeyNZXNp2o=
github.com/spf13/pflag v1.0.6/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI=
github.com/pquerna/cachecontrol v0.0.0-20171018203845-0dec1b30a021/go.mod h1:prYjPmNq4d1NPVmpShWobRqXY3q7Vp+80DqgxxUrUIA=
github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
github.com/prometheus/client_golang v0.9.3/go.mod h1:/TN21ttK/J9q6uSwhBd54HahCDft0ttaMvbicHlPoso=
github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo=
github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M=
github.com/prometheus/client_golang v1.11.0/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0=
github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
github.com/prometheus/common v0.0.0-20181113130724-41aa239b4cce/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro=
github.com/prometheus/common v0.4.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo=
github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc=
github.com/prometheus/common v0.28.0/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls=
github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
github.com/prometheus/procfs v0.0.0-20190507164030-5867b95ac084/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU=
github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA=
github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU=
github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg=
github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ=
github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts=
github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc=
github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc=
github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE=
github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88=
github.com/sirupsen/logrus v1.7.0/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0=
github.com/sirupsen/logrus v1.8.1/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0=
github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc=
github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA=
github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM=
github.com/soheilhy/cmux v0.1.5/go.mod h1:T7TcVDs9LWfQgPlPsdngu6I6QIoyIFZDDC6sNE1GqG0=
github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA=
github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ=
github.com/spf13/afero v1.2.2/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk=
github.com/spf13/afero v1.6.0/go.mod h1:Ai8FlHk4v/PARR026UzYexafAt9roJ7LcLMAmO6Z93I=
github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE=
github.com/spf13/cast v1.3.1/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE=
github.com/spf13/cobra v1.1.3/go.mod h1:pGADOWyqRD/YMrPZigI/zbliZ2wVD/23d+is3pSWzOo=
github.com/spf13/cobra v1.2.1/go.mod h1:ExllRjgxM/piMAM+3tAZvg8fsklGAf3tPfi+i8t68Nk=
github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo=
github.com/spf13/jwalterweatherman v1.1.0/go.mod h1:aNWZUN0dPAAO/Ljvb5BEdw96iTZ0EXowPYD95IqWIGo=
github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA=
github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
github.com/spf13/viper v1.7.0/go.mod h1:8WkrPz2fc9jxqZNCJI/76HCieCp4Q8HaLFoCha5qpdg=
github.com/spf13/viper v1.8.1/go.mod h1:o0Pch8wJ9BVSWGQMbra6iw0oQ5oktSIBaujf1rJH9Ns=
github.com/stoewer/go-strcase v1.2.0/go.mod h1:IBiWB2sKIp3wVVQ3Y035++gc+knqhUQag1KpM8ahLw8=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA=
github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
github.com/x448/float16 v0.8.4 h1:qLwI1I70+NjRFUR3zs1JPUCgaCXSh3SW62uAKT1mSBM=
github.com/x448/float16 v0.8.4/go.mod h1:14CWIYCyZA/cWjXOioeEpHeN/83MdbZDRQHoFcYsOfg=
github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA=
github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.7.0 h1:nwc3DEeHmmLAfoZucVR881uASk0Mfjw8xYJ99tb5CcY=
github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/subosito/gotenv v1.2.0/go.mod h1:N0PQaV/YGNqwC0u51sEeR/aUtSLEXKX9iv69rRypqCw=
github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U=
github.com/tmc/grpc-websocket-proxy v0.0.0-20201229170055-e5319fda7802/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U=
github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU=
github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k=
github.com/yuin/goldmark v1.4.0/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k=
go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU=
go.etcd.io/bbolt v1.3.6/go.mod h1:qXsaaIqmgQH0T+OPdb99Bf+PKfBBQVAdyD6TY9G8XM4=
go.etcd.io/etcd/api/v3 v3.5.0/go.mod h1:cbVKeC6lCfl7j/8jBhAK6aIYO9XOjdptoxU/nLQcPvs=
go.etcd.io/etcd/client/pkg/v3 v3.5.0/go.mod h1:IJHfcCEKxYu1Os13ZdwCwIUTUVGYTSAM3YSwc9/Ac1g=
go.etcd.io/etcd/client/v2 v2.305.0/go.mod h1:h9puh54ZTgAKtEbut2oe9P4L/oqKCVB6xsXlzd7alYQ=
go.etcd.io/etcd/client/v3 v3.5.0/go.mod h1:AIKXXVX/DQXtfTEqBryiLTUXwON+GuvO6Z7lLS/oTh0=
go.etcd.io/etcd/pkg/v3 v3.5.0/go.mod h1:UzJGatBQ1lXChBkQF0AuAtkRQMYnHubxAEYIrC3MSsE=
go.etcd.io/etcd/raft/v3 v3.5.0/go.mod h1:UFOHSIvO/nKwd4lhkwabrTD3cqW5yVyYYf/KlD00Szc=
go.etcd.io/etcd/server/v3 v3.5.0/go.mod h1:3Ah5ruV+M+7RZr0+Y/5mNLwC+eQlni+mQmOVdCRJoS4=
go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU=
go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8=
go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk=
go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E=
go.opentelemetry.io/contrib v0.20.0/go.mod h1:G/EtFaa6qaN7+LxqfIAT3GiZa7Wv5DTBUzl5H4LY0Kc=
go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.20.0/go.mod h1:oVGt1LRbBOBq1A5BQLlUg9UaU/54aiHw8cgjV3aWZ/E=
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.20.0/go.mod h1:2AboqHi0CiIZU0qwhtUfCYD1GeUzvvIXWNkhDt7ZMG4=
go.opentelemetry.io/otel v0.20.0/go.mod h1:Y3ugLH2oa81t5QO+Lty+zXf8zC9L26ax4Nzoxm/dooo=
go.opentelemetry.io/otel/exporters/otlp v0.20.0/go.mod h1:YIieizyaN77rtLJra0buKiNBOm9XQfkPEKBeuhoMwAM=
go.opentelemetry.io/otel/metric v0.20.0/go.mod h1:598I5tYlH1vzBjn+BTuhzTCSb/9debfNp6R3s7Pr1eU=
go.opentelemetry.io/otel/oteltest v0.20.0/go.mod h1:L7bgKf9ZB7qCwT9Up7i9/pn0PWIa9FqQ2IQ8LoxiGnw=
go.opentelemetry.io/otel/sdk v0.20.0/go.mod h1:g/IcepuwNsoiX5Byy2nNV0ySUF1em498m7hBWC279Yc=
go.opentelemetry.io/otel/sdk/export/metric v0.20.0/go.mod h1:h7RBNMsDJ5pmI1zExLi+bJK+Dr8NQCh0qGhm1KDnNlE=
go.opentelemetry.io/otel/sdk/metric v0.20.0/go.mod h1:knxiS8Xd4E/N+ZqKmUPf3gTTZ4/0TjTXukfxjzSTpHE=
go.opentelemetry.io/otel/trace v0.20.0/go.mod h1:6GjCW8zgDjwGHGa6GkyeB8+/5vjT16gUEi0Nf1iBdgw=
go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI=
go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc=
go.uber.org/goleak v1.1.10/go.mod h1:8a7PlsEVH3e/a/GLqe5IIrQx6GzcnRmZEufDUTk4A7A=
go.uber.org/goleak v1.1.11-0.20210813005559-691160354723/go.mod h1:cwTWslyiVhfpKIDGSZEM2HlOvcqm+tG4zioyIeLoqMQ=
go.uber.org/goleak v1.1.12/go.mod h1:cwTWslyiVhfpKIDGSZEM2HlOvcqm+tG4zioyIeLoqMQ=
go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0=
go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU=
go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q=
go.uber.org/zap v1.17.0/go.mod h1:MXVU+bhUf/A7Xi2HNOnopQOrmycQ5Ih87HtOu4q5SSo=
go.uber.org/zap v1.19.0/go.mod h1:xg/QME4nWcxGxrpdeYfq7UvYrLh66cuVKdrbD1XF/NI=
go.uber.org/zap v1.19.1/go.mod h1:j3DNczoxDZroyBnOT1L/Q79cfUMGZxlv/9dzN7SM1rI=
golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20181029021203-45a5f77698d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20190820162420-60c769a6c586/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20201002170205-7f63de1d35b0/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20210817164053-32db794688a5/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8=
golang.org/x/exp v0.0.0-20190829153037-c13cbed26979/go.mod h1:86+5VVa7VpoJ4kLfm080zCjGlMRFzhUhsZKEZO7MGek=
golang.org/x/exp v0.0.0-20191030013958-a1ab85dbe136/go.mod h1:JXzH8nQsPlswgeRAPE3MuO9GYsAcnJvJ4vnMwN/5qkY=
golang.org/x/exp v0.0.0-20191129062945-2f5052295587/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=
golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=
golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=
golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM=
golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU=
golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js=
golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0=
golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU=
golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
golang.org/x/lint v0.0.0-20190409202823-959b441ac422/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
golang.org/x/lint v0.0.0-20190909230951-414d861bb4ac/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRuDixDT3tpyyb+LUpUlRWLxfhWrs=
golang.org/x/lint v0.0.0-20200130185559-910be7a94367/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
golang.org/x/lint v0.0.0-20201208152925-83fdc39ff7b5/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
golang.org/x/lint v0.0.0-20210508222113-6edffad5e616/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE=
golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o=
golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc=
golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY=
golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg=
golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg=
golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.4.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20181023162649-9b4f9f5ad519/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20181201002055-351d144fa1fc/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20181220203305-927f97764cc3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks=
golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20190827160401-ba9fcec4b297/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200222125558-5a598a2470a0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200301022130-244492dfa37a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
golang.org/x/net v0.0.0-20200501053045-e0ff5e5a1de5/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
golang.org/x/net v0.0.0-20200506145744-7e3656a0809f/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
golang.org/x/net v0.0.0-20200513185701-a91f0712d120/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
golang.org/x/net v0.0.0-20200520182314-0ba52f642ac2/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
golang.org/x/net v0.40.0 h1:79Xs7wF06Gbdcg4kdCCIQArK11Z1hr5POQ6+fIYHNuY= golang.org/x/net v0.0.0-20201031054903-ff519b6c9102/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
golang.org/x/net v0.40.0/go.mod h1:y0hY0exeL2Pku80/zKK7tpntoX23cqL3Oa6njdgRtds= golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
golang.org/x/net v0.0.0-20201202161906-c7110b5ffcbb/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
golang.org/x/net v0.0.0-20201209123823-ac852fbbde11/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
golang.org/x/net v0.0.0-20210119194325-5f4716e94777/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
golang.org/x/net v0.0.0-20210316092652-d523dce5a7f4/go.mod h1:RBQZq4jEuRlivfhVLdyRGr576XBO4/greRjx4P4O3yc=
golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM=
golang.org/x/net v0.0.0-20210428140749-89ef3d95e781/go.mod h1:OJAsFXCWl8Ukc7SiCT/9KSuxbyM7479/AVlXFRxuMCk=
golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/net v0.0.0-20210805182204-aaa1db679c0d/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/net v0.0.0-20210825183410-e898025ed96a/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/net v0.0.0-20211209124913-491a49abca63/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/net v0.0.0-20211215060638-4ddde0e984e9 h1:kmreh1vGI63l2FxOAYS3Yv6ATsi7lSTuwNSVbGfJV9I=
golang.org/x/net v0.0.0-20211215060638-4ddde0e984e9/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/oauth2 v0.0.0-20200902213428-5d25da1a8d43/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
golang.org/x/oauth2 v0.0.0-20201109201403-9fd604954f58/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
golang.org/x/oauth2 v0.0.0-20201208152858-08078c50e5b5/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
golang.org/x/oauth2 v0.0.0-20210218202405-ba52d332ba99/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
golang.org/x/oauth2 v0.0.0-20210220000619-9bb904979d93/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
golang.org/x/oauth2 v0.0.0-20210313182246-cd4f82c27b84/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
golang.org/x/oauth2 v0.0.0-20210402161424-2e8d93401602/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
golang.org/x/oauth2 v0.0.0-20210819190943-2bc19b11175f/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20181026203630-95b1ffbd15a5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200106162015-b016eb3dc98e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200331124033-c3d80250170d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200501052902-10377860bb8e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200511232937-7e40ca221e25/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200515095857-1151b9dac4a9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200519105757-fe76b779f299/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200523222454-059865788121/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200615200032-f1bc736245b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200625212154-ddb9806d33ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200803210538-64077c9b5642/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200831180312-196b9ba8737a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200905004654-be1d3432aa8f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200923182605-d9f96fdee20d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.33.0 h1:q3i8TbbEz+JRD9ywIRlyRAQbM0qF7hu24q3teo2hbuw=
golang.org/x/sys v0.33.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k=
golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20201201145000-ef89a241ccb3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210104204734-6f8348627aad/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210112080510-489259a85091/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210220050731-9a76102bfb43/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210305230114-8fe3ee5dd75b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210315160823-c6e025ad8005/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210320140829-1e4c9ba3b0c4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210403161142-5e06dd20ab57/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210809222454-d867a43fc93e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210831042530-f4d43177bf5e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20211029165221-6e7872819dc8 h1:M69LAlWZCshgp0QSzyDcSsSIejIEeuaCVpmwcKwyLMk=
golang.org/x/sys v0.0.0-20211029165221-6e7872819dc8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/term v0.0.0-20210615171337-6886f2dfbf5b/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.25.0 h1:qVyWApTSYLk/drJRO5mDlNYskwQznZmkpV2c8q9zls4=
golang.org/x/text v0.25.0/go.mod h1:WEdwpYrmk1qmdHvhkSTNPm3app7v4rsT8F2UD6+VHIA=
golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.7 h1:olpwvP2KacW1ZWvsR7uQhoyTYvKAupfQrRGBFM352Gk=
golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20210220033141-f8bda1e9f3ba/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20210723032227-1f47c861a9ac/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20181030221726-6c7e314b6563/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY=
golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
golang.org/x/tools v0.0.0-20190624222133-a101b041ded4/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191108193012-7d206e10da11/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191112195655-aa38f8e97acc/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191113191852-77e3bb0ad9e7/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191130070609-6e064ea0cf2d/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191216173652-a0e659d51361/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
golang.org/x/tools v0.0.0-20191227053925-7b8e75db28f4/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
golang.org/x/tools v0.0.0-20200117161641-43d50277825c/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
golang.org/x/tools v0.0.0-20200122220014-bf1340f18c4a/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
golang.org/x/tools v0.0.0-20200204074204-1cc6d1ef6c74/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
golang.org/x/tools v0.0.0-20200207183749-b753a1ba74fa/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
golang.org/x/tools v0.0.0-20200212150539-ea181f53ac56/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
golang.org/x/tools v0.0.0-20200224181240-023911ca70b2/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
golang.org/x/tools v0.0.0-20200227222343-706bc42d1f0d/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
golang.org/x/tools v0.0.0-20200304193943-95d2e580d8eb/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw=
golang.org/x/tools v0.0.0-20200312045724-11d5b4c81c7d/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw=
golang.org/x/tools v0.0.0-20200331025713-a30bf2db82d4/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8=
golang.org/x/tools v0.0.0-20200501065659-ab2804fb9c9d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
golang.org/x/tools v0.0.0-20200505023115-26f46d2f7ef8/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
golang.org/x/tools v0.0.0-20200512131952-2bc93b1c0c88/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
golang.org/x/tools v0.0.0-20200515010526-7d3b6ebf133d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
golang.org/x/tools v0.0.0-20200618134242-20370b0cb4b2/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
golang.org/x/tools v0.0.0-20200729194436-6467de6f59a7/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA=
golang.org/x/tools v0.0.0-20200804011535-6c149bb5ef0d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA=
golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA=
golang.org/x/tools v0.0.0-20200904185747-39188db58858/go.mod h1:Cj7w3i3Rnn0Xh82ur9kSqwfTHTeVxaDqrfMjpcNT6bE=
golang.org/x/tools v0.0.0-20201110124207-079ba7bd75cd/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
golang.org/x/tools v0.0.0-20201201161351-ac6f37ff4c2a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
golang.org/x/tools v0.0.0-20201208233053-a543418bbed2/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
golang.org/x/tools v0.0.0-20201224043029-2b0845dc783e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
golang.org/x/tools v0.0.0-20210105154028-b0ab187a4818/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
golang.org/x/tools v0.26.0 h1:v/60pFQmzmT9ExmjDv2gGIfi3OqfKoEP6I5+umXlbnQ=
golang.org/x/tools v0.26.0/go.mod h1:TPVVj70c7JJ3WCazhD8OdXcZg/og+b9+tH/KxylGwH0=
golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0=
golang.org/x/tools v0.1.2/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
golang.org/x/tools v0.1.6-0.20210820212750-d4cc65f0b2ff/go.mod h1:YD9qOF0M9xpSpdWTBbzEl5e/RnCefISl8E5Noe10jFM=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 h1:go1bK/D/BFZV2I8cIQd1NKEZ+0owSTG1fDTci4IqFcE=
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
gomodules.xyz/jsonpatch/v2 v2.2.0/go.mod h1:WXp+iVDkoLQqPudfQ9GBlwB2eZ5DKOnjQZCYdOS8GPY=
google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE=
google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M=
google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg=
google.golang.org/api v0.9.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg=
google.golang.org/api v0.13.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI=
google.golang.org/api v0.14.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI=
google.golang.org/api v0.15.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI=
google.golang.org/api v0.17.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
google.golang.org/api v0.18.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
google.golang.org/api v0.19.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
google.golang.org/api v0.20.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
google.golang.org/api v0.22.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
google.golang.org/api v0.24.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE=
google.golang.org/api v0.28.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE=
google.golang.org/api v0.29.0/go.mod h1:Lcubydp8VUV7KeIHD9z2Bys/sm/vGKnG1UHuDBSrHWM=
google.golang.org/api v0.30.0/go.mod h1:QGmEvQ87FHZNiUVJkT14jQNYJ4ZJjdRF23ZXz5138Fc=
google.golang.org/api v0.35.0/go.mod h1:/XrVsuzM0rZmrsbjJutiuftIzeuTQcEeaYcSk/mQ1dg=
google.golang.org/api v0.36.0/go.mod h1:+z5ficQTmoYpPn8LCUNVpK5I7hwkpjbcgqA7I34qYtE=
google.golang.org/api v0.40.0/go.mod h1:fYKFpnQN0DsDSKRVRcQSDQNtqWPfM9i+zNPxepjRCQ8=
google.golang.org/api v0.41.0/go.mod h1:RkxM5lITDfTzmyKFPt+wGrCJbVfniCr2ool8kTBzRTU=
google.golang.org/api v0.43.0/go.mod h1:nQsDGjRXMo4lvh5hP0TKqF244gqhGcr/YSIykhUk/94=
google.golang.org/api v0.44.0/go.mod h1:EBOGZqzyhtvMDoxwS97ctnh0zUmYY6CxqXsc1AvkYD8=
google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0=
google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc=
google.golang.org/appengine v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc=
google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc=
google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
google.golang.org/genproto v0.0.0-20190911173649-1774047e7e51/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8=
google.golang.org/genproto v0.0.0-20191108220845-16a3f7862a1a/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
google.golang.org/genproto v0.0.0-20191115194625-c23dd37a84c9/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
google.golang.org/genproto v0.0.0-20191216164720-4f79533eabd1/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
google.golang.org/genproto v0.0.0-20191230161307-f3c370f40bfb/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
google.golang.org/genproto v0.0.0-20200115191322-ca5a22157cba/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
google.golang.org/genproto v0.0.0-20200122232147-0452cf42e150/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
google.golang.org/genproto v0.0.0-20200204135345-fa8e72b47b90/go.mod h1:GmwEX6Z4W5gMy59cAlVYjN9JhxgbQH6Gn+gFDQe2lzA=
google.golang.org/genproto v0.0.0-20200212174721-66ed5ce911ce/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
google.golang.org/genproto v0.0.0-20200224152610-e50cd9704f63/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
google.golang.org/genproto v0.0.0-20200228133532-8c2c7df3a383/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
google.golang.org/genproto v0.0.0-20200305110556-506484158171/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
google.golang.org/genproto v0.0.0-20200312145019-da6875a35672/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
google.golang.org/genproto v0.0.0-20200331122359-1ee6d9798940/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
google.golang.org/genproto v0.0.0-20200423170343-7949de9c1215/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
google.golang.org/genproto v0.0.0-20200430143042-b979b6f78d84/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
google.golang.org/genproto v0.0.0-20200511104702-f5ebc3bea380/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
google.golang.org/genproto v0.0.0-20200515170657-fc4c6c6a6587/go.mod h1:YsZOwe1myG/8QRHRsmBRE1LrgQY60beZKjly0O1fX9U=
google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo=
google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA=
google.golang.org/genproto v0.0.0-20200729003335-053ba62fc06f/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/genproto v0.0.0-20200904004341-0bd0a958aa1d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/genproto v0.0.0-20201019141844-1ed22bb0c154/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/genproto v0.0.0-20201102152239-715cce707fb0/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/genproto v0.0.0-20201109203340-2640f1f9cdfb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/genproto v0.0.0-20201201144952-b05cb90ed32e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/genproto v0.0.0-20201210142538-e3217bee35cc/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/genproto v0.0.0-20201214200347-8c77b98c765d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/genproto v0.0.0-20210222152913-aa3ee6e6a81c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/genproto v0.0.0-20210303154014-9728d6b83eeb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/genproto v0.0.0-20210310155132-4ce2db91004e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/genproto v0.0.0-20210319143718-93e7006c17a6/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/genproto v0.0.0-20210402141018-6c239bbf2bb1/go.mod h1:9lPAdzaEmUacj36I+k7YKbEc5CXzPIeORRgDAUOu28A=
google.golang.org/genproto v0.0.0-20210602131652-f16073e35f0c/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0=
google.golang.org/genproto v0.0.0-20210831024726-fe130286e0e2/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY=
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38=
google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM=
google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY=
google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
google.golang.org/grpc v1.28.0/go.mod h1:rpkK4SK4GF4Ach/+MFLZUBavHOvF2JJB5uozKKal+60=
google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk=
google.golang.org/grpc v1.30.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak=
google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak=
google.golang.org/grpc v1.31.1/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak=
google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0=
google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc=
google.golang.org/grpc v1.34.0/go.mod h1:WotjhfgOW/POjDeRt8vscBtXq+2VjORFy659qA51WJ8=
google.golang.org/grpc v1.35.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU=
google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU=
google.golang.org/grpc v1.36.1/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU=
google.golang.org/grpc v1.37.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM=
google.golang.org/grpc v1.38.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM=
google.golang.org/grpc v1.40.0/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34=
google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=
google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE=
google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo=
google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4=
google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c=
google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw=
google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f h1:BLraFXnmrev5lT+xlilqcH8XK9/i0At2xKjWk4p6zsU=
gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI=
gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys=
gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc=
gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw=
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
k8s.io/api v0.33.0 h1:yTgZVn1XEe6opVpP1FylmNrIFWuDqe2H0V8CT5gxfIU=
k8s.io/api v0.33.0/go.mod h1:CTO61ECK/KU7haa3qq8sarQ0biLq2ju405IZAd9zsiM=
k8s.io/apimachinery v0.33.0 h1:1a6kHrJxb2hs4t8EE5wuR/WxKDwGN1FKH3JvDtA0CIQ=
k8s.io/apimachinery v0.33.0/go.mod h1:BHW0YOu7n22fFv/JkYOEfkUYNRN0fj0BlvMFWA7b+SM=
k8s.io/klog/v2 v2.130.1 h1:n9Xl7H1Xvksem4KFG4PYbdQCQxqc/tTUyrgXaOhHSzk=
k8s.io/klog/v2 v2.130.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE=
k8s.io/utils v0.0.0-20250321185631-1f6e0b77f77e h1:KqK5c/ghOm8xkHYhlodbp6i6+r+ChV2vuAuVRdFbLro=
k8s.io/utils v0.0.0-20250321185631-1f6e0b77f77e/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0=
sigs.k8s.io/controller-runtime v0.21.0 h1:CYfjpEuicjUecRk+KAeyYh+ouUBn4llGyDYytIGcJS8=
sigs.k8s.io/controller-runtime v0.21.0/go.mod h1:OSg14+F65eWqIu4DceX7k/+QRAbTTvxeQSNSOQpukWM=
sigs.k8s.io/json v0.0.0-20241014173422-cfa47c3a1cc8 h1:gBQPwqORJ8d8/YNZWEjoZs7npUVDpVXUUOFfW6CgAqE=
sigs.k8s.io/json v0.0.0-20241014173422-cfa47c3a1cc8/go.mod h1:mdzfpAEoE6DHQEN0uh9ZbOCuHbLK5wOm7dK4ctXE9Tg=
sigs.k8s.io/randfill v0.0.0-20250304075658-069ef1bbf016/go.mod h1:XeLlZ/jmk4i1HRopwe7/aU3H5n1zNUcX6TM94b3QxOY=
sigs.k8s.io/randfill v1.0.0 h1:JfjMILfT8A6RbawdsK2JXGBR5AQVfd+9TbzrlneTyrU=
sigs.k8s.io/randfill v1.0.0/go.mod h1:XeLlZ/jmk4i1HRopwe7/aU3H5n1zNUcX6TM94b3QxOY=
sigs.k8s.io/structured-merge-diff/v4 v4.7.0 h1:qPeWmscJcXP0snki5IYF79Z8xrl8ETFxgMd7wez1XkI=
sigs.k8s.io/structured-merge-diff/v4 v4.7.0/go.mod h1:dDy58f92j70zLsuZVuUX5Wp9vtxXpaZnkPGWeqDfCps=
sigs.k8s.io/yaml v1.4.0 h1:Mk1wCc2gy/F0THH0TAp1QYyJNzRm2KCLy3o5ASXVI5E=
sigs.k8s.io/yaml v1.4.0/go.mod h1:Ejl7/uTz7PSA4eKMyQCUTnhZYNmLIl+5c2lQPGR2BPY=
gopkg.in/ini.v1 v1.51.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k=
gopkg.in/ini.v1 v1.62.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k=
gopkg.in/natefinch/lumberjack.v2 v2.0.0/go.mod h1:l0ndWWf7gzL7RNwBG7wST/UCcT4T24xpD6X8LsfU/+k=
gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo=
gopkg.in/square/go-jose.v2 v2.2.2/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI=
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ=
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw=
gopkg.in/yaml.v2 v2.0.0-20170812160011-eb3733d160e7/go.mod h1:JAlM8MvJe8wmxCU4Bli9HhUf9+ttbYbLASfIpnQbh74=
gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY=
gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b h1:h8qDotaEPuJATrMmW04NCwg7v22aHH28wwpauUhK9Oo=
gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gotest.tools/v3 v3.0.2/go.mod h1:3SzNCllyD9/Y+b5r9JIKQ474KzkZyqLqEfYqMsX94Bk=
gotest.tools/v3 v3.0.3/go.mod h1:Z7Lb0S5l+klDB31fvDQX8ss/FlKDxtlFlw3Oa8Ymbl8=
honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg=
honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k=
honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k=
k8s.io/api v0.23.5 h1:zno3LUiMubxD/V1Zw3ijyKO3wxrhbUF1Ck+VjBvfaoA=
k8s.io/api v0.23.5/go.mod h1:Na4XuKng8PXJ2JsploYYrivXrINeTaycCGcYgF91Xm8=
k8s.io/apiextensions-apiserver v0.23.5/go.mod h1:ntcPWNXS8ZPKN+zTXuzYMeg731CP0heCTl6gYBxLcuQ=
k8s.io/apimachinery v0.23.5 h1:Va7dwhp8wgkUPWsEXk6XglXWU4IKYLKNlv8VkX7SDM0=
k8s.io/apimachinery v0.23.5/go.mod h1:BEuFMMBaIbcOqVIJqNZJXGFTP4W6AycEpb5+m/97hrM=
k8s.io/apiserver v0.23.5/go.mod h1:7wvMtGJ42VRxzgVI7jkbKvMbuCbVbgsWFT7RyXiRNTw=
k8s.io/client-go v0.23.5/go.mod h1:flkeinTO1CirYgzMPRWxUCnV0G4Fbu2vLhYCObnt/r4=
k8s.io/code-generator v0.23.5/go.mod h1:S0Q1JVA+kSzTI1oUvbKAxZY/DYbA/ZUb4Uknog12ETk=
k8s.io/component-base v0.23.5/go.mod h1:c5Nq44KZyt1aLl0IpHX82fhsn84Sb0jjzwjpcA42bY0=
k8s.io/gengo v0.0.0-20210813121822-485abfe95c7c/go.mod h1:FiNAH4ZV3gBg2Kwh89tzAEV2be7d5xI0vBa/VySYy3E=
k8s.io/klog/v2 v2.0.0/go.mod h1:PBfzABfn139FHAV07az/IF9Wp1bkk3vpT2XSJ76fSDE=
k8s.io/klog/v2 v2.2.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y=
k8s.io/klog/v2 v2.30.0 h1:bUO6drIvCIsvZ/XFgfxoGFQU/a4Qkh0iAlvUR7vlHJw=
k8s.io/klog/v2 v2.30.0/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0=
k8s.io/kube-openapi v0.0.0-20211115234752-e816edb12b65/go.mod h1:sX9MT8g7NVZM5lVL/j8QyCCJe8YSMW30QvGZWaCIDIk=
k8s.io/utils v0.0.0-20210802155522-efc7438f0176/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA=
k8s.io/utils v0.0.0-20211116205334-6203023598ed/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA=
k8s.io/utils v0.0.0-20211208161948-7d6a63dca704 h1:ZKMMxTvduyf5WUtREOqg5LiXaN1KO/+0oOQPRFrClpo=
k8s.io/utils v0.0.0-20211208161948-7d6a63dca704/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA=
rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8=
rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0=
rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA=
sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.30/go.mod h1:fEO7lRTdivWO2qYVCVG7dEADOMo/MLDCVr8So2g88Uw=
sigs.k8s.io/controller-runtime v0.11.2 h1:H5GTxQl0Mc9UjRJhORusqfJCIjBO8UtUxGggCwL1rLA=
sigs.k8s.io/controller-runtime v0.11.2/go.mod h1:P6QCzrEjLaZGqHsfd+os7JQ+WFZhvB8MRFsn4dWF7O4=
sigs.k8s.io/json v0.0.0-20211020170558-c049b76a60c6/go.mod h1:p4QtZmO4uMYipTQNzagwnNoseA6OxSUutVw05NhYDRs=
sigs.k8s.io/json v0.0.0-20211208200746-9f7c6b3444d2 h1:kDi4JBNAsJWfz1aEXhO8Jg87JJaPNLh5tIzYHgStQ9Y=
sigs.k8s.io/json v0.0.0-20211208200746-9f7c6b3444d2/go.mod h1:B+TnT182UBxE84DiCz4CVE26eOSDAeYCpfDnC2kdKMY=
sigs.k8s.io/structured-merge-diff/v4 v4.0.2/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw=
sigs.k8s.io/structured-merge-diff/v4 v4.2.1 h1:bKCqE9GvQ5tiVHn5rfn1r+yao3aLQEaLzkkmAkf+A6Y=
sigs.k8s.io/structured-merge-diff/v4 v4.2.1/go.mod h1:j/nl6xW8vLS49O8YvXW1ocPhZawJtm+Yrr7PPRQ0Vg4=
sigs.k8s.io/yaml v1.2.0/go.mod h1:yfXDCHCao9+ENCvLSE62v9VSji2MKu5jeNfTrofGhJc=
sigs.k8s.io/yaml v1.3.0 h1:a2VclLzOGrwOHDiV8EfBGhvjHvP46CtW5j6POvhYGGo=
sigs.k8s.io/yaml v1.3.0/go.mod h1:GeOyir5tyXNByN85N/dRIT9es5UQNerPYEKK56eTBm8=

View File

@@ -1,93 +0,0 @@
/*
Copyright 2023 The Flux authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1
import (
"path"
"strings"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
// Artifact represents the output of a Source reconciliation.
type Artifact struct {
// Path is the relative file path of the Artifact. It can be used to locate
// the file in the root of the Artifact storage on the local file system of
// the controller managing the Source.
// +required
Path string `json:"path"`
// URL is the HTTP address of the Artifact as exposed by the controller
// managing the Source. It can be used to retrieve the Artifact for
// consumption, e.g. by another controller applying the Artifact contents.
// +required
URL string `json:"url"`
// Revision is a human-readable identifier traceable in the origin source
// system. It can be a Git commit SHA, Git tag, a Helm chart version, etc.
// +required
Revision string `json:"revision"`
// Digest is the digest of the file in the form of '<algorithm>:<checksum>'.
// +optional
// +kubebuilder:validation:Pattern="^[a-z0-9]+(?:[.+_-][a-z0-9]+)*:[a-zA-Z0-9=_-]+$"
Digest string `json:"digest,omitempty"`
// LastUpdateTime is the timestamp corresponding to the last update of the
// Artifact.
// +required
LastUpdateTime metav1.Time `json:"lastUpdateTime"`
// Size is the number of bytes in the file.
// +optional
Size *int64 `json:"size,omitempty"`
// Metadata holds upstream information such as OCI annotations.
// +optional
Metadata map[string]string `json:"metadata,omitempty"`
}
// HasRevision reports whether the given revision matches the current
// Revision of the Artifact.
func (in *Artifact) HasRevision(revision string) bool {
if in == nil {
return false
}
return in.Revision == revision
}
// HasDigest reports whether the given digest matches the current Digest of
// the Artifact.
func (in *Artifact) HasDigest(digest string) bool {
if in == nil {
return false
}
return in.Digest == digest
}
// ArtifactDir returns the artifact dir path in the form of
// '<kind>/<namespace>/<name>'.
func ArtifactDir(kind, namespace, name string) string {
kind = strings.ToLower(kind)
return path.Join(kind, namespace, name)
}
// ArtifactPath returns the artifact path in the form of
// '<kind>/<namespace>/<name>/<filename>'.
func ArtifactPath(kind, namespace, name, filename string) string {
return path.Join(ArtifactDir(kind, namespace, name), filename)
}
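
The two helpers above compose the artifact storage layout deterministically, and the value-receiver methods are deliberately nil-safe. A minimal sketch of how a consumer might call them, assuming the package is imported from the published api module path github.com/fluxcd/source-controller/api/v1; the kind, namespace, name, and revision values are invented for illustration:

package main

import (
	"fmt"

	sourcev1 "github.com/fluxcd/source-controller/api/v1"
)

func main() {
	// ArtifactDir yields '<kind>/<namespace>/<name>', with the kind lowercased.
	fmt.Println(sourcev1.ArtifactDir("GitRepository", "flux-system", "podinfo"))
	// gitrepository/flux-system/podinfo

	// ArtifactPath appends the filename to that directory.
	fmt.Println(sourcev1.ArtifactPath("GitRepository", "flux-system", "podinfo", "latest.tar.gz"))
	// gitrepository/flux-system/podinfo/latest.tar.gz

	// The nil-receiver check makes HasRevision safe to call on a missing Artifact.
	var a *sourcev1.Artifact
	fmt.Println(a.HasRevision("main@sha1:af61b57")) // false
}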

View File

@@ -1,271 +0,0 @@
/*
Copyright 2024 The Flux authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1
import (
"time"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"github.com/fluxcd/pkg/apis/meta"
)
const (
// BucketKind is the string representation of a Bucket.
BucketKind = "Bucket"
)
const (
// BucketProviderGeneric for any S3 API compatible storage Bucket.
BucketProviderGeneric string = "generic"
// BucketProviderAmazon for an AWS S3 object storage Bucket.
// Provides support for retrieving credentials from the AWS EC2 service.
BucketProviderAmazon string = "aws"
// BucketProviderGoogle for a Google Cloud Storage Bucket.
// Provides support for authentication using a workload identity.
BucketProviderGoogle string = "gcp"
// BucketProviderAzure for an Azure Blob Storage Bucket.
// Provides support for authentication using a Service Principal,
// Managed Identity or Shared Key.
BucketProviderAzure string = "azure"
)
// BucketSpec specifies the required configuration to produce an Artifact for
// an object storage bucket.
// +kubebuilder:validation:XValidation:rule="self.provider == 'aws' || self.provider == 'generic' || !has(self.sts)", message="STS configuration is only supported for the 'aws' and 'generic' Bucket providers"
// +kubebuilder:validation:XValidation:rule="self.provider != 'aws' || !has(self.sts) || self.sts.provider == 'aws'", message="'aws' is the only supported STS provider for the 'aws' Bucket provider"
// +kubebuilder:validation:XValidation:rule="self.provider != 'generic' || !has(self.sts) || self.sts.provider == 'ldap'", message="'ldap' is the only supported STS provider for the 'generic' Bucket provider"
// +kubebuilder:validation:XValidation:rule="!has(self.sts) || self.sts.provider != 'aws' || !has(self.sts.secretRef)", message="spec.sts.secretRef is not required for the 'aws' STS provider"
// +kubebuilder:validation:XValidation:rule="!has(self.sts) || self.sts.provider != 'aws' || !has(self.sts.certSecretRef)", message="spec.sts.certSecretRef is not required for the 'aws' STS provider"
type BucketSpec struct {
// Provider of the object storage bucket.
// Defaults to 'generic', which expects an S3 (API) compatible object
// storage.
// +kubebuilder:validation:Enum=generic;aws;gcp;azure
// +kubebuilder:default:=generic
// +optional
Provider string `json:"provider,omitempty"`
// BucketName is the name of the object storage bucket.
// +required
BucketName string `json:"bucketName"`
// Endpoint is the object storage address the BucketName is located at.
// +required
Endpoint string `json:"endpoint"`
// STS specifies the required configuration to use a Security Token
// Service for fetching temporary credentials to authenticate in a
// Bucket provider.
//
// This field is only supported for the `aws` and `generic` providers.
// +optional
STS *BucketSTSSpec `json:"sts,omitempty"`
// Insecure allows connecting to a non-TLS HTTP Endpoint.
// +optional
Insecure bool `json:"insecure,omitempty"`
// Region of the Endpoint where the BucketName is located.
// +optional
Region string `json:"region,omitempty"`
// Prefix to use for server-side filtering of files in the Bucket.
// +optional
Prefix string `json:"prefix,omitempty"`
// SecretRef specifies the Secret containing authentication credentials
// for the Bucket.
// +optional
SecretRef *meta.LocalObjectReference `json:"secretRef,omitempty"`
// CertSecretRef can be given the name of a Secret containing
// either or both of
//
// - a PEM-encoded client certificate (`tls.crt`) and private
// key (`tls.key`);
// - a PEM-encoded CA certificate (`ca.crt`)
//
// and whichever are supplied will be used for connecting to the
// bucket. The client cert and key are useful if you are
// authenticating with a certificate; the CA cert is useful if
// you are using a self-signed server certificate. The Secret must
// be of type `Opaque` or `kubernetes.io/tls`.
//
// This field is only supported for the `generic` provider.
// +optional
CertSecretRef *meta.LocalObjectReference `json:"certSecretRef,omitempty"`
// ProxySecretRef specifies the Secret containing the proxy configuration
// to use while communicating with the Bucket server.
// +optional
ProxySecretRef *meta.LocalObjectReference `json:"proxySecretRef,omitempty"`
// Interval at which the Bucket Endpoint is checked for updates.
// This interval is approximate and may be subject to jitter to ensure
// efficient use of resources.
// +kubebuilder:validation:Type=string
// +kubebuilder:validation:Pattern="^([0-9]+(\\.[0-9]+)?(ms|s|m|h))+$"
// +required
Interval metav1.Duration `json:"interval"`
// Timeout for fetch operations, defaults to 60s.
// +kubebuilder:default="60s"
// +kubebuilder:validation:Type=string
// +kubebuilder:validation:Pattern="^([0-9]+(\\.[0-9]+)?(ms|s|m))+$"
// +optional
Timeout *metav1.Duration `json:"timeout,omitempty"`
// Ignore overrides the set of excluded patterns in the .sourceignore format
// (which is the same as .gitignore). If not provided, a default will be used;
// consult the documentation for your version to find out what those are.
// +optional
Ignore *string `json:"ignore,omitempty"`
// Suspend tells the controller to suspend the reconciliation of this
// Bucket.
// +optional
Suspend bool `json:"suspend,omitempty"`
}
// BucketSTSSpec specifies the required configuration to use a Security Token
// Service for fetching temporary credentials to authenticate in a Bucket
// provider.
type BucketSTSSpec struct {
// Provider of the Security Token Service.
// +kubebuilder:validation:Enum=aws;ldap
// +required
Provider string `json:"provider"`
// Endpoint is the HTTP/S endpoint of the Security Token Service from
// where temporary credentials will be fetched.
// +required
// +kubebuilder:validation:Pattern="^(http|https)://.*$"
Endpoint string `json:"endpoint"`
// SecretRef specifies the Secret containing authentication credentials
// for the STS endpoint. This Secret must contain the fields `username`
// and `password` and is supported only for the `ldap` provider.
// +optional
SecretRef *meta.LocalObjectReference `json:"secretRef,omitempty"`
// CertSecretRef can be given the name of a Secret containing
// either or both of
//
// - a PEM-encoded client certificate (`tls.crt`) and private
// key (`tls.key`);
// - a PEM-encoded CA certificate (`ca.crt`)
//
// and whichever are supplied will be used for connecting to the
// STS endpoint. The client cert and key are useful if you are
// authenticating with a certificate; the CA cert is useful if
// you are using a self-signed server certificate. The Secret must
// be of type `Opaque` or `kubernetes.io/tls`.
//
// This field is only supported for the `ldap` provider.
// +optional
CertSecretRef *meta.LocalObjectReference `json:"certSecretRef,omitempty"`
}
// BucketStatus records the observed state of a Bucket.
type BucketStatus struct {
// ObservedGeneration is the last observed generation of the Bucket object.
// +optional
ObservedGeneration int64 `json:"observedGeneration,omitempty"`
// Conditions holds the conditions for the Bucket.
// +optional
Conditions []metav1.Condition `json:"conditions,omitempty"`
// URL is the dynamic fetch link for the latest Artifact.
// It is provided on a "best effort" basis, and using the precise
// BucketStatus.Artifact data is recommended.
// +optional
URL string `json:"url,omitempty"`
// Artifact represents the last successful Bucket reconciliation.
// +optional
Artifact *Artifact `json:"artifact,omitempty"`
// ObservedIgnore is the observed exclusion patterns used for constructing
// the source artifact.
// +optional
ObservedIgnore *string `json:"observedIgnore,omitempty"`
meta.ReconcileRequestStatus `json:",inline"`
}
const (
// BucketOperationSucceededReason signals that the Bucket listing and fetch
// operations succeeded.
BucketOperationSucceededReason string = "BucketOperationSucceeded"
// BucketOperationFailedReason signals that the Bucket listing or fetch
// operations failed.
BucketOperationFailedReason string = "BucketOperationFailed"
)
// GetConditions returns the status conditions of the object.
func (in *Bucket) GetConditions() []metav1.Condition {
return in.Status.Conditions
}
// SetConditions sets the status conditions on the object.
func (in *Bucket) SetConditions(conditions []metav1.Condition) {
in.Status.Conditions = conditions
}
// GetRequeueAfter returns the duration after which the source must be reconciled again.
func (in *Bucket) GetRequeueAfter() time.Duration {
return in.Spec.Interval.Duration
}
// GetArtifact returns the latest artifact from the source if present in the status sub-resource.
func (in *Bucket) GetArtifact() *Artifact {
return in.Status.Artifact
}
// +genclient
// +kubebuilder:storageversion
// +kubebuilder:object:root=true
// +kubebuilder:subresource:status
// +kubebuilder:printcolumn:name="Endpoint",type=string,JSONPath=`.spec.endpoint`
// +kubebuilder:printcolumn:name="Age",type="date",JSONPath=".metadata.creationTimestamp",description=""
// +kubebuilder:printcolumn:name="Ready",type="string",JSONPath=".status.conditions[?(@.type==\"Ready\")].status",description=""
// +kubebuilder:printcolumn:name="Status",type="string",JSONPath=".status.conditions[?(@.type==\"Ready\")].message",description=""
// Bucket is the Schema for the buckets API.
type Bucket struct {
metav1.TypeMeta `json:",inline"`
metav1.ObjectMeta `json:"metadata,omitempty"`
Spec BucketSpec `json:"spec,omitempty"`
// +kubebuilder:default={"observedGeneration":-1}
Status BucketStatus `json:"status,omitempty"`
}
// BucketList contains a list of Bucket objects.
// +kubebuilder:object:root=true
type BucketList struct {
metav1.TypeMeta `json:",inline"`
metav1.ListMeta `json:"metadata,omitempty"`
Items []Bucket `json:"items"`
}
func init() {
SchemeBuilder.Register(&Bucket{}, &BucketList{})
}
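
As a quick illustration of the shape of this API, here is a minimal sketch that constructs a Bucket in Go and reads the requeue interval back through the GetRequeueAfter accessor defined above. The object name, namespace, bucket name, and endpoint are invented, and the import path assumes the published api module:

package main

import (
	"fmt"
	"time"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

	sourcev1 "github.com/fluxcd/source-controller/api/v1"
)

func main() {
	// A generic (S3 API compatible) Bucket polled every five minutes.
	bucket := sourcev1.Bucket{
		ObjectMeta: metav1.ObjectMeta{Name: "example", Namespace: "flux-system"},
		Spec: sourcev1.BucketSpec{
			Provider:   sourcev1.BucketProviderGeneric,
			BucketName: "artifacts",
			Endpoint:   "minio.example.com:9000",
			Interval:   metav1.Duration{Duration: 5 * time.Minute},
		},
	}

	// GetRequeueAfter simply surfaces spec.interval to the reconciler.
	fmt.Println(bucket.GetRequeueAfter()) // 5m0s
}

Note that the CEL validation rules on BucketSpec (e.g. STS only for the 'aws' and 'generic' providers) are enforced by the API server, not by this Go struct, so an example like this compiles regardless of provider/STS combinations.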

View File

@@ -1,118 +0,0 @@
/*
Copyright 2023 The Flux authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1
const SourceFinalizer = "finalizers.fluxcd.io"
const (
// ArtifactInStorageCondition indicates the availability of the Artifact in
// the storage.
// If True, the Artifact is stored successfully.
// This Condition is only present on the resource if the Artifact is
// successfully stored.
ArtifactInStorageCondition string = "ArtifactInStorage"
// ArtifactOutdatedCondition indicates the current Artifact of the Source
// is outdated.
// This is a "negative polarity" or "abnormal-true" type, and is only
// present on the resource if it is True.
ArtifactOutdatedCondition string = "ArtifactOutdated"
// SourceVerifiedCondition indicates the integrity verification of the
// Source.
// If True, the integrity check succeeded. If False, it failed.
// This Condition is only present on the resource if the integrity check
// is enabled.
SourceVerifiedCondition string = "SourceVerified"
// FetchFailedCondition indicates a transient or persistent fetch failure
// of an upstream Source.
// If True, observations on the upstream Source revision may be impossible,
// and the Artifact available for the Source may be outdated.
// This is a "negative polarity" or "abnormal-true" type, and is only
// present on the resource if it is True.
FetchFailedCondition string = "FetchFailed"
// BuildFailedCondition indicates a transient or persistent build failure
// of a Source's Artifact.
// If True, the Source can be in an ArtifactOutdatedCondition.
// This is a "negative polarity" or "abnormal-true" type, and is only
// present on the resource if it is True.
BuildFailedCondition string = "BuildFailed"
// StorageOperationFailedCondition indicates a transient or persistent
// failure related to storage. If True, the reconciliation failed while
// performing some filesystem operation.
// This is a "negative polarity" or "abnormal-true" type, and is only
// present on the resource if it is True.
StorageOperationFailedCondition string = "StorageOperationFailed"
)
// Reasons are provided as a utility, and are not part of the declarative API.
const (
// URLInvalidReason signals that a given Source has an invalid URL.
URLInvalidReason string = "URLInvalid"
// AuthenticationFailedReason signals that a Secret does not have the
// required fields, or the provided credentials do not match.
AuthenticationFailedReason string = "AuthenticationFailed"
// VerificationError signals that the Source's verification
// check failed.
VerificationError string = "VerificationError"
// DirCreationFailedReason signals a failure caused by a directory creation
// operation.
DirCreationFailedReason string = "DirectoryCreationFailed"
// StatOperationFailedReason signals a failure caused by a stat operation on
// a path.
StatOperationFailedReason string = "StatOperationFailed"
// ReadOperationFailedReason signals a failure caused by a read operation.
ReadOperationFailedReason string = "ReadOperationFailed"
// AcquireLockFailedReason signals a failure in acquiring a lock.
AcquireLockFailedReason string = "AcquireLockFailed"
// InvalidPathReason signals a failure caused by an invalid path.
InvalidPathReason string = "InvalidPath"
// ArchiveOperationFailedReason signals a failure in an archive operation.
ArchiveOperationFailedReason string = "ArchiveOperationFailed"
// SymlinkUpdateFailedReason signals a failure in updating a symlink.
SymlinkUpdateFailedReason string = "SymlinkUpdateFailed"
// ArtifactUpToDateReason signals that an existing Artifact is up-to-date
// with the Source.
ArtifactUpToDateReason string = "ArtifactUpToDate"
// CacheOperationFailedReason signals a failure in a cache operation.
CacheOperationFailedReason string = "CacheOperationFailed"
// PatchOperationFailedReason signals a failure in patching a Kubernetes API
// object.
PatchOperationFailedReason string = "PatchOperationFailed"
// InvalidSTSConfigurationReason signals that the STS configuration is invalid.
InvalidSTSConfigurationReason string = "InvalidSTSConfiguration"
// InvalidProviderConfigurationReason signals that the provider
// configuration is invalid.
InvalidProviderConfigurationReason string = "InvalidProviderConfiguration"
)
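// Usage sketch (illustrative, not part of the original file): recording one of
// the reasons above as an abnormal-true condition, using SetStatusCondition
// from k8s.io/apimachinery/pkg/api/meta. The obj and err names are
// placeholders; any type exposing GetConditions/SetConditions works.
//
//	conds := obj.GetConditions()
//	apimeta.SetStatusCondition(&conds, metav1.Condition{
//		Type:    FetchFailedCondition,
//		Status:  metav1.ConditionTrue,
//		Reason:  AuthenticationFailedReason,
//		Message: err.Error(),
//	})
//	obj.SetConditions(conds)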

View File

@ -1,378 +0,0 @@
/*
Copyright 2023 The Flux authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1
import (
"time"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"github.com/fluxcd/pkg/apis/meta"
)
const (
// GitRepositoryKind is the string representation of a GitRepository.
GitRepositoryKind = "GitRepository"
// GitProviderGeneric provides support for authentication using
// credentials specified in secretRef.
GitProviderGeneric string = "generic"
// GitProviderAzure provides support for authentication to Azure
// repositories using Managed Identity.
GitProviderAzure string = "azure"
// GitProviderGitHub provides support for authentication to Git
// repositories using GitHub App authentication.
GitProviderGitHub string = "github"
)
const (
// IncludeUnavailableCondition indicates one of the includes is not
// available. For example, because it does not exist, or does not have an
// Artifact.
// This is a "negative polarity" or "abnormal-true" type, and is only
// present on the resource if it is True.
IncludeUnavailableCondition string = "IncludeUnavailable"
)
// GitVerificationMode specifies the verification mode for a Git repository.
type GitVerificationMode string
// Valid checks the validity of the Git verification mode.
func (m GitVerificationMode) Valid() bool {
switch m {
case ModeGitHEAD, ModeGitTag, ModeGitTagAndHEAD:
return true
default:
return false
}
}
const (
// ModeGitHEAD implies that the HEAD of the Git repository (after it has been
// checked out to the required commit) should be verified.
ModeGitHEAD GitVerificationMode = "HEAD"
// ModeGitTag implies that the tag object specified in the checkout configuration
// should be verified.
ModeGitTag GitVerificationMode = "Tag"
// ModeGitTagAndHEAD implies that both the tag object and the commit it points
// to should be verified.
ModeGitTagAndHEAD GitVerificationMode = "TagAndHEAD"
)
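// Usage sketch (illustrative, not part of the original file): Valid accepts
// only the canonical spellings above; the legacy "head" value is instead
// normalized by GetMode further down in this file.
//
//	GitVerificationMode("Tag").Valid()  // true
//	GitVerificationMode("head").Valid() // false; GetMode falls back to ModeGitHEAD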
// GitRepositorySpec specifies the required configuration to produce an
// Artifact for a Git repository.
type GitRepositorySpec struct {
// URL specifies the Git repository URL; it can be an HTTP/S or SSH address.
// +kubebuilder:validation:Pattern="^(http|https|ssh)://.*$"
// +required
URL string `json:"url"`
// SecretRef specifies the Secret containing authentication credentials for
// the GitRepository.
// For HTTPS repositories the Secret must contain 'username' and 'password'
// fields for basic auth or 'bearerToken' field for token auth.
// For SSH repositories the Secret must contain 'identity'
// and 'known_hosts' fields.
// +optional
SecretRef *meta.LocalObjectReference `json:"secretRef,omitempty"`
// Provider used for authentication, can be 'azure', 'github', or 'generic'.
// When not specified, defaults to 'generic'.
// +kubebuilder:validation:Enum=generic;azure;github
// +optional
Provider string `json:"provider,omitempty"`
// Interval at which the GitRepository URL is checked for updates.
// This interval is approximate and may be subject to jitter to ensure
// efficient use of resources.
// +kubebuilder:validation:Type=string
// +kubebuilder:validation:Pattern="^([0-9]+(\\.[0-9]+)?(ms|s|m|h))+$"
// +required
Interval metav1.Duration `json:"interval"`
// Timeout for Git operations like cloning, defaults to 60s.
// +kubebuilder:default="60s"
// +kubebuilder:validation:Type=string
// +kubebuilder:validation:Pattern="^([0-9]+(\\.[0-9]+)?(ms|s|m))+$"
// +optional
Timeout *metav1.Duration `json:"timeout,omitempty"`
// Reference specifies the Git reference to resolve and monitor for
// changes, defaults to the 'master' branch.
// +optional
Reference *GitRepositoryRef `json:"ref,omitempty"`
// Verification specifies the configuration to verify the Git commit
// signature(s).
// +optional
Verification *GitRepositoryVerification `json:"verify,omitempty"`
// ProxySecretRef specifies the Secret containing the proxy configuration
// to use while communicating with the Git server.
// +optional
ProxySecretRef *meta.LocalObjectReference `json:"proxySecretRef,omitempty"`
// Ignore overrides the set of excluded patterns in the .sourceignore format
// (which is the same as .gitignore). If not provided, a default will be used;
// consult the documentation for your version to find out what those are.
// +optional
Ignore *string `json:"ignore,omitempty"`
// Suspend tells the controller to suspend the reconciliation of this
// GitRepository.
// +optional
Suspend bool `json:"suspend,omitempty"`
// RecurseSubmodules enables the initialization of all submodules within
// the GitRepository as cloned from the URL, using their default settings.
// +optional
RecurseSubmodules bool `json:"recurseSubmodules,omitempty"`
// Include specifies a list of GitRepository resources whose Artifacts
// should be included in the Artifact produced for this GitRepository.
Include []GitRepositoryInclude `json:"include,omitempty"`
// SparseCheckout specifies a list of directories to checkout when cloning
// the repository. If specified, only these directories are included in the
// Artifact produced for this GitRepository.
// +optional
SparseCheckout []string `json:"sparseCheckout,omitempty"`
}
// GitRepositoryInclude specifies a local reference to a GitRepository whose
// Artifact (sub-)contents must be included, and where they should be placed.
type GitRepositoryInclude struct {
// GitRepositoryRef specifies the GitRepository whose Artifact contents
// must be included.
// +required
GitRepositoryRef meta.LocalObjectReference `json:"repository"`
// FromPath specifies the path to copy contents from, defaults to the root
// of the Artifact.
// +optional
FromPath string `json:"fromPath,omitempty"`
// ToPath specifies the path to copy contents to, defaults to the name of
// the GitRepositoryRef.
// +optional
ToPath string `json:"toPath,omitempty"`
}
// GetFromPath returns the specified FromPath.
func (in *GitRepositoryInclude) GetFromPath() string {
return in.FromPath
}
// GetToPath returns the specified ToPath, falling back to the name of the
// GitRepositoryRef.
func (in *GitRepositoryInclude) GetToPath() string {
if in.ToPath == "" {
return in.GitRepositoryRef.Name
}
return in.ToPath
}
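// Usage sketch (illustrative, not part of the original file): an include
// without an explicit ToPath lands in a directory named after the referenced
// GitRepository.
//
//	inc := GitRepositoryInclude{
//		GitRepositoryRef: meta.LocalObjectReference{Name: "webapp"},
//		FromPath:         "./deploy",
//	}
//	inc.GetToPath() // "webapp"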
// GitRepositoryRef specifies the Git reference to resolve and checkout.
type GitRepositoryRef struct {
// Branch to check out, defaults to 'master' if no other field is defined.
// +optional
Branch string `json:"branch,omitempty"`
// Tag to check out, takes precedence over Branch.
// +optional
Tag string `json:"tag,omitempty"`
// SemVer tag expression to check out, takes precedence over Tag.
// +optional
SemVer string `json:"semver,omitempty"`
// Name of the reference to check out; takes precedence over Branch, Tag and SemVer.
//
// It must be a valid Git reference: https://git-scm.com/docs/git-check-ref-format#_description
// Examples: "refs/heads/main", "refs/tags/v0.1.0", "refs/pull/420/head", "refs/merge-requests/1/head"
// +optional
Name string `json:"name,omitempty"`
// Commit SHA to check out, takes precedence over all reference fields.
//
// This can be combined with Branch to shallow clone the branch, in which
// the commit is expected to exist.
// +optional
Commit string `json:"commit,omitempty"`
}
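// Usage sketch (illustrative, not part of the original file): when several
// reference fields are set, the documented precedence applies, so the Commit
// below wins over both Branch and Tag (the SHA is a placeholder).
//
//	ref := GitRepositoryRef{
//		Branch: "main",
//		Tag:    "v1.0.0",
//		Commit: "<sha>", // takes precedence over all other fields
//	}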
// GitRepositoryVerification specifies the Git commit signature verification
// strategy.
type GitRepositoryVerification struct {
// Mode specifies which Git object(s) should be verified.
//
// The variants "head" and "HEAD" both imply the same thing, i.e. verify
// the commit that the HEAD of the Git repository points to. The variant
// "head" solely exists to ensure backwards compatibility.
// +kubebuilder:validation:Enum=head;HEAD;Tag;TagAndHEAD
// +optional
// +kubebuilder:default:=HEAD
Mode GitVerificationMode `json:"mode,omitempty"`
// SecretRef specifies the Secret containing the public keys of trusted Git
// authors.
// +required
SecretRef meta.LocalObjectReference `json:"secretRef"`
}
// GitRepositoryStatus records the observed state of a Git repository.
type GitRepositoryStatus struct {
// ObservedGeneration is the last observed generation of the GitRepository
// object.
// +optional
ObservedGeneration int64 `json:"observedGeneration,omitempty"`
// Conditions holds the conditions for the GitRepository.
// +optional
Conditions []metav1.Condition `json:"conditions,omitempty"`
// Artifact represents the last successful GitRepository reconciliation.
// +optional
Artifact *Artifact `json:"artifact,omitempty"`
// IncludedArtifacts contains a list of the last successfully included
// Artifacts as instructed by GitRepositorySpec.Include.
// +optional
IncludedArtifacts []*Artifact `json:"includedArtifacts,omitempty"`
// ObservedIgnore is the observed exclusion patterns used for constructing
// the source artifact.
// +optional
ObservedIgnore *string `json:"observedIgnore,omitempty"`
// ObservedRecurseSubmodules is the observed resource submodules
// configuration used to produce the current Artifact.
// +optional
ObservedRecurseSubmodules bool `json:"observedRecurseSubmodules,omitempty"`
// ObservedInclude is the observed list of GitRepository resources used to
// produce the current Artifact.
// +optional
ObservedInclude []GitRepositoryInclude `json:"observedInclude,omitempty"`
// ObservedSparseCheckout is the observed list of directories used to
// produce the current Artifact.
// +optional
ObservedSparseCheckout []string `json:"observedSparseCheckout,omitempty"`
// SourceVerificationMode is the last used verification mode indicating
// which Git object(s) have been verified.
// +optional
SourceVerificationMode *GitVerificationMode `json:"sourceVerificationMode,omitempty"`
meta.ReconcileRequestStatus `json:",inline"`
}
const (
// GitOperationSucceedReason signals that a Git operation (e.g. clone,
// checkout, etc.) succeeded.
GitOperationSucceedReason string = "GitOperationSucceeded"
// GitOperationFailedReason signals that a Git operation (e.g. clone,
// checkout, etc.) failed.
GitOperationFailedReason string = "GitOperationFailed"
)
// GetConditions returns the status conditions of the object.
func (in GitRepository) GetConditions() []metav1.Condition {
return in.Status.Conditions
}
// SetConditions sets the status conditions on the object.
func (in *GitRepository) SetConditions(conditions []metav1.Condition) {
in.Status.Conditions = conditions
}
// GetRequeueAfter returns the duration after which the GitRepository must be
// reconciled again.
func (in GitRepository) GetRequeueAfter() time.Duration {
return in.Spec.Interval.Duration
}
// GetArtifact returns the latest Artifact from the GitRepository if present in
// the status sub-resource.
func (in *GitRepository) GetArtifact() *Artifact {
return in.Status.Artifact
}
// GetProvider returns the Git authentication provider.
func (v *GitRepository) GetProvider() string {
if v.Spec.Provider == "" {
return GitProviderGeneric
}
return v.Spec.Provider
}
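// Usage sketch (illustrative, not part of the original file): an unset
// provider resolves to the generic provider.
//
//	repo := GitRepository{}
//	repo.GetProvider() // GitProviderGeneric ("generic")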
// GetMode returns the declared GitVerificationMode, or a ModeGitHEAD default.
func (v *GitRepositoryVerification) GetMode() GitVerificationMode {
if v.Mode.Valid() {
return v.Mode
}
return ModeGitHEAD
}
// VerifyHEAD returns whether the configured mode instructs verification of
// the Git HEAD.
func (v *GitRepositoryVerification) VerifyHEAD() bool {
return v.GetMode() == ModeGitHEAD || v.GetMode() == ModeGitTagAndHEAD
}
// VerifyTag returns whether the configured mode instructs verification of
// the Git tag.
func (v *GitRepositoryVerification) VerifyTag() bool {
return v.GetMode() == ModeGitTag || v.GetMode() == ModeGitTagAndHEAD
}
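// Usage sketch (illustrative, not part of the original file): the mode
// helpers above make verification checks explicit.
//
//	v := &GitRepositoryVerification{Mode: ModeGitTagAndHEAD}
//	v.VerifyHEAD() // true
//	v.VerifyTag()  // true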
// +genclient
// +kubebuilder:storageversion
// +kubebuilder:object:root=true
// +kubebuilder:resource:shortName=gitrepo
// +kubebuilder:subresource:status
// +kubebuilder:printcolumn:name="URL",type=string,JSONPath=`.spec.url`
// +kubebuilder:printcolumn:name="Age",type="date",JSONPath=".metadata.creationTimestamp",description=""
// +kubebuilder:printcolumn:name="Ready",type="string",JSONPath=".status.conditions[?(@.type==\"Ready\")].status",description=""
// +kubebuilder:printcolumn:name="Status",type="string",JSONPath=".status.conditions[?(@.type==\"Ready\")].message",description=""
// GitRepository is the Schema for the gitrepositories API.
type GitRepository struct {
metav1.TypeMeta `json:",inline"`
metav1.ObjectMeta `json:"metadata,omitempty"`
Spec GitRepositorySpec `json:"spec,omitempty"`
// +kubebuilder:default={"observedGeneration":-1}
Status GitRepositoryStatus `json:"status,omitempty"`
}
// GitRepositoryList contains a list of GitRepository objects.
// +kubebuilder:object:root=true
type GitRepositoryList struct {
metav1.TypeMeta `json:",inline"`
metav1.ListMeta `json:"metadata,omitempty"`
Items []GitRepository `json:"items"`
}
func init() {
SchemeBuilder.Register(&GitRepository{}, &GitRepositoryList{})
}

View File

@ -1,33 +0,0 @@
/*
Copyright 2023 The Flux authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1
import (
"k8s.io/apimachinery/pkg/runtime/schema"
"sigs.k8s.io/controller-runtime/pkg/scheme"
)
var (
// GroupVersion is group version used to register these objects.
GroupVersion = schema.GroupVersion{Group: "source.toolkit.fluxcd.io", Version: "v1"}
// SchemeBuilder is used to add go types to the GroupVersionKind scheme.
SchemeBuilder = &scheme.Builder{GroupVersion: GroupVersion}
// AddToScheme adds the types in this group-version to the given scheme.
AddToScheme = SchemeBuilder.AddToScheme
)
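// Usage sketch (illustrative, not part of the original file): registering this
// group-version on a scheme, e.g. before constructing a controller-runtime
// client. The sourcev1 alias assumes this package's module path.
//
//	import (
//		"k8s.io/apimachinery/pkg/runtime"
//		sourcev1 "github.com/fluxcd/source-controller/api/v1"
//	)
//
//	scheme := runtime.NewScheme()
//	if err := sourcev1.AddToScheme(scheme); err != nil {
//		panic(err)
//	}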

View File

@ -1,227 +0,0 @@
/*
Copyright 2024 The Flux authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1
import (
"time"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"github.com/fluxcd/pkg/apis/meta"
)
// HelmChartKind is the string representation of a HelmChart.
const HelmChartKind = "HelmChart"
// HelmChartSpec specifies the desired state of a Helm chart.
type HelmChartSpec struct {
// Chart is the name or path the Helm chart is available at in the
// SourceRef.
// +required
Chart string `json:"chart"`
// Version is the chart version semver expression, ignored for charts from
// GitRepository and Bucket sources. Defaults to latest when omitted.
// +kubebuilder:default:=*
// +optional
Version string `json:"version,omitempty"`
// SourceRef is the reference to the Source the chart is available at.
// +required
SourceRef LocalHelmChartSourceReference `json:"sourceRef"`
// Interval at which the HelmChart SourceRef is checked for updates.
// This interval is approximate and may be subject to jitter to ensure
// efficient use of resources.
// +kubebuilder:validation:Type=string
// +kubebuilder:validation:Pattern="^([0-9]+(\\.[0-9]+)?(ms|s|m|h))+$"
// +required
Interval metav1.Duration `json:"interval"`
// ReconcileStrategy determines what enables the creation of a new artifact.
// Valid values are ('ChartVersion', 'Revision').
// See the documentation of the values for an explanation on their behavior.
// Defaults to ChartVersion when omitted.
// +kubebuilder:validation:Enum=ChartVersion;Revision
// +kubebuilder:default:=ChartVersion
// +optional
ReconcileStrategy string `json:"reconcileStrategy,omitempty"`
// ValuesFiles is an alternative list of values files to use as the chart
// values (values.yaml is not included by default), expected to be a
// relative path in the SourceRef.
// Values files are merged in the order of this list with the last file
// overriding the first. Ignored when omitted.
// +optional
ValuesFiles []string `json:"valuesFiles,omitempty"`
// IgnoreMissingValuesFiles controls whether to silently ignore missing values
// files rather than failing.
// +optional
IgnoreMissingValuesFiles bool `json:"ignoreMissingValuesFiles,omitempty"`
// Suspend tells the controller to suspend the reconciliation of this
// source.
// +optional
Suspend bool `json:"suspend,omitempty"`
// Verify contains the secret name containing the trusted public keys
// used to verify the signature and specifies which provider to use to check
// whether the OCI image is authentic.
// This field is only supported when using HelmRepository source with spec.type 'oci'.
// Chart dependencies, which are not bundled in the umbrella chart artifact, are not verified.
// +optional
Verify *OCIRepositoryVerification `json:"verify,omitempty"`
}
const (
// ReconcileStrategyChartVersion reconciles when the version of the Helm chart is different.
ReconcileStrategyChartVersion string = "ChartVersion"
// ReconcileStrategyRevision reconciles when the Revision of the source is different.
ReconcileStrategyRevision string = "Revision"
)
// LocalHelmChartSourceReference contains enough information to let you locate
// the typed referenced object at namespace level.
type LocalHelmChartSourceReference struct {
// APIVersion of the referent.
// +optional
APIVersion string `json:"apiVersion,omitempty"`
// Kind of the referent, valid values are ('HelmRepository', 'GitRepository',
// 'Bucket').
// +kubebuilder:validation:Enum=HelmRepository;GitRepository;Bucket
// +required
Kind string `json:"kind"`
// Name of the referent.
// +required
Name string `json:"name"`
}
// HelmChartStatus records the observed state of the HelmChart.
type HelmChartStatus struct {
// ObservedGeneration is the last observed generation of the HelmChart
// object.
// +optional
ObservedGeneration int64 `json:"observedGeneration,omitempty"`
// ObservedSourceArtifactRevision is the last observed Artifact.Revision
// of the HelmChartSpec.SourceRef.
// +optional
ObservedSourceArtifactRevision string `json:"observedSourceArtifactRevision,omitempty"`
// ObservedChartName is the last observed chart name as specified by the
// resolved chart reference.
// +optional
ObservedChartName string `json:"observedChartName,omitempty"`
// ObservedValuesFiles are the observed value files of the last successful
// reconciliation.
// It matches the chart in the last successfully reconciled artifact.
// +optional
ObservedValuesFiles []string `json:"observedValuesFiles,omitempty"`
// Conditions holds the conditions for the HelmChart.
// +optional
Conditions []metav1.Condition `json:"conditions,omitempty"`
// URL is the dynamic fetch link for the latest Artifact.
// It is provided on a "best effort" basis, and using the precise
// HelmChartStatus.Artifact data is recommended.
// +optional
URL string `json:"url,omitempty"`
// Artifact represents the output of the last successful reconciliation.
// +optional
Artifact *Artifact `json:"artifact,omitempty"`
meta.ReconcileRequestStatus `json:",inline"`
}
const (
// ChartPullSucceededReason signals that the pull of the Helm chart
// succeeded.
ChartPullSucceededReason string = "ChartPullSucceeded"
// ChartPackageSucceededReason signals that the package of the Helm
// chart succeeded.
ChartPackageSucceededReason string = "ChartPackageSucceeded"
)
// GetConditions returns the status conditions of the object.
func (in HelmChart) GetConditions() []metav1.Condition {
return in.Status.Conditions
}
// SetConditions sets the status conditions on the object.
func (in *HelmChart) SetConditions(conditions []metav1.Condition) {
in.Status.Conditions = conditions
}
// GetRequeueAfter returns the duration after which the source must be
// reconciled again.
func (in HelmChart) GetRequeueAfter() time.Duration {
return in.Spec.Interval.Duration
}
// GetArtifact returns the latest artifact from the source if present in the
// status sub-resource.
func (in *HelmChart) GetArtifact() *Artifact {
return in.Status.Artifact
}
// GetValuesFiles returns the list of HelmChartSpec.ValuesFiles.
func (in *HelmChart) GetValuesFiles() []string {
return in.Spec.ValuesFiles
}
// +genclient
// +kubebuilder:storageversion
// +kubebuilder:object:root=true
// +kubebuilder:resource:shortName=hc
// +kubebuilder:subresource:status
// +kubebuilder:printcolumn:name="Chart",type=string,JSONPath=`.spec.chart`
// +kubebuilder:printcolumn:name="Version",type=string,JSONPath=`.spec.version`
// +kubebuilder:printcolumn:name="Source Kind",type=string,JSONPath=`.spec.sourceRef.kind`
// +kubebuilder:printcolumn:name="Source Name",type=string,JSONPath=`.spec.sourceRef.name`
// +kubebuilder:printcolumn:name="Age",type="date",JSONPath=".metadata.creationTimestamp",description=""
// +kubebuilder:printcolumn:name="Ready",type="string",JSONPath=".status.conditions[?(@.type==\"Ready\")].status",description=""
// +kubebuilder:printcolumn:name="Status",type="string",JSONPath=".status.conditions[?(@.type==\"Ready\")].message",description=""
// HelmChart is the Schema for the helmcharts API.
type HelmChart struct {
metav1.TypeMeta `json:",inline"`
metav1.ObjectMeta `json:"metadata,omitempty"`
Spec HelmChartSpec `json:"spec,omitempty"`
// +kubebuilder:default={"observedGeneration":-1}
Status HelmChartStatus `json:"status,omitempty"`
}
// HelmChartList contains a list of HelmChart objects.
// +kubebuilder:object:root=true
type HelmChartList struct {
metav1.TypeMeta `json:",inline"`
metav1.ListMeta `json:"metadata,omitempty"`
Items []HelmChart `json:"items"`
}
func init() {
SchemeBuilder.Register(&HelmChart{}, &HelmChartList{})
}
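// Usage sketch (illustrative, not part of the original file): a minimal
// HelmChart pulling chart "podinfo" from a HelmRepository of the same name,
// reconciled every 10 minutes (names and versions are hypothetical).
//
//	chart := HelmChart{
//		Spec: HelmChartSpec{
//			Chart:    "podinfo",
//			Version:  "6.x",
//			Interval: metav1.Duration{Duration: 10 * time.Minute},
//			SourceRef: LocalHelmChartSourceReference{
//				Kind: "HelmRepository",
//				Name: "podinfo",
//			},
//		},
//	}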

View File

@ -1,228 +0,0 @@
/*
Copyright 2024 The Flux authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1
import (
"time"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"github.com/fluxcd/pkg/apis/acl"
"github.com/fluxcd/pkg/apis/meta"
)
const (
// HelmRepositoryKind is the string representation of a HelmRepository.
HelmRepositoryKind = "HelmRepository"
// HelmRepositoryURLIndexKey is the key used for indexing HelmRepository
// objects by their HelmRepositorySpec.URL.
HelmRepositoryURLIndexKey = ".metadata.helmRepositoryURL"
// HelmRepositoryTypeDefault is the default HelmRepository type.
// It is used when no type is specified and corresponds to a Helm repository.
HelmRepositoryTypeDefault = "default"
// HelmRepositoryTypeOCI is the type for an OCI repository.
HelmRepositoryTypeOCI = "oci"
)
// HelmRepositorySpec specifies the required configuration to produce an
// Artifact for a Helm repository index YAML.
type HelmRepositorySpec struct {
// URL of the Helm repository; a valid URL contains at least a protocol and
// host.
// +kubebuilder:validation:Pattern="^(http|https|oci)://.*$"
// +required
URL string `json:"url"`
// SecretRef specifies the Secret containing authentication credentials
// for the HelmRepository.
// For HTTP/S basic auth the secret must contain 'username' and 'password'
// fields.
// Support for TLS auth using the 'certFile' and 'keyFile', and/or 'caFile'
// keys is deprecated. Please use `.spec.certSecretRef` instead.
// +optional
SecretRef *meta.LocalObjectReference `json:"secretRef,omitempty"`
// CertSecretRef can be given the name of a Secret containing
// either or both of
//
// - a PEM-encoded client certificate (`tls.crt`) and private
// key (`tls.key`);
// - a PEM-encoded CA certificate (`ca.crt`)
//
// and whichever are supplied, will be used for connecting to the
// registry. The client cert and key are useful if you are
// authenticating with a certificate; the CA cert is useful if
// you are using a self-signed server certificate. The Secret must
// be of type `Opaque` or `kubernetes.io/tls`.
//
// It takes precedence over the values specified in the Secret referred
// to by `.spec.secretRef`.
// +optional
CertSecretRef *meta.LocalObjectReference `json:"certSecretRef,omitempty"`
// PassCredentials allows the credentials from the SecretRef to be passed
// on to a host that does not match the host as defined in URL.
// This may be required if the host of the advertised chart URLs in the
// index differs from the defined URL.
// Enabling this should be done with caution, as it can potentially result
// in credentials getting stolen in a MITM attack.
// +optional
PassCredentials bool `json:"passCredentials,omitempty"`
// Interval at which the HelmRepository URL is checked for updates.
// This interval is approximate and may be subject to jitter to ensure
// efficient use of resources.
// +kubebuilder:validation:Type=string
// +kubebuilder:validation:Pattern="^([0-9]+(\\.[0-9]+)?(ms|s|m|h))+$"
// +optional
Interval metav1.Duration `json:"interval,omitempty"`
// Insecure allows connecting to a non-TLS HTTP container registry.
// This field is only taken into account if the .spec.type field is set to 'oci'.
// +optional
Insecure bool `json:"insecure,omitempty"`
// Timeout is used for the index fetch operation for an HTTPS Helm repository,
// and for remote OCI Repository operations like pulling an OCI Helm chart
// by the associated HelmChart.
// Its default value is 60s.
// +kubebuilder:validation:Type=string
// +kubebuilder:validation:Pattern="^([0-9]+(\\.[0-9]+)?(ms|s|m))+$"
// +optional
Timeout *metav1.Duration `json:"timeout,omitempty"`
// Suspend tells the controller to suspend the reconciliation of this
// HelmRepository.
// +optional
Suspend bool `json:"suspend,omitempty"`
// AccessFrom specifies an Access Control List for allowing cross-namespace
// references to this object.
// NOTE: Not implemented, provisional as of https://github.com/fluxcd/flux2/pull/2092
// +optional
AccessFrom *acl.AccessFrom `json:"accessFrom,omitempty"`
// Type of the HelmRepository.
// When this field is set to "oci", the URL field value must be prefixed with "oci://".
// +kubebuilder:validation:Enum=default;oci
// +optional
Type string `json:"type,omitempty"`
// Provider used for authentication, can be 'aws', 'azure', 'gcp' or 'generic'.
// This field is optional, and only taken into account if the .spec.type field is set to 'oci'.
// When not specified, defaults to 'generic'.
// +kubebuilder:validation:Enum=generic;aws;azure;gcp
// +kubebuilder:default:=generic
// +optional
Provider string `json:"provider,omitempty"`
}
// HelmRepositoryStatus records the observed state of the HelmRepository.
type HelmRepositoryStatus struct {
// ObservedGeneration is the last observed generation of the HelmRepository
// object.
// +optional
ObservedGeneration int64 `json:"observedGeneration,omitempty"`
// Conditions holds the conditions for the HelmRepository.
// +optional
Conditions []metav1.Condition `json:"conditions,omitempty"`
// URL is the dynamic fetch link for the latest Artifact.
// It is provided on a "best effort" basis, and using the precise
// HelmRepositoryStatus.Artifact data is recommended.
// +optional
URL string `json:"url,omitempty"`
// Artifact represents the last successful HelmRepository reconciliation.
// +optional
Artifact *Artifact `json:"artifact,omitempty"`
meta.ReconcileRequestStatus `json:",inline"`
}
const (
// IndexationFailedReason signals that the HelmRepository index fetch
// failed.
IndexationFailedReason string = "IndexationFailed"
)
// GetConditions returns the status conditions of the object.
func (in HelmRepository) GetConditions() []metav1.Condition {
return in.Status.Conditions
}
// SetConditions sets the status conditions on the object.
func (in *HelmRepository) SetConditions(conditions []metav1.Condition) {
in.Status.Conditions = conditions
}
// GetRequeueAfter returns the duration after which the source must be
// reconciled again.
func (in HelmRepository) GetRequeueAfter() time.Duration {
if in.Spec.Interval.Duration != 0 {
return in.Spec.Interval.Duration
}
return time.Minute
}
// GetTimeout returns the timeout duration used for various operations related
// to this HelmRepository.
func (in HelmRepository) GetTimeout() time.Duration {
if in.Spec.Timeout != nil {
return in.Spec.Timeout.Duration
}
return time.Minute
}
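// Usage sketch (illustrative, not part of the original file): both fall-backs
// above resolve to one minute when the spec leaves the fields unset.
//
//	repo := HelmRepository{}
//	repo.GetRequeueAfter() // time.Minute
//	repo.GetTimeout()      // time.Minute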
// GetArtifact returns the latest artifact from the source if present in the
// status sub-resource.
func (in *HelmRepository) GetArtifact() *Artifact {
return in.Status.Artifact
}
// +genclient
// +kubebuilder:storageversion
// +kubebuilder:object:root=true
// +kubebuilder:resource:shortName=helmrepo
// +kubebuilder:subresource:status
// +kubebuilder:printcolumn:name="URL",type=string,JSONPath=`.spec.url`
// +kubebuilder:printcolumn:name="Age",type="date",JSONPath=".metadata.creationTimestamp",description=""
// +kubebuilder:printcolumn:name="Ready",type="string",JSONPath=".status.conditions[?(@.type==\"Ready\")].status",description=""
// +kubebuilder:printcolumn:name="Status",type="string",JSONPath=".status.conditions[?(@.type==\"Ready\")].message",description=""
// HelmRepository is the Schema for the helmrepositories API.
type HelmRepository struct {
metav1.TypeMeta `json:",inline"`
metav1.ObjectMeta `json:"metadata,omitempty"`
Spec HelmRepositorySpec `json:"spec,omitempty"`
// +kubebuilder:default={"observedGeneration":-1}
Status HelmRepositoryStatus `json:"status,omitempty"`
}
// HelmRepositoryList contains a list of HelmRepository objects.
// +kubebuilder:object:root=true
type HelmRepositoryList struct {
metav1.TypeMeta `json:",inline"`
metav1.ListMeta `json:"metadata,omitempty"`
Items []HelmRepository `json:"items"`
}
func init() {
SchemeBuilder.Register(&HelmRepository{}, &HelmRepositoryList{})
}

View File

@ -1,296 +0,0 @@
/*
Copyright 2025 The Flux authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1
import (
"time"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"github.com/fluxcd/pkg/apis/meta"
)
const (
// OCIRepositoryKind is the string representation of an OCIRepository.
OCIRepositoryKind = "OCIRepository"
// OCIRepositoryPrefix is the prefix used for OCIRepository URLs.
OCIRepositoryPrefix = "oci://"
// GenericOCIProvider provides support for authentication using static credentials
// for any OCI compatible API such as Docker Registry, GitHub Container Registry,
// Docker Hub, Quay, etc.
GenericOCIProvider string = "generic"
// AmazonOCIProvider provides support for OCI authentication using AWS IRSA.
AmazonOCIProvider string = "aws"
// GoogleOCIProvider provides support for OCI authentication using GCP workload identity.
GoogleOCIProvider string = "gcp"
// AzureOCIProvider provides support for OCI authentication using an Azure Service Principal,
// Managed Identity or Shared Key.
AzureOCIProvider string = "azure"
// OCILayerExtract defines the operation type for extracting the content from an OCI artifact layer.
OCILayerExtract = "extract"
// OCILayerCopy defines the operation type for copying the content from an OCI artifact layer.
OCILayerCopy = "copy"
)
// OCIRepositorySpec defines the desired state of OCIRepository.
type OCIRepositorySpec struct {
// URL is a reference to an OCI artifact repository hosted
// on a remote container registry.
// +kubebuilder:validation:Pattern="^oci://.*$"
// +required
URL string `json:"url"`
// The OCI reference to pull and monitor for changes,
// defaults to the latest tag.
// +optional
Reference *OCIRepositoryRef `json:"ref,omitempty"`
// LayerSelector specifies which layer should be extracted from the OCI artifact.
// When not specified, the first layer found in the artifact is selected.
// +optional
LayerSelector *OCILayerSelector `json:"layerSelector,omitempty"`
// The provider used for authentication, can be 'aws', 'azure', 'gcp' or 'generic'.
// When not specified, defaults to 'generic'.
// +kubebuilder:validation:Enum=generic;aws;azure;gcp
// +kubebuilder:default:=generic
// +optional
Provider string `json:"provider,omitempty"`
// SecretRef contains the secret name containing the registry login
// credentials to resolve image metadata.
// The secret must be of type kubernetes.io/dockerconfigjson.
// +optional
SecretRef *meta.LocalObjectReference `json:"secretRef,omitempty"`
// Verify contains the secret name containing the trusted public keys
// used to verify the signature and specifies which provider to use to check
// whether the OCI image is authentic.
// +optional
Verify *OCIRepositoryVerification `json:"verify,omitempty"`
// ServiceAccountName is the name of the Kubernetes ServiceAccount used to authenticate
// the image pull if the service account has attached pull secrets. For more information:
// https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/#add-imagepullsecrets-to-a-service-account
// +optional
ServiceAccountName string `json:"serviceAccountName,omitempty"`
// CertSecretRef can be given the name of a Secret containing
// either or both of
//
// - a PEM-encoded client certificate (`tls.crt`) and private
// key (`tls.key`);
// - a PEM-encoded CA certificate (`ca.crt`)
//
// and whichever are supplied, will be used for connecting to the
// registry. The client cert and key are useful if you are
// authenticating with a certificate; the CA cert is useful if
// you are using a self-signed server certificate. The Secret must
// be of type `Opaque` or `kubernetes.io/tls`.
// +optional
CertSecretRef *meta.LocalObjectReference `json:"certSecretRef,omitempty"`
// ProxySecretRef specifies the Secret containing the proxy configuration
// to use while communicating with the container registry.
// +optional
ProxySecretRef *meta.LocalObjectReference `json:"proxySecretRef,omitempty"`
// Interval at which the OCIRepository URL is checked for updates.
// This interval is approximate and may be subject to jitter to ensure
// efficient use of resources.
// +kubebuilder:validation:Type=string
// +kubebuilder:validation:Pattern="^([0-9]+(\\.[0-9]+)?(ms|s|m|h))+$"
// +required
Interval metav1.Duration `json:"interval"`
// The timeout for remote OCI Repository operations like pulling, defaults to 60s.
// +kubebuilder:default="60s"
// +kubebuilder:validation:Type=string
// +kubebuilder:validation:Pattern="^([0-9]+(\\.[0-9]+)?(ms|s|m))+$"
// +optional
Timeout *metav1.Duration `json:"timeout,omitempty"`
// Ignore overrides the set of excluded patterns in the .sourceignore format
// (which is the same as .gitignore). If not provided, a default will be used;
// consult the documentation for your version to find out what those are.
// +optional
Ignore *string `json:"ignore,omitempty"`
// Insecure allows connecting to a non-TLS HTTP container registry.
// +optional
Insecure bool `json:"insecure,omitempty"`
// This flag tells the controller to suspend the reconciliation of this source.
// +optional
Suspend bool `json:"suspend,omitempty"`
}
// OCIRepositoryRef defines the image reference for the OCIRepository's URL.
type OCIRepositoryRef struct {
// Digest is the image digest to pull, takes precedence over SemVer.
// The value should be in the format 'sha256:<HASH>'.
// +optional
Digest string `json:"digest,omitempty"`
// SemVer is the range of tags to pull, selecting the latest within
// the range, takes precedence over Tag.
// +optional
SemVer string `json:"semver,omitempty"`
// SemverFilter is a regex pattern to filter the tags within the SemVer range.
// +optional
SemverFilter string `json:"semverFilter,omitempty"`
// Tag is the image tag to pull, defaults to latest.
// +optional
Tag string `json:"tag,omitempty"`
}
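// Usage sketch (illustrative, not part of the original file): the documented
// precedence means Digest beats SemVer, which beats Tag, so only the Digest
// below is used (the digest value is a placeholder).
//
//	ref := OCIRepositoryRef{
//		Tag:    "latest",
//		SemVer: ">=6.0.0",
//		Digest: "sha256:<digest>", // takes precedence over SemVer and Tag
//	}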
// OCILayerSelector specifies which layer should be extracted from an OCI Artifact.
type OCILayerSelector struct {
// MediaType specifies the OCI media type of the layer
// which should be extracted from the OCI Artifact. The
// first layer matching this type is selected.
// +optional
MediaType string `json:"mediaType,omitempty"`
// Operation specifies how the selected layer should be processed.
// By default, the layer compressed content is extracted to storage.
// When the operation is set to 'copy', the layer compressed content
// is persisted to storage as it is.
// +kubebuilder:validation:Enum=extract;copy
// +optional
Operation string `json:"operation,omitempty"`
}
// OCIRepositoryStatus defines the observed state of OCIRepository.
type OCIRepositoryStatus struct {
// ObservedGeneration is the last observed generation.
// +optional
ObservedGeneration int64 `json:"observedGeneration,omitempty"`
// Conditions holds the conditions for the OCIRepository.
// +optional
Conditions []metav1.Condition `json:"conditions,omitempty"`
// URL is the download link for the artifact output of the last OCI Repository sync.
// +optional
URL string `json:"url,omitempty"`
// Artifact represents the output of the last successful OCI Repository sync.
// +optional
Artifact *Artifact `json:"artifact,omitempty"`
// ObservedIgnore is the observed exclusion patterns used for constructing
// the source artifact.
// +optional
ObservedIgnore *string `json:"observedIgnore,omitempty"`
// ObservedLayerSelector is the observed layer selector used for constructing
// the source artifact.
// +optional
ObservedLayerSelector *OCILayerSelector `json:"observedLayerSelector,omitempty"`
meta.ReconcileRequestStatus `json:",inline"`
}
const (
// OCIPullFailedReason signals that a pull operation failed.
OCIPullFailedReason string = "OCIArtifactPullFailed"
// OCILayerOperationFailedReason signals that an OCI layer operation failed.
OCILayerOperationFailedReason string = "OCIArtifactLayerOperationFailed"
)
// GetConditions returns the status conditions of the object.
func (in OCIRepository) GetConditions() []metav1.Condition {
return in.Status.Conditions
}
// SetConditions sets the status conditions on the object.
func (in *OCIRepository) SetConditions(conditions []metav1.Condition) {
in.Status.Conditions = conditions
}
// GetRequeueAfter returns the duration after which the OCIRepository must be
// reconciled again.
func (in OCIRepository) GetRequeueAfter() time.Duration {
return in.Spec.Interval.Duration
}
// GetArtifact returns the latest Artifact from the OCIRepository if present in
// the status sub-resource.
func (in *OCIRepository) GetArtifact() *Artifact {
return in.Status.Artifact
}
// GetLayerMediaType returns the layer selector media type if specified in the spec.
func (in *OCIRepository) GetLayerMediaType() string {
if in.Spec.LayerSelector == nil {
return ""
}
return in.Spec.LayerSelector.MediaType
}
// GetLayerOperation returns the layer selector operation (defaults to extract).
func (in *OCIRepository) GetLayerOperation() string {
if in.Spec.LayerSelector == nil || in.Spec.LayerSelector.Operation == "" {
return OCILayerExtract
}
return in.Spec.LayerSelector.Operation
}
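// Usage sketch (illustrative, not part of the original file): without a layer
// selector the defaults above select the first layer and extract it.
//
//	repo := &OCIRepository{}
//	repo.GetLayerMediaType() // ""
//	repo.GetLayerOperation() // OCILayerExtract ("extract")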
// +genclient
// +kubebuilder:storageversion
// +kubebuilder:object:root=true
// +kubebuilder:resource:shortName=ocirepo
// +kubebuilder:subresource:status
// +kubebuilder:printcolumn:name="URL",type=string,JSONPath=`.spec.url`
// +kubebuilder:printcolumn:name="Ready",type="string",JSONPath=".status.conditions[?(@.type==\"Ready\")].status",description=""
// +kubebuilder:printcolumn:name="Status",type="string",JSONPath=".status.conditions[?(@.type==\"Ready\")].message",description=""
// +kubebuilder:printcolumn:name="Age",type="date",JSONPath=".metadata.creationTimestamp",description=""
// OCIRepository is the Schema for the ocirepositories API.
type OCIRepository struct {
metav1.TypeMeta `json:",inline"`
metav1.ObjectMeta `json:"metadata,omitempty"`
Spec OCIRepositorySpec `json:"spec,omitempty"`
// +kubebuilder:default={"observedGeneration":-1}
Status OCIRepositoryStatus `json:"status,omitempty"`
}
// OCIRepositoryList contains a list of OCIRepository objects.
// +kubebuilder:object:root=true
type OCIRepositoryList struct {
metav1.TypeMeta `json:",inline"`
metav1.ListMeta `json:"metadata,omitempty"`
Items []OCIRepository `json:"items"`
}
func init() {
SchemeBuilder.Register(&OCIRepository{}, &OCIRepositoryList{})
}

View File

@ -1,56 +0,0 @@
/*
Copyright 2024 The Flux authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1
import (
"github.com/fluxcd/pkg/apis/meta"
)
// OCIRepositoryVerification verifies the authenticity of an OCI Artifact.
type OCIRepositoryVerification struct {
// Provider specifies the technology used to sign the OCI Artifact.
// +kubebuilder:validation:Enum=cosign;notation
// +kubebuilder:default:=cosign
Provider string `json:"provider"`
// SecretRef specifies the Kubernetes Secret containing the
// trusted public keys.
// +optional
SecretRef *meta.LocalObjectReference `json:"secretRef,omitempty"`
// MatchOIDCIdentity specifies the identity matching criteria to use
// while verifying an OCI artifact which was signed using Cosign keyless
// signing. The artifact's identity is deemed to be verified if any of the
// specified matchers match against the identity.
// +optional
MatchOIDCIdentity []OIDCIdentityMatch `json:"matchOIDCIdentity,omitempty"`
}
// OIDCIdentityMatch specifies options for verifying the certificate identity,
// i.e. the issuer and the subject of the certificate.
type OIDCIdentityMatch struct {
// Issuer specifies the regex pattern to match against to verify
// the OIDC issuer in the Fulcio certificate. The pattern must be a
// valid Go regular expression.
// +required
Issuer string `json:"issuer"`
// Subject specifies the regex pattern to match against to verify
// the identity subject in the Fulcio certificate. The pattern must
// be a valid Go regular expression.
// +required
Subject string `json:"subject"`
}
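// Usage sketch (illustrative, not part of the original file): keyless cosign
// verification that only trusts artifacts signed with a GitHub Actions OIDC
// identity; the issuer and subject patterns are hypothetical examples.
//
//	verify := OCIRepositoryVerification{
//		Provider: "cosign",
//		MatchOIDCIdentity: []OIDCIdentityMatch{{
//			Issuer:  `^https://token\.actions\.githubusercontent\.com$`,
//			Subject: `^https://github\.com/my-org/my-repo.*$`,
//		}},
//	}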

View File

@ -1,45 +0,0 @@
/*
Copyright 2023 The Flux authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1
import (
"time"
"k8s.io/apimachinery/pkg/runtime"
)
const (
// SourceIndexKey is the key used for indexing objects based on their
// referenced Source.
SourceIndexKey string = ".metadata.source"
)
// Source is the interface that provides generic access to the Artifact and
// interval. It must be supported by all kinds of the source.toolkit.fluxcd.io
// API group.
//
// +k8s:deepcopy-gen=false
type Source interface {
runtime.Object
// GetRequeueAfter returns the duration after which the source must be
// reconciled again.
GetRequeueAfter() time.Duration
// GetArtifact returns the latest artifact from the source if present in
// the status sub-resource.
GetArtifact() *Artifact
}
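// Usage sketch (illustrative, not part of the original file): the Source
// interface lets helpers treat all source kinds uniformly.
//
//	func artifactRevision(s Source) string {
//		if a := s.GetArtifact(); a != nil {
//			return a.Revision
//		}
//		return ""
//	}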

View File

@ -1,920 +0,0 @@
//go:build !ignore_autogenerated
/*
Copyright 2024 The Flux authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by controller-gen. DO NOT EDIT.
package v1
import (
"github.com/fluxcd/pkg/apis/acl"
"github.com/fluxcd/pkg/apis/meta"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
)
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *Artifact) DeepCopyInto(out *Artifact) {
*out = *in
in.LastUpdateTime.DeepCopyInto(&out.LastUpdateTime)
if in.Size != nil {
in, out := &in.Size, &out.Size
*out = new(int64)
**out = **in
}
if in.Metadata != nil {
in, out := &in.Metadata, &out.Metadata
*out = make(map[string]string, len(*in))
for key, val := range *in {
(*out)[key] = val
}
}
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Artifact.
func (in *Artifact) DeepCopy() *Artifact {
if in == nil {
return nil
}
out := new(Artifact)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *Bucket) DeepCopyInto(out *Bucket) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
in.Spec.DeepCopyInto(&out.Spec)
in.Status.DeepCopyInto(&out.Status)
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Bucket.
func (in *Bucket) DeepCopy() *Bucket {
if in == nil {
return nil
}
out := new(Bucket)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *Bucket) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *BucketList) DeepCopyInto(out *BucketList) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ListMeta.DeepCopyInto(&out.ListMeta)
if in.Items != nil {
in, out := &in.Items, &out.Items
*out = make([]Bucket, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BucketList.
func (in *BucketList) DeepCopy() *BucketList {
if in == nil {
return nil
}
out := new(BucketList)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *BucketList) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *BucketSTSSpec) DeepCopyInto(out *BucketSTSSpec) {
*out = *in
if in.SecretRef != nil {
in, out := &in.SecretRef, &out.SecretRef
*out = new(meta.LocalObjectReference)
**out = **in
}
if in.CertSecretRef != nil {
in, out := &in.CertSecretRef, &out.CertSecretRef
*out = new(meta.LocalObjectReference)
**out = **in
}
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BucketSTSSpec.
func (in *BucketSTSSpec) DeepCopy() *BucketSTSSpec {
if in == nil {
return nil
}
out := new(BucketSTSSpec)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *BucketSpec) DeepCopyInto(out *BucketSpec) {
*out = *in
if in.STS != nil {
in, out := &in.STS, &out.STS
*out = new(BucketSTSSpec)
(*in).DeepCopyInto(*out)
}
if in.SecretRef != nil {
in, out := &in.SecretRef, &out.SecretRef
*out = new(meta.LocalObjectReference)
**out = **in
}
if in.CertSecretRef != nil {
in, out := &in.CertSecretRef, &out.CertSecretRef
*out = new(meta.LocalObjectReference)
**out = **in
}
if in.ProxySecretRef != nil {
in, out := &in.ProxySecretRef, &out.ProxySecretRef
*out = new(meta.LocalObjectReference)
**out = **in
}
out.Interval = in.Interval
if in.Timeout != nil {
in, out := &in.Timeout, &out.Timeout
*out = new(metav1.Duration)
**out = **in
}
if in.Ignore != nil {
in, out := &in.Ignore, &out.Ignore
*out = new(string)
**out = **in
}
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BucketSpec.
func (in *BucketSpec) DeepCopy() *BucketSpec {
if in == nil {
return nil
}
out := new(BucketSpec)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *BucketStatus) DeepCopyInto(out *BucketStatus) {
*out = *in
if in.Conditions != nil {
in, out := &in.Conditions, &out.Conditions
*out = make([]metav1.Condition, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
if in.Artifact != nil {
in, out := &in.Artifact, &out.Artifact
*out = new(Artifact)
(*in).DeepCopyInto(*out)
}
if in.ObservedIgnore != nil {
in, out := &in.ObservedIgnore, &out.ObservedIgnore
*out = new(string)
**out = **in
}
out.ReconcileRequestStatus = in.ReconcileRequestStatus
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BucketStatus.
func (in *BucketStatus) DeepCopy() *BucketStatus {
if in == nil {
return nil
}
out := new(BucketStatus)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *GitRepository) DeepCopyInto(out *GitRepository) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
in.Spec.DeepCopyInto(&out.Spec)
in.Status.DeepCopyInto(&out.Status)
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GitRepository.
func (in *GitRepository) DeepCopy() *GitRepository {
if in == nil {
return nil
}
out := new(GitRepository)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *GitRepository) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *GitRepositoryInclude) DeepCopyInto(out *GitRepositoryInclude) {
*out = *in
out.GitRepositoryRef = in.GitRepositoryRef
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GitRepositoryInclude.
func (in *GitRepositoryInclude) DeepCopy() *GitRepositoryInclude {
if in == nil {
return nil
}
out := new(GitRepositoryInclude)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *GitRepositoryList) DeepCopyInto(out *GitRepositoryList) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ListMeta.DeepCopyInto(&out.ListMeta)
if in.Items != nil {
in, out := &in.Items, &out.Items
*out = make([]GitRepository, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GitRepositoryList.
func (in *GitRepositoryList) DeepCopy() *GitRepositoryList {
if in == nil {
return nil
}
out := new(GitRepositoryList)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *GitRepositoryList) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *GitRepositoryRef) DeepCopyInto(out *GitRepositoryRef) {
*out = *in
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GitRepositoryRef.
func (in *GitRepositoryRef) DeepCopy() *GitRepositoryRef {
if in == nil {
return nil
}
out := new(GitRepositoryRef)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *GitRepositorySpec) DeepCopyInto(out *GitRepositorySpec) {
*out = *in
if in.SecretRef != nil {
in, out := &in.SecretRef, &out.SecretRef
*out = new(meta.LocalObjectReference)
**out = **in
}
out.Interval = in.Interval
if in.Timeout != nil {
in, out := &in.Timeout, &out.Timeout
*out = new(metav1.Duration)
**out = **in
}
if in.Reference != nil {
in, out := &in.Reference, &out.Reference
*out = new(GitRepositoryRef)
**out = **in
}
if in.Verification != nil {
in, out := &in.Verification, &out.Verification
*out = new(GitRepositoryVerification)
**out = **in
}
if in.ProxySecretRef != nil {
in, out := &in.ProxySecretRef, &out.ProxySecretRef
*out = new(meta.LocalObjectReference)
**out = **in
}
if in.Ignore != nil {
in, out := &in.Ignore, &out.Ignore
*out = new(string)
**out = **in
}
if in.Include != nil {
in, out := &in.Include, &out.Include
*out = make([]GitRepositoryInclude, len(*in))
copy(*out, *in)
}
if in.SparseCheckout != nil {
in, out := &in.SparseCheckout, &out.SparseCheckout
*out = make([]string, len(*in))
copy(*out, *in)
}
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GitRepositorySpec.
func (in *GitRepositorySpec) DeepCopy() *GitRepositorySpec {
if in == nil {
return nil
}
out := new(GitRepositorySpec)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *GitRepositoryStatus) DeepCopyInto(out *GitRepositoryStatus) {
*out = *in
if in.Conditions != nil {
in, out := &in.Conditions, &out.Conditions
*out = make([]metav1.Condition, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
if in.Artifact != nil {
in, out := &in.Artifact, &out.Artifact
*out = new(Artifact)
(*in).DeepCopyInto(*out)
}
if in.IncludedArtifacts != nil {
in, out := &in.IncludedArtifacts, &out.IncludedArtifacts
*out = make([]*Artifact, len(*in))
for i := range *in {
if (*in)[i] != nil {
in, out := &(*in)[i], &(*out)[i]
*out = new(Artifact)
(*in).DeepCopyInto(*out)
}
}
}
if in.ObservedIgnore != nil {
in, out := &in.ObservedIgnore, &out.ObservedIgnore
*out = new(string)
**out = **in
}
if in.ObservedInclude != nil {
in, out := &in.ObservedInclude, &out.ObservedInclude
*out = make([]GitRepositoryInclude, len(*in))
copy(*out, *in)
}
if in.ObservedSparseCheckout != nil {
in, out := &in.ObservedSparseCheckout, &out.ObservedSparseCheckout
*out = make([]string, len(*in))
copy(*out, *in)
}
if in.SourceVerificationMode != nil {
in, out := &in.SourceVerificationMode, &out.SourceVerificationMode
*out = new(GitVerificationMode)
**out = **in
}
out.ReconcileRequestStatus = in.ReconcileRequestStatus
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GitRepositoryStatus.
func (in *GitRepositoryStatus) DeepCopy() *GitRepositoryStatus {
if in == nil {
return nil
}
out := new(GitRepositoryStatus)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *GitRepositoryVerification) DeepCopyInto(out *GitRepositoryVerification) {
*out = *in
out.SecretRef = in.SecretRef
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GitRepositoryVerification.
func (in *GitRepositoryVerification) DeepCopy() *GitRepositoryVerification {
if in == nil {
return nil
}
out := new(GitRepositoryVerification)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *HelmChart) DeepCopyInto(out *HelmChart) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
in.Spec.DeepCopyInto(&out.Spec)
in.Status.DeepCopyInto(&out.Status)
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HelmChart.
func (in *HelmChart) DeepCopy() *HelmChart {
if in == nil {
return nil
}
out := new(HelmChart)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *HelmChart) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *HelmChartList) DeepCopyInto(out *HelmChartList) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ListMeta.DeepCopyInto(&out.ListMeta)
if in.Items != nil {
in, out := &in.Items, &out.Items
*out = make([]HelmChart, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HelmChartList.
func (in *HelmChartList) DeepCopy() *HelmChartList {
if in == nil {
return nil
}
out := new(HelmChartList)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *HelmChartList) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *HelmChartSpec) DeepCopyInto(out *HelmChartSpec) {
*out = *in
out.SourceRef = in.SourceRef
out.Interval = in.Interval
if in.ValuesFiles != nil {
in, out := &in.ValuesFiles, &out.ValuesFiles
*out = make([]string, len(*in))
copy(*out, *in)
}
if in.Verify != nil {
in, out := &in.Verify, &out.Verify
*out = new(OCIRepositoryVerification)
(*in).DeepCopyInto(*out)
}
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HelmChartSpec.
func (in *HelmChartSpec) DeepCopy() *HelmChartSpec {
if in == nil {
return nil
}
out := new(HelmChartSpec)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *HelmChartStatus) DeepCopyInto(out *HelmChartStatus) {
*out = *in
if in.ObservedValuesFiles != nil {
in, out := &in.ObservedValuesFiles, &out.ObservedValuesFiles
*out = make([]string, len(*in))
copy(*out, *in)
}
if in.Conditions != nil {
in, out := &in.Conditions, &out.Conditions
*out = make([]metav1.Condition, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
if in.Artifact != nil {
in, out := &in.Artifact, &out.Artifact
*out = new(Artifact)
(*in).DeepCopyInto(*out)
}
out.ReconcileRequestStatus = in.ReconcileRequestStatus
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HelmChartStatus.
func (in *HelmChartStatus) DeepCopy() *HelmChartStatus {
if in == nil {
return nil
}
out := new(HelmChartStatus)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *HelmRepository) DeepCopyInto(out *HelmRepository) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
in.Spec.DeepCopyInto(&out.Spec)
in.Status.DeepCopyInto(&out.Status)
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HelmRepository.
func (in *HelmRepository) DeepCopy() *HelmRepository {
if in == nil {
return nil
}
out := new(HelmRepository)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *HelmRepository) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *HelmRepositoryList) DeepCopyInto(out *HelmRepositoryList) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ListMeta.DeepCopyInto(&out.ListMeta)
if in.Items != nil {
in, out := &in.Items, &out.Items
*out = make([]HelmRepository, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HelmRepositoryList.
func (in *HelmRepositoryList) DeepCopy() *HelmRepositoryList {
if in == nil {
return nil
}
out := new(HelmRepositoryList)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *HelmRepositoryList) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *HelmRepositorySpec) DeepCopyInto(out *HelmRepositorySpec) {
*out = *in
if in.SecretRef != nil {
in, out := &in.SecretRef, &out.SecretRef
*out = new(meta.LocalObjectReference)
**out = **in
}
if in.CertSecretRef != nil {
in, out := &in.CertSecretRef, &out.CertSecretRef
*out = new(meta.LocalObjectReference)
**out = **in
}
out.Interval = in.Interval
if in.Timeout != nil {
in, out := &in.Timeout, &out.Timeout
*out = new(metav1.Duration)
**out = **in
}
if in.AccessFrom != nil {
in, out := &in.AccessFrom, &out.AccessFrom
*out = new(acl.AccessFrom)
(*in).DeepCopyInto(*out)
}
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HelmRepositorySpec.
func (in *HelmRepositorySpec) DeepCopy() *HelmRepositorySpec {
if in == nil {
return nil
}
out := new(HelmRepositorySpec)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *HelmRepositoryStatus) DeepCopyInto(out *HelmRepositoryStatus) {
*out = *in
if in.Conditions != nil {
in, out := &in.Conditions, &out.Conditions
*out = make([]metav1.Condition, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
if in.Artifact != nil {
in, out := &in.Artifact, &out.Artifact
*out = new(Artifact)
(*in).DeepCopyInto(*out)
}
out.ReconcileRequestStatus = in.ReconcileRequestStatus
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HelmRepositoryStatus.
func (in *HelmRepositoryStatus) DeepCopy() *HelmRepositoryStatus {
if in == nil {
return nil
}
out := new(HelmRepositoryStatus)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *LocalHelmChartSourceReference) DeepCopyInto(out *LocalHelmChartSourceReference) {
*out = *in
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LocalHelmChartSourceReference.
func (in *LocalHelmChartSourceReference) DeepCopy() *LocalHelmChartSourceReference {
if in == nil {
return nil
}
out := new(LocalHelmChartSourceReference)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *OCILayerSelector) DeepCopyInto(out *OCILayerSelector) {
*out = *in
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OCILayerSelector.
func (in *OCILayerSelector) DeepCopy() *OCILayerSelector {
if in == nil {
return nil
}
out := new(OCILayerSelector)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *OCIRepository) DeepCopyInto(out *OCIRepository) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
in.Spec.DeepCopyInto(&out.Spec)
in.Status.DeepCopyInto(&out.Status)
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OCIRepository.
func (in *OCIRepository) DeepCopy() *OCIRepository {
if in == nil {
return nil
}
out := new(OCIRepository)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *OCIRepository) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *OCIRepositoryList) DeepCopyInto(out *OCIRepositoryList) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ListMeta.DeepCopyInto(&out.ListMeta)
if in.Items != nil {
in, out := &in.Items, &out.Items
*out = make([]OCIRepository, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OCIRepositoryList.
func (in *OCIRepositoryList) DeepCopy() *OCIRepositoryList {
if in == nil {
return nil
}
out := new(OCIRepositoryList)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *OCIRepositoryList) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *OCIRepositoryRef) DeepCopyInto(out *OCIRepositoryRef) {
*out = *in
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OCIRepositoryRef.
func (in *OCIRepositoryRef) DeepCopy() *OCIRepositoryRef {
if in == nil {
return nil
}
out := new(OCIRepositoryRef)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *OCIRepositorySpec) DeepCopyInto(out *OCIRepositorySpec) {
*out = *in
if in.Reference != nil {
in, out := &in.Reference, &out.Reference
*out = new(OCIRepositoryRef)
**out = **in
}
if in.LayerSelector != nil {
in, out := &in.LayerSelector, &out.LayerSelector
*out = new(OCILayerSelector)
**out = **in
}
if in.SecretRef != nil {
in, out := &in.SecretRef, &out.SecretRef
*out = new(meta.LocalObjectReference)
**out = **in
}
if in.Verify != nil {
in, out := &in.Verify, &out.Verify
*out = new(OCIRepositoryVerification)
(*in).DeepCopyInto(*out)
}
if in.CertSecretRef != nil {
in, out := &in.CertSecretRef, &out.CertSecretRef
*out = new(meta.LocalObjectReference)
**out = **in
}
if in.ProxySecretRef != nil {
in, out := &in.ProxySecretRef, &out.ProxySecretRef
*out = new(meta.LocalObjectReference)
**out = **in
}
out.Interval = in.Interval
if in.Timeout != nil {
in, out := &in.Timeout, &out.Timeout
*out = new(metav1.Duration)
**out = **in
}
if in.Ignore != nil {
in, out := &in.Ignore, &out.Ignore
*out = new(string)
**out = **in
}
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OCIRepositorySpec.
func (in *OCIRepositorySpec) DeepCopy() *OCIRepositorySpec {
if in == nil {
return nil
}
out := new(OCIRepositorySpec)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *OCIRepositoryStatus) DeepCopyInto(out *OCIRepositoryStatus) {
*out = *in
if in.Conditions != nil {
in, out := &in.Conditions, &out.Conditions
*out = make([]metav1.Condition, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
if in.Artifact != nil {
in, out := &in.Artifact, &out.Artifact
*out = new(Artifact)
(*in).DeepCopyInto(*out)
}
if in.ObservedIgnore != nil {
in, out := &in.ObservedIgnore, &out.ObservedIgnore
*out = new(string)
**out = **in
}
if in.ObservedLayerSelector != nil {
in, out := &in.ObservedLayerSelector, &out.ObservedLayerSelector
*out = new(OCILayerSelector)
**out = **in
}
out.ReconcileRequestStatus = in.ReconcileRequestStatus
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OCIRepositoryStatus.
func (in *OCIRepositoryStatus) DeepCopy() *OCIRepositoryStatus {
if in == nil {
return nil
}
out := new(OCIRepositoryStatus)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *OCIRepositoryVerification) DeepCopyInto(out *OCIRepositoryVerification) {
*out = *in
if in.SecretRef != nil {
in, out := &in.SecretRef, &out.SecretRef
*out = new(meta.LocalObjectReference)
**out = **in
}
if in.MatchOIDCIdentity != nil {
in, out := &in.MatchOIDCIdentity, &out.MatchOIDCIdentity
*out = make([]OIDCIdentityMatch, len(*in))
copy(*out, *in)
}
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OCIRepositoryVerification.
func (in *OCIRepositoryVerification) DeepCopy() *OCIRepositoryVerification {
if in == nil {
return nil
}
out := new(OCIRepositoryVerification)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *OIDCIdentityMatch) DeepCopyInto(out *OIDCIdentityMatch) {
*out = *in
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OIDCIdentityMatch.
func (in *OIDCIdentityMatch) DeepCopy() *OIDCIdentityMatch {
if in == nil {
return nil
}
out := new(OIDCIdentityMatch)
in.DeepCopyInto(out)
return out
}

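The generated deep-copy helpers above follow a uniform pattern: value fields are copied by plain assignment, pointer fields are re-allocated with new before **out = **in, and slices are re-made and copied element-wise. A minimal sketch of why this matters to callers, assuming the api/v1 package path for the types above (the object values are illustrative):

package main

import (
	"fmt"

	sourcev1 "github.com/fluxcd/source-controller/api/v1"
)

func main() {
	ignore := "*.md"
	orig := &sourcev1.GitRepository{
		Spec: sourcev1.GitRepositorySpec{
			URL:    "https://github.com/fluxcd/source-controller",
			Ignore: &ignore,
		},
	}

	// DeepCopy re-allocates pointer fields such as Spec.Ignore, so
	// mutating the copy leaves the original object untouched.
	cp := orig.DeepCopy()
	*cp.Spec.Ignore = "*.txt"

	fmt.Println(*orig.Spec.Ignore) // *.md
	fmt.Println(*cp.Spec.Ignore)   // *.txt
}

This aliasing-free copy is the property controller-runtime relies on when objects read from a shared informer cache are mutated before an update.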
View File

@@ -193,9 +193,9 @@ func (in *Bucket) GetInterval() metav1.Duration {
 }
 
 // +genclient
+// +genclient:Namespaced
 // +kubebuilder:object:root=true
 // +kubebuilder:subresource:status
-// +kubebuilder:deprecatedversion:warning="v1beta1 Bucket is deprecated, upgrade to v1"
 // +kubebuilder:printcolumn:name="Endpoint",type=string,JSONPath=`.spec.endpoint`
 // +kubebuilder:printcolumn:name="Ready",type="string",JSONPath=".status.conditions[?(@.type==\"Ready\")].status",description=""
 // +kubebuilder:printcolumn:name="Status",type="string",JSONPath=".status.conditions[?(@.type==\"Ready\")].message",description=""

View File

@@ -37,15 +37,15 @@ const (
 
 // GitRepositorySpec defines the desired state of a Git repository.
 type GitRepositorySpec struct {
 	// The repository URL, can be a HTTP/S or SSH address.
-	// +kubebuilder:validation:Pattern="^(http|https|ssh)://.*$"
+	// +kubebuilder:validation:Pattern="^(http|https|ssh)://"
 	// +required
 	URL string `json:"url"`
 
 	// The secret name containing the Git credentials.
 	// For HTTPS repositories the secret must contain username and password
 	// fields.
-	// For SSH repositories the secret must contain identity and known_hosts
-	// fields.
+	// For SSH repositories the secret must contain identity, identity.pub and
+	// known_hosts fields.
 	// +optional
 	SecretRef *meta.LocalObjectReference `json:"secretRef,omitempty"`

@@ -265,10 +265,10 @@ func (in *GitRepository) GetInterval() metav1.Duration {
 }
 
 // +genclient
+// +genclient:Namespaced
 // +kubebuilder:object:root=true
 // +kubebuilder:resource:shortName=gitrepo
 // +kubebuilder:subresource:status
-// +kubebuilder:deprecatedversion:warning="v1beta1 GitRepository is deprecated, upgrade to v1"
 // +kubebuilder:printcolumn:name="URL",type=string,JSONPath=`.spec.url`
 // +kubebuilder:printcolumn:name="Ready",type="string",JSONPath=".status.conditions[?(@.type==\"Ready\")].status",description=""
 // +kubebuilder:printcolumn:name="Status",type="string",JSONPath=".status.conditions[?(@.type==\"Ready\")].message",description=""

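Both forms of the URL pattern in the hunk above validate the same single-line values: OpenAPI pattern matching is not implicitly anchored at the end, so the trailing .*$ is redundant. A quick check with Go's regexp package (URLs are illustrative):

package main

import (
	"fmt"
	"regexp"
)

func main() {
	long := regexp.MustCompile(`^(http|https|ssh)://.*$`)
	short := regexp.MustCompile(`^(http|https|ssh)://`)

	for _, url := range []string{
		"https://github.com/fluxcd/source-controller",   // accepted by both
		"ssh://git@github.com/fluxcd/source-controller", // accepted by both
		"git@github.com:fluxcd/source-controller.git",   // rejected by both: no scheme prefix
	} {
		fmt.Println(url, long.MatchString(url), short.MatchString(url))
	}
}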
View File

@@ -231,10 +231,10 @@ func (in *HelmChart) GetValuesFiles() []string {
 }
 
 // +genclient
+// +genclient:Namespaced
 // +kubebuilder:object:root=true
 // +kubebuilder:resource:shortName=hc
 // +kubebuilder:subresource:status
-// +kubebuilder:deprecatedversion:warning="v1beta1 HelmChart is deprecated, upgrade to v1"
 // +kubebuilder:printcolumn:name="Chart",type=string,JSONPath=`.spec.chart`
 // +kubebuilder:printcolumn:name="Version",type=string,JSONPath=`.spec.version`
 // +kubebuilder:printcolumn:name="Source Kind",type=string,JSONPath=`.spec.sourceRef.kind`

View File

@@ -43,7 +43,7 @@ type HelmRepositorySpec struct {
 	// For HTTP/S basic auth the secret must contain username and
 	// password fields.
 	// For TLS the secret must contain a certFile and keyFile, and/or
-	// caFile fields.
+	// caCert fields.
 	// +optional
 	SecretRef *meta.LocalObjectReference `json:"secretRef,omitempty"`

@@ -181,10 +181,10 @@ func (in *HelmRepository) GetInterval() metav1.Duration {
 }
 
 // +genclient
+// +genclient:Namespaced
 // +kubebuilder:object:root=true
 // +kubebuilder:resource:shortName=helmrepo
 // +kubebuilder:subresource:status
-// +kubebuilder:deprecatedversion:warning="v1beta1 HelmRepository is deprecated, upgrade to v1"
 // +kubebuilder:printcolumn:name="URL",type=string,JSONPath=`.spec.url`
 // +kubebuilder:printcolumn:name="Ready",type="string",JSONPath=".status.conditions[?(@.type==\"Ready\")].status",description=""
 // +kubebuilder:printcolumn:name="Status",type="string",JSONPath=".status.conditions[?(@.type==\"Ready\")].message",description=""

View File

@ -1,7 +1,8 @@
//go:build !ignore_autogenerated //go:build !ignore_autogenerated
// +build !ignore_autogenerated
/* /*
Copyright 2024 The Flux authors Copyright 2022 The Flux authors
Licensed under the Apache License, Version 2.0 (the "License"); Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License. you may not use this file except in compliance with the License.

View File

@@ -18,16 +18,12 @@ package v1beta2
 
 import (
 	"path"
-	"regexp"
 	"strings"
 
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 )
 
 // Artifact represents the output of a Source reconciliation.
-//
-// Deprecated: use Artifact from api/v1 instead. This type will be removed in
-// a future release.
 type Artifact struct {
 	// Path is the relative file path of the Artifact. It can be used to locate
 	// the file in the root of the Artifact storage on the local file system of

@@ -47,14 +43,8 @@ type Artifact struct {
 	Revision string `json:"revision"`
 
 	// Checksum is the SHA256 checksum of the Artifact file.
-	// Deprecated: use Artifact.Digest instead.
 	// +optional
-	Checksum string `json:"checksum,omitempty"`
-
-	// Digest is the digest of the file in the form of '<algorithm>:<checksum>'.
-	// +optional
-	// +kubebuilder:validation:Pattern="^[a-z0-9]+(?:[.+_-][a-z0-9]+)*:[a-zA-Z0-9=_-]+$"
-	Digest string `json:"digest,omitempty"`
+	Checksum string `json:"checksum"`
 
 	// LastUpdateTime is the timestamp corresponding to the last update of the
 	// Artifact.

@@ -64,28 +54,15 @@ type Artifact struct {
 	// Size is the number of bytes in the file.
 	// +optional
 	Size *int64 `json:"size,omitempty"`
-
-	// Metadata holds upstream information such as OCI annotations.
-	// +optional
-	Metadata map[string]string `json:"metadata,omitempty"`
 }
 
-// HasRevision returns if the given revision matches the current Revision of
-// the Artifact.
+// HasRevision returns true if the given revision matches the current Revision
+// of the Artifact.
 func (in *Artifact) HasRevision(revision string) bool {
 	if in == nil {
 		return false
 	}
-	return TransformLegacyRevision(in.Revision) == TransformLegacyRevision(revision)
-}
-
-// HasChecksum returns if the given checksum matches the current Checksum of
-// the Artifact.
-func (in *Artifact) HasChecksum(checksum string) bool {
-	if in == nil {
-		return false
-	}
-	return in.Checksum == checksum
+	return in.Revision == revision
 }

@@ -100,60 +77,3 @@ func ArtifactDir(kind, namespace, name string) string {
 func ArtifactPath(kind, namespace, name, filename string) string {
 	return path.Join(ArtifactDir(kind, namespace, name), filename)
 }
-
-// TransformLegacyRevision transforms a "legacy" revision string into a "new"
-// revision string. It accepts the following formats:
-//
-//   - main/5394cb7f48332b2de7c17dd8b8384bbc84b7e738
-//   - feature/branch/5394cb7f48332b2de7c17dd8b8384bbc84b7e738
-//   - HEAD/5394cb7f48332b2de7c17dd8b8384bbc84b7e738
-//   - tag/55609ff9d959589ed917ce32e6bc0f0a36809565f308602c15c3668965979edc
-//   - d52bde83c5b2bd0fa7910264e0afc3ac9cfe9b6636ca29c05c09742f01d5a4bd
-//
-// Which are transformed into the following formats respectively:
-//
-//   - main@sha1:5394cb7f48332b2de7c17dd8b8384bbc84b7e738
-//   - feature/branch@sha1:5394cb7f48332b2de7c17dd8b8384bbc84b7e738
-//   - sha1:5394cb7f48332b2de7c17dd8b8384bbc84b7e738
-//   - tag@sha256:55609ff9d959589ed917ce32e6bc0f0a36809565f308602c15c3668965979edc
-//   - sha256:d52bde83c5b2bd0fa7910264e0afc3ac9cfe9b6636ca29c05c09742f01d5a4bd
-//
-// Deprecated: this function exists for backwards compatibility with existing
-// resources, and to provide a transition period. It will be removed in a
-// future release.
-func TransformLegacyRevision(rev string) string {
-	if rev != "" && strings.LastIndex(rev, ":") == -1 {
-		if i := strings.LastIndex(rev, "/"); i >= 0 {
-			sha := rev[i+1:]
-			if algo := determineSHAType(sha); algo != "" {
-				if name := rev[:i]; name != "HEAD" {
-					return name + "@" + algo + ":" + sha
-				}
-				return algo + ":" + sha
-			}
-		}
-		if algo := determineSHAType(rev); algo != "" {
-			return algo + ":" + rev
-		}
-	}
-	return rev
-}
-
-// isAlphaNumHex returns true if the given string only contains 0-9 and a-f
-// characters.
-var isAlphaNumHex = regexp.MustCompile(`^[0-9a-f]+$`).MatchString
-
-// determineSHAType returns the SHA algorithm used to compute the provided hex.
-// The determination is heuristic and based on the length of the hex string. If
-// the size is not recognized, an empty string is returned.
-func determineSHAType(hex string) string {
-	if isAlphaNumHex(hex) {
-		switch len(hex) {
-		case 40:
-			return "sha1"
-		case 64:
-			return "sha256"
-		}
-	}
-	return ""
-}

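The removed TransformLegacyRevision is what lets HasRevision on the main branch treat the legacy "<branch>/<sha1>" revision format and the newer "<branch>@sha1:<sha1>" format as equal. A minimal sketch, assuming the v1beta2 package path shown above:

package main

import (
	"fmt"

	sourcev1 "github.com/fluxcd/source-controller/api/v1beta2"
)

func main() {
	a := &sourcev1.Artifact{
		// Revision recorded in the legacy "<branch>/<sha1>" form.
		Revision: "main/5394cb7f48332b2de7c17dd8b8384bbc84b7e738",
	}

	// On main, both sides are normalized with TransformLegacyRevision
	// before comparison, so old and new formats match each other.
	fmt.Println(a.HasRevision("main@sha1:5394cb7f48332b2de7c17dd8b8384bbc84b7e738")) // true

	// On v0.24.1 the comparison is plain string equality instead.
	fmt.Println(a.HasRevision("main/5394cb7f48332b2de7c17dd8b8384bbc84b7e738")) // true on both versions
}

The deleted test file below exercises the same transformation table.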
View File

@@ -1,78 +0,0 @@
-/*
-Copyright 2023 The Flux authors
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package v1beta2
-
-import "testing"
-
-func TestTransformLegacyRevision(t *testing.T) {
-	tests := []struct {
-		rev  string
-		want string
-	}{
-		{
-			rev:  "HEAD/5394cb7f48332b2de7c17dd8b8384bbc84b7e738",
-			want: "sha1:5394cb7f48332b2de7c17dd8b8384bbc84b7e738",
-		},
-		{
-			rev:  "main/5394cb7f48332b2de7c17dd8b8384bbc84b7e738",
-			want: "main@sha1:5394cb7f48332b2de7c17dd8b8384bbc84b7e738",
-		},
-		{
-			rev:  "main@sha1:5394cb7f48332b2de7c17dd8b8384bbc84b7e738",
-			want: "main@sha1:5394cb7f48332b2de7c17dd8b8384bbc84b7e738",
-		},
-		{
-			rev:  "feature/branch/5394cb7f48332b2de7c17dd8b8384bbc84b7e738",
-			want: "feature/branch@sha1:5394cb7f48332b2de7c17dd8b8384bbc84b7e738",
-		},
-		{
-			rev:  "feature/branch@sha1:5394cb7f48332b2de7c17dd8b8384bbc84b7e738",
-			want: "feature/branch@sha1:5394cb7f48332b2de7c17dd8b8384bbc84b7e738",
-		},
-		{
-			rev:  "5ac85ca617f3774baff4ae0a420b810b2546dbc9af9f346b1d55c5ed9873c55c",
-			want: "sha256:5ac85ca617f3774baff4ae0a420b810b2546dbc9af9f346b1d55c5ed9873c55c",
-		},
-		{
-			rev:  "v1.0.0",
-			want: "v1.0.0",
-		},
-		{
-			rev:  "v1.0.0-rc1",
-			want: "v1.0.0-rc1",
-		},
-		{
-			rev:  "v1.0.0-rc1+metadata",
-			want: "v1.0.0-rc1+metadata",
-		},
-		{
-			rev:  "arbitrary/revision",
-			want: "arbitrary/revision",
-		},
-		{
-			rev:  "5394cb7f48332b2de7c17dd8b8384bbc84b7xxxx",
-			want: "5394cb7f48332b2de7c17dd8b8384bbc84b7xxxx",
-		},
-	}
-	for _, tt := range tests {
-		t.Run(tt.rev, func(t *testing.T) {
-			if got := TransformLegacyRevision(tt.rev); got != tt.want {
-				t.Errorf("TransformLegacyRevision() = %v, want %v", got, tt.want)
-			}
-		})
-	}
-}

View File

@@ -23,8 +23,6 @@ import (
 
 	"github.com/fluxcd/pkg/apis/acl"
 	"github.com/fluxcd/pkg/apis/meta"
-
-	apiv1 "github.com/fluxcd/source-controller/api/v1"
 )
 
 const (

@@ -33,48 +31,22 @@ const (
 )
 
 const (
-	// BucketProviderGeneric for any S3 API compatible storage Bucket.
-	BucketProviderGeneric string = apiv1.BucketProviderGeneric
-	// BucketProviderAmazon for an AWS S3 object storage Bucket.
-	// Provides support for retrieving credentials from the AWS EC2 service.
-	BucketProviderAmazon string = apiv1.BucketProviderAmazon
-	// BucketProviderGoogle for a Google Cloud Storage Bucket.
-	// Provides support for authentication using a workload identity.
-	BucketProviderGoogle string = apiv1.BucketProviderGoogle
-	// BucketProviderAzure for an Azure Blob Storage Bucket.
-	// Provides support for authentication using a Service Principal,
-	// Managed Identity or Shared Key.
-	BucketProviderAzure string = apiv1.BucketProviderAzure
-
 	// GenericBucketProvider for any S3 API compatible storage Bucket.
-	//
-	// Deprecated: use BucketProviderGeneric.
-	GenericBucketProvider string = apiv1.BucketProviderGeneric
+	GenericBucketProvider string = "generic"
 	// AmazonBucketProvider for an AWS S3 object storage Bucket.
 	// Provides support for retrieving credentials from the AWS EC2 service.
-	//
-	// Deprecated: use BucketProviderAmazon.
-	AmazonBucketProvider string = apiv1.BucketProviderAmazon
+	AmazonBucketProvider string = "aws"
 	// GoogleBucketProvider for a Google Cloud Storage Bucket.
 	// Provides support for authentication using a workload identity.
-	//
-	// Deprecated: use BucketProviderGoogle.
-	GoogleBucketProvider string = apiv1.BucketProviderGoogle
+	GoogleBucketProvider string = "gcp"
 	// AzureBucketProvider for an Azure Blob Storage Bucket.
 	// Provides support for authentication using a Service Principal,
 	// Managed Identity or Shared Key.
-	//
-	// Deprecated: use BucketProviderAzure.
-	AzureBucketProvider string = apiv1.BucketProviderAzure
+	AzureBucketProvider string = "azure"
 )
 
 // BucketSpec specifies the required configuration to produce an Artifact for
 // an object storage bucket.
-// +kubebuilder:validation:XValidation:rule="self.provider == 'aws' || self.provider == 'generic' || !has(self.sts)", message="STS configuration is only supported for the 'aws' and 'generic' Bucket providers"
-// +kubebuilder:validation:XValidation:rule="self.provider != 'aws' || !has(self.sts) || self.sts.provider == 'aws'", message="'aws' is the only supported STS provider for the 'aws' Bucket provider"
-// +kubebuilder:validation:XValidation:rule="self.provider != 'generic' || !has(self.sts) || self.sts.provider == 'ldap'", message="'ldap' is the only supported STS provider for the 'generic' Bucket provider"
-// +kubebuilder:validation:XValidation:rule="!has(self.sts) || self.sts.provider != 'aws' || !has(self.sts.secretRef)", message="spec.sts.secretRef is not required for the 'aws' STS provider"
-// +kubebuilder:validation:XValidation:rule="!has(self.sts) || self.sts.provider != 'aws' || !has(self.sts.certSecretRef)", message="spec.sts.certSecretRef is not required for the 'aws' STS provider"
 type BucketSpec struct {
 	// Provider of the object storage bucket.
 	// Defaults to 'generic', which expects an S3 (API) compatible object

@@ -92,14 +64,6 @@ type BucketSpec struct {
 	// +required
 	Endpoint string `json:"endpoint"`
 
-	// STS specifies the required configuration to use a Security Token
-	// Service for fetching temporary credentials to authenticate in a
-	// Bucket provider.
-	//
-	// This field is only supported for the `aws` and `generic` providers.
-	// +optional
-	STS *BucketSTSSpec `json:"sts,omitempty"`
-
 	// Insecure allows connecting to a non-TLS HTTP Endpoint.
 	// +optional
 	Insecure bool `json:"insecure,omitempty"`

@@ -108,49 +72,17 @@ type BucketSpec struct {
 	// +optional
 	Region string `json:"region,omitempty"`
 
-	// Prefix to use for server-side filtering of files in the Bucket.
-	// +optional
-	Prefix string `json:"prefix,omitempty"`
-
 	// SecretRef specifies the Secret containing authentication credentials
 	// for the Bucket.
 	// +optional
 	SecretRef *meta.LocalObjectReference `json:"secretRef,omitempty"`
 
-	// CertSecretRef can be given the name of a Secret containing
-	// either or both of
-	//
-	// - a PEM-encoded client certificate (`tls.crt`) and private
-	// key (`tls.key`);
-	// - a PEM-encoded CA certificate (`ca.crt`)
-	//
-	// and whichever are supplied, will be used for connecting to the
-	// bucket. The client cert and key are useful if you are
-	// authenticating with a certificate; the CA cert is useful if
-	// you are using a self-signed server certificate. The Secret must
-	// be of type `Opaque` or `kubernetes.io/tls`.
-	//
-	// This field is only supported for the `generic` provider.
-	// +optional
-	CertSecretRef *meta.LocalObjectReference `json:"certSecretRef,omitempty"`
-
-	// ProxySecretRef specifies the Secret containing the proxy configuration
-	// to use while communicating with the Bucket server.
-	// +optional
-	ProxySecretRef *meta.LocalObjectReference `json:"proxySecretRef,omitempty"`
-
-	// Interval at which the Bucket Endpoint is checked for updates.
-	// This interval is approximate and may be subject to jitter to ensure
-	// efficient use of resources.
-	// +kubebuilder:validation:Type=string
-	// +kubebuilder:validation:Pattern="^([0-9]+(\\.[0-9]+)?(ms|s|m|h))+$"
+	// Interval at which to check the Endpoint for updates.
 	// +required
 	Interval metav1.Duration `json:"interval"`
 
 	// Timeout for fetch operations, defaults to 60s.
 	// +kubebuilder:default="60s"
-	// +kubebuilder:validation:Type=string
-	// +kubebuilder:validation:Pattern="^([0-9]+(\\.[0-9]+)?(ms|s|m))+$"
 	// +optional
 	Timeout *metav1.Duration `json:"timeout,omitempty"`

@@ -172,45 +104,6 @@ type BucketSpec struct {
 	AccessFrom *acl.AccessFrom `json:"accessFrom,omitempty"`
 }
 
-// BucketSTSSpec specifies the required configuration to use a Security Token
-// Service for fetching temporary credentials to authenticate in a Bucket
-// provider.
-type BucketSTSSpec struct {
-	// Provider of the Security Token Service.
-	// +kubebuilder:validation:Enum=aws;ldap
-	// +required
-	Provider string `json:"provider"`
-
-	// Endpoint is the HTTP/S endpoint of the Security Token Service from
-	// where temporary credentials will be fetched.
-	// +required
-	// +kubebuilder:validation:Pattern="^(http|https)://.*$"
-	Endpoint string `json:"endpoint"`
-
-	// SecretRef specifies the Secret containing authentication credentials
-	// for the STS endpoint. This Secret must contain the fields `username`
-	// and `password` and is supported only for the `ldap` provider.
-	// +optional
-	SecretRef *meta.LocalObjectReference `json:"secretRef,omitempty"`
-
-	// CertSecretRef can be given the name of a Secret containing
-	// either or both of
-	//
-	// - a PEM-encoded client certificate (`tls.crt`) and private
-	// key (`tls.key`);
-	// - a PEM-encoded CA certificate (`ca.crt`)
-	//
-	// and whichever are supplied, will be used for connecting to the
-	// STS endpoint. The client cert and key are useful if you are
-	// authenticating with a certificate; the CA cert is useful if
-	// you are using a self-signed server certificate. The Secret must
-	// be of type `Opaque` or `kubernetes.io/tls`.
-	//
-	// This field is only supported for the `ldap` provider.
-	// +optional
-	CertSecretRef *meta.LocalObjectReference `json:"certSecretRef,omitempty"`
-}
-
 // BucketStatus records the observed state of a Bucket.
 type BucketStatus struct {
 	// ObservedGeneration is the last observed generation of the Bucket object.

@@ -229,12 +122,7 @@ type BucketStatus struct {
 	// Artifact represents the last successful Bucket reconciliation.
 	// +optional
-	Artifact *apiv1.Artifact `json:"artifact,omitempty"`
-
-	// ObservedIgnore is the observed exclusion patterns used for constructing
-	// the source artifact.
-	// +optional
-	ObservedIgnore *string `json:"observedIgnore,omitempty"`
+	Artifact *Artifact `json:"artifact,omitempty"`
 
 	meta.ReconcileRequestStatus `json:",inline"`
 }

@@ -265,14 +153,15 @@ func (in Bucket) GetRequeueAfter() time.Duration {
 }
 
 // GetArtifact returns the latest artifact from the source if present in the status sub-resource.
-func (in *Bucket) GetArtifact() *apiv1.Artifact {
+func (in *Bucket) GetArtifact() *Artifact {
 	return in.Status.Artifact
 }
 
 // +genclient
+// +genclient:Namespaced
+// +kubebuilder:storageversion
 // +kubebuilder:object:root=true
 // +kubebuilder:subresource:status
-// +kubebuilder:deprecatedversion:warning="v1beta2 Bucket is deprecated, upgrade to v1"
 // +kubebuilder:printcolumn:name="Endpoint",type=string,JSONPath=`.spec.endpoint`
 // +kubebuilder:printcolumn:name="Age",type="date",JSONPath=".metadata.creationTimestamp",description=""
 // +kubebuilder:printcolumn:name="Ready",type="string",JSONPath=".status.conditions[?(@.type==\"Ready\")].status",description=""

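The STS block and CEL rules removed above describe a main-branch feature: temporary credentials fetched from a Security Token Service, allowed only for the 'aws' and 'generic' bucket providers, with 'ldap' as the sole STS provider for 'generic' and the only one that may set secretRef or certSecretRef. A sketch of a spec those rules accept, with made-up endpoint and Secret names:

package main

import (
	"fmt"

	"github.com/fluxcd/pkg/apis/meta"
	sourcev1 "github.com/fluxcd/source-controller/api/v1beta2"
)

func main() {
	// Valid per the CEL rules: provider 'generic' paired with an 'ldap' STS.
	spec := sourcev1.BucketSpec{
		Provider:   "generic",
		BucketName: "example-bucket",
		Endpoint:   "minio.example.com",
		STS: &sourcev1.BucketSTSSpec{
			Provider:  "ldap",
			Endpoint:  "https://sts.example.com",
			SecretRef: &meta.LocalObjectReference{Name: "ldap-credentials"},
		},
	}
	fmt.Printf("%+v\n", spec)
}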
View File

@@ -71,10 +71,6 @@ const (
 	// required fields, or the provided credentials do not match.
 	AuthenticationFailedReason string = "AuthenticationFailed"
 
-	// VerificationError signals that the Source's verification
-	// check failed.
-	VerificationError string = "VerificationError"
-
 	// DirCreationFailedReason signals a failure caused by a directory creation
 	// operation.
 	DirCreationFailedReason string = "DirectoryCreationFailed"

View File

@@ -23,8 +23,6 @@ import (
 
 	"github.com/fluxcd/pkg/apis/acl"
 	"github.com/fluxcd/pkg/apis/meta"
-
-	apiv1 "github.com/fluxcd/source-controller/api/v1"
 )
 
 const (

@@ -50,29 +48,25 @@
 // Artifact for a Git repository.
 type GitRepositorySpec struct {
 	// URL specifies the Git repository URL, it can be an HTTP/S or SSH address.
-	// +kubebuilder:validation:Pattern="^(http|https|ssh)://.*$"
+	// +kubebuilder:validation:Pattern="^(http|https|ssh)://"
 	// +required
 	URL string `json:"url"`
 
 	// SecretRef specifies the Secret containing authentication credentials for
 	// the GitRepository.
 	// For HTTPS repositories the Secret must contain 'username' and 'password'
-	// fields for basic auth or 'bearerToken' field for token auth.
-	// For SSH repositories the Secret must contain 'identity'
+	// fields.
+	// For SSH repositories the Secret must contain 'identity', 'identity.pub'
 	// and 'known_hosts' fields.
 	// +optional
 	SecretRef *meta.LocalObjectReference `json:"secretRef,omitempty"`
 
 	// Interval at which to check the GitRepository for updates.
-	// +kubebuilder:validation:Type=string
-	// +kubebuilder:validation:Pattern="^([0-9]+(\\.[0-9]+)?(ms|s|m|h))+$"
 	// +required
 	Interval metav1.Duration `json:"interval"`
 
 	// Timeout for Git operations like cloning, defaults to 60s.
 	// +kubebuilder:default="60s"
-	// +kubebuilder:validation:Type=string
-	// +kubebuilder:validation:Pattern="^([0-9]+(\\.[0-9]+)?(ms|s|m))+$"
 	// +optional
 	Timeout *metav1.Duration `json:"timeout,omitempty"`

@@ -99,8 +93,6 @@ type GitRepositorySpec struct {
 	// GitImplementation specifies which Git client library implementation to
 	// use. Defaults to 'go-git', valid values are ('go-git', 'libgit2').
-	// Deprecated: gitImplementation is deprecated now that 'go-git' is the
-	// only supported implementation.
 	// +kubebuilder:validation:Enum=go-git;libgit2
 	// +kubebuilder:default:=go-git
 	// +optional

@@ -108,6 +100,7 @@ type GitRepositorySpec struct {
 	// RecurseSubmodules enables the initialization of all submodules within
 	// the GitRepository as cloned from the URL, using their default settings.
+	// This option is available only when using the 'go-git' GitImplementation.
 	// +optional
 	RecurseSubmodules bool `json:"recurseSubmodules,omitempty"`

@@ -157,6 +150,9 @@ func (in *GitRepositoryInclude) GetToPath() string {
 // GitRepositoryRef specifies the Git reference to resolve and checkout.
 type GitRepositoryRef struct {
 	// Branch to check out, defaults to 'master' if no other field is defined.
+	//
+	// When GitRepositorySpec.GitImplementation is set to 'go-git', a shallow
+	// clone of the specified branch is performed.
 	// +optional
 	Branch string `json:"branch,omitempty"`

@@ -168,17 +164,11 @@ type GitRepositoryRef struct {
 	// +optional
 	SemVer string `json:"semver,omitempty"`
 
-	// Name of the reference to check out; takes precedence over Branch, Tag and SemVer.
-	//
-	// It must be a valid Git reference: https://git-scm.com/docs/git-check-ref-format#_description
-	// Examples: "refs/heads/main", "refs/tags/v0.1.0", "refs/pull/420/head", "refs/merge-requests/1/head"
-	// +optional
-	Name string `json:"name,omitempty"`
-
 	// Commit SHA to check out, takes precedence over all reference fields.
 	//
-	// This can be combined with Branch to shallow clone the branch, in which
-	// the commit is expected to exist.
+	// When GitRepositorySpec.GitImplementation is set to 'go-git', this can be
+	// combined with Branch to shallow clone the branch, in which the commit is
+	// expected to exist.
 	// +optional
 	Commit string `json:"commit,omitempty"`
 }

@@ -192,7 +182,7 @@ type GitRepositoryVerification struct {
 	// SecretRef specifies the Secret containing the public keys of trusted Git
 	// authors.
-	SecretRef meta.LocalObjectReference `json:"secretRef"`
+	SecretRef meta.LocalObjectReference `json:"secretRef,omitempty"`
 }
 
 // GitRepositoryStatus records the observed state of a Git repository.

@@ -214,42 +204,12 @@ type GitRepositoryStatus struct {
 	// Artifact represents the last successful GitRepository reconciliation.
 	// +optional
-	Artifact *apiv1.Artifact `json:"artifact,omitempty"`
+	Artifact *Artifact `json:"artifact,omitempty"`
 
 	// IncludedArtifacts contains a list of the last successfully included
 	// Artifacts as instructed by GitRepositorySpec.Include.
 	// +optional
-	IncludedArtifacts []*apiv1.Artifact `json:"includedArtifacts,omitempty"`
-
-	// ContentConfigChecksum is a checksum of all the configurations related to
-	// the content of the source artifact:
-	//  - .spec.ignore
-	//  - .spec.recurseSubmodules
-	//  - .spec.included and the checksum of the included artifacts
-	// observed in .status.observedGeneration version of the object. This can
-	// be used to determine if the content of the included repository has
-	// changed.
-	// It has the format of `<algo>:<checksum>`, for example: `sha256:<checksum>`.
-	//
-	// Deprecated: Replaced with explicit fields for observed artifact content
-	// config in the status.
-	// +optional
-	ContentConfigChecksum string `json:"contentConfigChecksum,omitempty"`
-
-	// ObservedIgnore is the observed exclusion patterns used for constructing
-	// the source artifact.
-	// +optional
-	ObservedIgnore *string `json:"observedIgnore,omitempty"`
-
-	// ObservedRecurseSubmodules is the observed resource submodules
-	// configuration used to produce the current Artifact.
-	// +optional
-	ObservedRecurseSubmodules bool `json:"observedRecurseSubmodules,omitempty"`
-
-	// ObservedInclude is the observed list of GitRepository resources used
-	// to produce the current Artifact.
-	// +optional
-	ObservedInclude []GitRepositoryInclude `json:"observedInclude,omitempty"`
+	IncludedArtifacts []*Artifact `json:"includedArtifacts,omitempty"`
 
 	meta.ReconcileRequestStatus `json:",inline"`
 }

@@ -282,15 +242,16 @@ func (in GitRepository) GetRequeueAfter() time.Duration {
 // GetArtifact returns the latest Artifact from the GitRepository if present in
 // the status sub-resource.
-func (in *GitRepository) GetArtifact() *apiv1.Artifact {
+func (in *GitRepository) GetArtifact() *Artifact {
 	return in.Status.Artifact
 }
 
 // +genclient
+// +genclient:Namespaced
+// +kubebuilder:storageversion
 // +kubebuilder:object:root=true
 // +kubebuilder:resource:shortName=gitrepo
 // +kubebuilder:subresource:status
-// +kubebuilder:deprecatedversion:warning="v1beta2 GitRepository is deprecated, upgrade to v1"
 // +kubebuilder:printcolumn:name="URL",type=string,JSONPath=`.spec.url`
 // +kubebuilder:printcolumn:name="Age",type="date",JSONPath=".metadata.creationTimestamp",description=""
 // +kubebuilder:printcolumn:name="Ready",type="string",JSONPath=".status.conditions[?(@.type==\"Ready\")].status",description=""

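The Name field removed in the hunks above is the main-branch way to check out references that are neither branches nor tags, such as pull request heads. A small sketch using the v1beta2 types (values illustrative):

package main

import (
	"fmt"

	sourcev1 "github.com/fluxcd/source-controller/api/v1beta2"
)

func main() {
	// On main, Name takes precedence over Branch, Tag and SemVer,
	// and accepts any valid Git reference.
	ref := sourcev1.GitRepositoryRef{
		Branch: "main",               // ignored in favour of Name
		Name:   "refs/pull/420/head", // e.g. a GitHub pull request head
	}
	fmt.Printf("%+v\n", ref)
}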
View File

@@ -23,8 +23,6 @@ import (
 
 	"github.com/fluxcd/pkg/apis/acl"
 	"github.com/fluxcd/pkg/apis/meta"
-
-	apiv1 "github.com/fluxcd/source-controller/api/v1"
 )
 
 // HelmChartKind is the string representation of a HelmChart.

@@ -47,11 +45,7 @@
 	// +required
 	SourceRef LocalHelmChartSourceReference `json:"sourceRef"`
 
-	// Interval at which the HelmChart SourceRef is checked for updates.
-	// This interval is approximate and may be subject to jitter to ensure
-	// efficient use of resources.
-	// +kubebuilder:validation:Type=string
-	// +kubebuilder:validation:Pattern="^([0-9]+(\\.[0-9]+)?(ms|s|m|h))+$"
+	// Interval is the interval at which to check the Source for updates.
 	// +required
 	Interval metav1.Duration `json:"interval"`

@@ -80,11 +74,6 @@ type HelmChartSpec struct {
 	// +deprecated
 	ValuesFile string `json:"valuesFile,omitempty"`
 
-	// IgnoreMissingValuesFiles controls whether to silently ignore missing values
-	// files rather than failing.
-	// +optional
-	IgnoreMissingValuesFiles bool `json:"ignoreMissingValuesFiles,omitempty"`
-
 	// Suspend tells the controller to suspend the reconciliation of this
 	// source.
 	// +optional

@@ -95,14 +84,6 @@ type HelmChartSpec struct {
 	// NOTE: Not implemented, provisional as of https://github.com/fluxcd/flux2/pull/2092
 	// +optional
 	AccessFrom *acl.AccessFrom `json:"accessFrom,omitempty"`
-
-	// Verify contains the secret name containing the trusted public keys
-	// used to verify the signature and specifies which provider to use to check
-	// whether OCI image is authentic.
-	// This field is only supported when using HelmRepository source with spec.type 'oci'.
-	// Chart dependencies, which are not bundled in the umbrella chart artifact, are not verified.
-	// +optional
-	Verify *apiv1.OCIRepositoryVerification `json:"verify,omitempty"`
 }
 
 const (

@@ -148,12 +129,6 @@ type HelmChartStatus struct {
 	// +optional
 	ObservedChartName string `json:"observedChartName,omitempty"`
 
-	// ObservedValuesFiles are the observed value files of the last successful
-	// reconciliation.
-	// It matches the chart in the last successfully reconciled artifact.
-	// +optional
-	ObservedValuesFiles []string `json:"observedValuesFiles,omitempty"`
-
 	// Conditions holds the conditions for the HelmChart.
 	// +optional
 	Conditions []metav1.Condition `json:"conditions,omitempty"`

@@ -166,7 +141,7 @@ type HelmChartStatus struct {
 	// Artifact represents the output of the last successful reconciliation.
 	// +optional
-	Artifact *apiv1.Artifact `json:"artifact,omitempty"`
+	Artifact *Artifact `json:"artifact,omitempty"`
 
 	meta.ReconcileRequestStatus `json:",inline"`
 }

@@ -199,7 +174,7 @@ func (in HelmChart) GetRequeueAfter() time.Duration {
 // GetArtifact returns the latest artifact from the source if present in the
 // status sub-resource.
-func (in *HelmChart) GetArtifact() *apiv1.Artifact {
+func (in *HelmChart) GetArtifact() *Artifact {
 	return in.Status.Artifact
 }

@@ -215,10 +190,11 @@
 }
 
 // +genclient
+// +genclient:Namespaced
+// +kubebuilder:storageversion
 // +kubebuilder:object:root=true
 // +kubebuilder:resource:shortName=hc
 // +kubebuilder:subresource:status
-// +kubebuilder:deprecatedversion:warning="v1beta2 HelmChart is deprecated, upgrade to v1"
 // +kubebuilder:printcolumn:name="Chart",type=string,JSONPath=`.spec.chart`
 // +kubebuilder:printcolumn:name="Version",type=string,JSONPath=`.spec.version`
 // +kubebuilder:printcolumn:name="Source Kind",type=string,JSONPath=`.spec.sourceRef.kind`

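The Verify field removed above is the main-branch hook for cosign verification of OCI-hosted charts, reusing OCIRepositoryVerification from api/v1 (its deep-copy code appears earlier in this diff). A sketch with made-up object names; the field is honoured only for a HelmRepository source of type 'oci', and required fields such as the interval are omitted here for brevity:

package main

import (
	"fmt"

	"github.com/fluxcd/pkg/apis/meta"
	apiv1 "github.com/fluxcd/source-controller/api/v1"
	sourcev1 "github.com/fluxcd/source-controller/api/v1beta2"
)

func main() {
	spec := sourcev1.HelmChartSpec{
		Chart:   "podinfo",
		Version: "6.x",
		SourceRef: sourcev1.LocalHelmChartSourceReference{
			Kind: "HelmRepository",
			Name: "podinfo-oci", // assumed to be a spec.type 'oci' repository
		},
		Verify: &apiv1.OCIRepositoryVerification{
			Provider:  "cosign",
			SecretRef: &meta.LocalObjectReference{Name: "cosign-public-keys"},
		},
	}
	fmt.Printf("%+v\n", spec)
}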
View File

@ -23,8 +23,6 @@ import (
"github.com/fluxcd/pkg/apis/acl" "github.com/fluxcd/pkg/apis/acl"
"github.com/fluxcd/pkg/apis/meta" "github.com/fluxcd/pkg/apis/meta"
apiv1 "github.com/fluxcd/source-controller/api/v1"
) )
const ( const (
@ -33,11 +31,6 @@ const (
// HelmRepositoryURLIndexKey is the key used for indexing HelmRepository // HelmRepositoryURLIndexKey is the key used for indexing HelmRepository
// objects by their HelmRepositorySpec.URL. // objects by their HelmRepositorySpec.URL.
HelmRepositoryURLIndexKey = ".metadata.helmRepositoryURL" HelmRepositoryURLIndexKey = ".metadata.helmRepositoryURL"
// HelmRepositoryTypeDefault is the default HelmRepository type.
// It is used when no type is specified and corresponds to a Helm repository.
HelmRepositoryTypeDefault = "default"
// HelmRepositoryTypeOCI is the type for an OCI repository.
HelmRepositoryTypeOCI = "oci"
) )
// HelmRepositorySpec specifies the required configuration to produce an // HelmRepositorySpec specifies the required configuration to produce an
@ -45,7 +38,6 @@ const (
type HelmRepositorySpec struct {
// URL of the Helm repository, a valid URL contains at least a protocol and
// host.
- // +kubebuilder:validation:Pattern="^(http|https|oci)://.*$"
// +required
URL string `json:"url"`
@ -53,29 +45,11 @@ type HelmRepositorySpec struct {
// for the HelmRepository.
// For HTTP/S basic auth the secret must contain 'username' and 'password'
// fields.
- // Support for TLS auth using the 'certFile' and 'keyFile', and/or 'caFile'
- // keys is deprecated. Please use `.spec.certSecretRef` instead.
+ // For TLS the secret must contain a 'certFile' and 'keyFile', and/or
+ // 'caCert' fields.
// +optional
SecretRef *meta.LocalObjectReference `json:"secretRef,omitempty"`
- // CertSecretRef can be given the name of a Secret containing
- // either or both of
- //
- // - a PEM-encoded client certificate (`tls.crt`) and private
- // key (`tls.key`);
- // - a PEM-encoded CA certificate (`ca.crt`)
- //
- // and whichever are supplied, will be used for connecting to the
- // registry. The client cert and key are useful if you are
- // authenticating with a certificate; the CA cert is useful if
- // you are using a self-signed server certificate. The Secret must
- // be of type `Opaque` or `kubernetes.io/tls`.
- //
- // It takes precedence over the values specified in the Secret referred
- // to by `.spec.secretRef`.
- // +optional
- CertSecretRef *meta.LocalObjectReference `json:"certSecretRef,omitempty"`
// PassCredentials allows the credentials from the SecretRef to be passed
// on to a host that does not match the host as defined in URL.
// This may be required if the host of the advertised chart URLs in the
@ -85,25 +59,12 @@ type HelmRepositorySpec struct {
// +optional
PassCredentials bool `json:"passCredentials,omitempty"`
- // Interval at which the HelmRepository URL is checked for updates.
- // This interval is approximate and may be subject to jitter to ensure
- // efficient use of resources.
- // +kubebuilder:validation:Type=string
- // +kubebuilder:validation:Pattern="^([0-9]+(\\.[0-9]+)?(ms|s|m|h))+$"
- // +optional
- Interval metav1.Duration `json:"interval,omitempty"`
- // Insecure allows connecting to a non-TLS HTTP container registry.
- // This field is only taken into account if the .spec.type field is set to 'oci'.
- // +optional
- Insecure bool `json:"insecure,omitempty"`
- // Timeout is used for the index fetch operation for an HTTPS helm repository,
- // and for remote OCI Repository operations like pulling for an OCI helm
- // chart by the associated HelmChart.
- // Its default value is 60s.
- // +kubebuilder:validation:Type=string
- // +kubebuilder:validation:Pattern="^([0-9]+(\\.[0-9]+)?(ms|s|m))+$"
+ // Interval at which to check the URL for updates.
+ // +required
+ Interval metav1.Duration `json:"interval"`
+ // Timeout of the index fetch operation, defaults to 60s.
+ // +kubebuilder:default:="60s"
// +optional
Timeout *metav1.Duration `json:"timeout,omitempty"`
@ -117,20 +78,6 @@ type HelmRepositorySpec struct {
// NOTE: Not implemented, provisional as of https://github.com/fluxcd/flux2/pull/2092
// +optional
AccessFrom *acl.AccessFrom `json:"accessFrom,omitempty"`
- // Type of the HelmRepository.
- // When this field is set to "oci", the URL field value must be prefixed with "oci://".
- // +kubebuilder:validation:Enum=default;oci
- // +optional
- Type string `json:"type,omitempty"`
- // Provider used for authentication, can be 'aws', 'azure', 'gcp' or 'generic'.
- // This field is optional, and only taken into account if the .spec.type field is set to 'oci'.
- // When not specified, defaults to 'generic'.
- // +kubebuilder:validation:Enum=generic;aws;azure;gcp
- // +kubebuilder:default:=generic
- // +optional
- Provider string `json:"provider,omitempty"`
}
// HelmRepositoryStatus records the observed state of the HelmRepository. // HelmRepositoryStatus records the observed state of the HelmRepository.
@ -152,7 +99,7 @@ type HelmRepositoryStatus struct {
// Artifact represents the last successful HelmRepository reconciliation.
// +optional
- Artifact *apiv1.Artifact `json:"artifact,omitempty"`
+ Artifact *Artifact `json:"artifact,omitempty"`
meta.ReconcileRequestStatus `json:",inline"`
}
@ -176,32 +123,21 @@ func (in *HelmRepository) SetConditions(conditions []metav1.Condition) {
// GetRequeueAfter returns the duration after which the source must be
// reconciled again.
func (in HelmRepository) GetRequeueAfter() time.Duration {
- if in.Spec.Interval.Duration != 0 {
- return in.Spec.Interval.Duration
- }
- return time.Minute
- }
- // GetTimeout returns the timeout duration used for various operations related
- // to this HelmRepository.
- func (in HelmRepository) GetTimeout() time.Duration {
- if in.Spec.Timeout != nil {
- return in.Spec.Timeout.Duration
- }
- return time.Minute
+ return in.Spec.Interval.Duration
}
// GetArtifact returns the latest artifact from the source if present in the
// status sub-resource.
- func (in *HelmRepository) GetArtifact() *apiv1.Artifact {
+ func (in *HelmRepository) GetArtifact() *Artifact {
return in.Status.Artifact
}
// +genclient
+ // +genclient:Namespaced
+ // +kubebuilder:storageversion
// +kubebuilder:object:root=true
// +kubebuilder:resource:shortName=helmrepo
// +kubebuilder:subresource:status
- // +kubebuilder:deprecatedversion:warning="v1beta2 HelmRepository is deprecated, upgrade to v1"
// +kubebuilder:printcolumn:name="URL",type=string,JSONPath=`.spec.url`
// +kubebuilder:printcolumn:name="Age",type="date",JSONPath=".metadata.creationTimestamp",description=""
// +kubebuilder:printcolumn:name="Ready",type="string",JSONPath=".status.conditions[?(@.type==\"Ready\")].status",description=""


@ -1,315 +0,0 @@
/*
Copyright 2022 The Flux authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1beta2
import (
"time"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"github.com/fluxcd/pkg/apis/meta"
apiv1 "github.com/fluxcd/source-controller/api/v1"
)
const (
// OCIRepositoryKind is the string representation of a OCIRepository.
OCIRepositoryKind = "OCIRepository"
// OCIRepositoryPrefix is the prefix used for OCIRepository URLs.
OCIRepositoryPrefix = "oci://"
// GenericOCIProvider provides support for authentication using static credentials
// for any OCI compatible API such as Docker Registry, GitHub Container Registry,
// Docker Hub, Quay, etc.
GenericOCIProvider string = "generic"
// AmazonOCIProvider provides support for OCI authentication using AWS IRSA.
AmazonOCIProvider string = "aws"
// GoogleOCIProvider provides support for OCI authentication using GCP workload identity.
GoogleOCIProvider string = "gcp"
// AzureOCIProvider provides support for OCI authentication using an Azure Service Principal,
// Managed Identity or Shared Key.
AzureOCIProvider string = "azure"
// OCILayerExtract defines the operation type for extracting the content from an OCI artifact layer.
OCILayerExtract = "extract"
// OCILayerCopy defines the operation type for copying the content from an OCI artifact layer.
OCILayerCopy = "copy"
)
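A short sketch (illustrative helper, not controller code) of how the provider constants above are typically interpreted: every provider other than "generic" implies ambient, cloud-specific authentication such as IRSA or workload identity rather than a pull secret.

package sketch

import sourcev1beta2 "github.com/fluxcd/source-controller/api/v1beta2"

// usesAmbientAuth reports whether the given provider relies on cloud
// workload identity instead of static registry credentials.
func usesAmbientAuth(provider string) bool {
	switch provider {
	case sourcev1beta2.AmazonOCIProvider,
		sourcev1beta2.GoogleOCIProvider,
		sourcev1beta2.AzureOCIProvider:
		return true
	}
	return false
}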
// OCIRepositorySpec defines the desired state of OCIRepository
type OCIRepositorySpec struct {
// URL is a reference to an OCI artifact repository hosted
// on a remote container registry.
// +kubebuilder:validation:Pattern="^oci://.*$"
// +required
URL string `json:"url"`
// The OCI reference to pull and monitor for changes,
// defaults to the latest tag.
// +optional
Reference *OCIRepositoryRef `json:"ref,omitempty"`
// LayerSelector specifies which layer should be extracted from the OCI artifact.
// When not specified, the first layer found in the artifact is selected.
// +optional
LayerSelector *OCILayerSelector `json:"layerSelector,omitempty"`
// The provider used for authentication, can be 'aws', 'azure', 'gcp' or 'generic'.
// When not specified, defaults to 'generic'.
// +kubebuilder:validation:Enum=generic;aws;azure;gcp
// +kubebuilder:default:=generic
// +optional
Provider string `json:"provider,omitempty"`
// SecretRef contains the secret name containing the registry login
// credentials to resolve image metadata.
// The secret must be of type kubernetes.io/dockerconfigjson.
// +optional
SecretRef *meta.LocalObjectReference `json:"secretRef,omitempty"`
// Verify contains the secret name containing the trusted public keys
// used to verify the signature and specifies which provider to use to check
// whether OCI image is authentic.
// +optional
Verify *apiv1.OCIRepositoryVerification `json:"verify,omitempty"`
// ServiceAccountName is the name of the Kubernetes ServiceAccount used to authenticate
// the image pull if the service account has attached pull secrets. For more information:
// https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/#add-imagepullsecrets-to-a-service-account
// +optional
ServiceAccountName string `json:"serviceAccountName,omitempty"`
// CertSecretRef can be given the name of a Secret containing
// either or both of
//
// - a PEM-encoded client certificate (`tls.crt`) and private
// key (`tls.key`);
// - a PEM-encoded CA certificate (`ca.crt`)
//
// and whichever are supplied, will be used for connecting to the
// registry. The client cert and key are useful if you are
// authenticating with a certificate; the CA cert is useful if
// you are using a self-signed server certificate. The Secret must
// be of type `Opaque` or `kubernetes.io/tls`.
//
// Note: Support for the `caFile`, `certFile` and `keyFile` keys have
// been deprecated.
// +optional
CertSecretRef *meta.LocalObjectReference `json:"certSecretRef,omitempty"`
// ProxySecretRef specifies the Secret containing the proxy configuration
// to use while communicating with the container registry.
// +optional
ProxySecretRef *meta.LocalObjectReference `json:"proxySecretRef,omitempty"`
// Interval at which the OCIRepository URL is checked for updates.
// This interval is approximate and may be subject to jitter to ensure
// efficient use of resources.
// +kubebuilder:validation:Type=string
// +kubebuilder:validation:Pattern="^([0-9]+(\\.[0-9]+)?(ms|s|m|h))+$"
// +required
Interval metav1.Duration `json:"interval"`
// The timeout for remote OCI Repository operations like pulling, defaults to 60s.
// +kubebuilder:default="60s"
// +kubebuilder:validation:Type=string
// +kubebuilder:validation:Pattern="^([0-9]+(\\.[0-9]+)?(ms|s|m))+$"
// +optional
Timeout *metav1.Duration `json:"timeout,omitempty"`
// Ignore overrides the set of excluded patterns in the .sourceignore format
// (which is the same as .gitignore). If not provided, a default will be used,
// consult the documentation for your version to find out what those are.
// +optional
Ignore *string `json:"ignore,omitempty"`
// Insecure allows connecting to a non-TLS HTTP container registry.
// +optional
Insecure bool `json:"insecure,omitempty"`
// This flag tells the controller to suspend the reconciliation of this source.
// +optional
Suspend bool `json:"suspend,omitempty"`
}
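A sketch exercising the spec fields defined above: track the newest 6.x tag of an artifact repository every ten minutes. The URL and semver range are placeholder values, and the variable name exampleSpec is illustrative.

package sketch

import (
	"time"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

	sourcev1beta2 "github.com/fluxcd/source-controller/api/v1beta2"
)

// exampleSpec pulls the latest tag in the 6.x range and re-checks the
// registry every ten minutes.
var exampleSpec = sourcev1beta2.OCIRepositorySpec{
	URL:       "oci://ghcr.io/example/manifests/app", // placeholder registry path
	Reference: &sourcev1beta2.OCIRepositoryRef{SemVer: "6.x"},
	Interval:  metav1.Duration{Duration: 10 * time.Minute},
}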
// OCIRepositoryRef defines the image reference for the OCIRepository's URL
type OCIRepositoryRef struct {
// Digest is the image digest to pull, takes precedence over SemVer.
// The value should be in the format 'sha256:<HASH>'.
// +optional
Digest string `json:"digest,omitempty"`
// SemVer is the range of tags to pull selecting the latest within
// the range, takes precedence over Tag.
// +optional
SemVer string `json:"semver,omitempty"`
// SemverFilter is a regex pattern to filter the tags within the SemVer range.
// +optional
SemverFilter string `json:"semverFilter,omitempty"`
// Tag is the image tag to pull, defaults to latest.
// +optional
Tag string `json:"tag,omitempty"`
}
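The field comments above define a precedence order: Digest wins over SemVer, SemVer wins over Tag, and an empty reference means the latest tag. A minimal sketch of that rule; the helper name effectiveRef and its string return format are illustrative, not controller code.

package sketch

import sourcev1beta2 "github.com/fluxcd/source-controller/api/v1beta2"

// effectiveRef applies the documented precedence on OCIRepositoryRef.
// SemverFilter is orthogonal: it only narrows the candidates inside the range.
func effectiveRef(ref *sourcev1beta2.OCIRepositoryRef) string {
	switch {
	case ref == nil:
		return "latest"
	case ref.Digest != "":
		return ref.Digest
	case ref.SemVer != "":
		return "semver:" + ref.SemVer
	case ref.Tag != "":
		return ref.Tag
	default:
		return "latest"
	}
}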
// OCILayerSelector specifies which layer should be extracted from an OCI Artifact
type OCILayerSelector struct {
// MediaType specifies the OCI media type of the layer
// which should be extracted from the OCI Artifact. The
// first layer matching this type is selected.
// +optional
MediaType string `json:"mediaType,omitempty"`
// Operation specifies how the selected layer should be processed.
// By default, the layer compressed content is extracted to storage.
// When the operation is set to 'copy', the layer compressed content
// is persisted to storage as it is.
// +kubebuilder:validation:Enum=extract;copy
// +optional
Operation string `json:"operation,omitempty"`
}
// OCIRepositoryStatus defines the observed state of OCIRepository
type OCIRepositoryStatus struct {
// ObservedGeneration is the last observed generation.
// +optional
ObservedGeneration int64 `json:"observedGeneration,omitempty"`
// Conditions holds the conditions for the OCIRepository.
// +optional
Conditions []metav1.Condition `json:"conditions,omitempty"`
// URL is the download link for the artifact output of the last OCI Repository sync.
// +optional
URL string `json:"url,omitempty"`
// Artifact represents the output of the last successful OCI Repository sync.
// +optional
Artifact *apiv1.Artifact `json:"artifact,omitempty"`
// ContentConfigChecksum is a checksum of all the configurations related to
// the content of the source artifact:
// - .spec.ignore
// - .spec.layerSelector
// observed in .status.observedGeneration version of the object. This can
// be used to determine if the content configuration has changed and the
// artifact needs to be rebuilt.
// It has the format of `<algo>:<checksum>`, for example: `sha256:<checksum>`.
//
// Deprecated: Replaced with explicit fields for observed artifact content
// config in the status.
// +optional
ContentConfigChecksum string `json:"contentConfigChecksum,omitempty"`
// ObservedIgnore is the observed exclusion patterns used for constructing
// the source artifact.
// +optional
ObservedIgnore *string `json:"observedIgnore,omitempty"`
// ObservedLayerSelector is the observed layer selector used for constructing
// the source artifact.
// +optional
ObservedLayerSelector *OCILayerSelector `json:"observedLayerSelector,omitempty"`
meta.ReconcileRequestStatus `json:",inline"`
}
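The deprecated ContentConfigChecksum field above only fixes the "<algo>:<checksum>" format. A sketch of that format; joining the content-affecting inputs with NUL separators is an assumption made for this sketch, not the controller's actual canonicalisation.

package sketch

import (
	"crypto/sha256"
	"fmt"
)

// contentConfigChecksum digests the content-affecting configuration
// (.spec.ignore and the layer selector) into "<algo>:<checksum>" form.
func contentConfigChecksum(ignore, mediaType, operation string) string {
	sum := sha256.Sum256([]byte(ignore + "\x00" + mediaType + "\x00" + operation))
	return fmt.Sprintf("sha256:%x", sum)
}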
const (
// OCIPullFailedReason signals that a pull operation failed.
OCIPullFailedReason string = "OCIArtifactPullFailed"
// OCILayerOperationFailedReason signals that an OCI layer operation failed.
OCILayerOperationFailedReason string = "OCIArtifactLayerOperationFailed"
)
// GetConditions returns the status conditions of the object.
func (in OCIRepository) GetConditions() []metav1.Condition {
return in.Status.Conditions
}
// SetConditions sets the status conditions on the object.
func (in *OCIRepository) SetConditions(conditions []metav1.Condition) {
in.Status.Conditions = conditions
}
// GetRequeueAfter returns the duration after which the OCIRepository must be
// reconciled again.
func (in OCIRepository) GetRequeueAfter() time.Duration {
return in.Spec.Interval.Duration
}
// GetArtifact returns the latest Artifact from the OCIRepository if present in
// the status sub-resource.
func (in *OCIRepository) GetArtifact() *apiv1.Artifact {
return in.Status.Artifact
}
// GetLayerMediaType returns the media type layer selector if found in spec.
func (in *OCIRepository) GetLayerMediaType() string {
if in.Spec.LayerSelector == nil {
return ""
}
return in.Spec.LayerSelector.MediaType
}
// GetLayerOperation returns the layer selector operation (defaults to extract).
func (in *OCIRepository) GetLayerOperation() string {
if in.Spec.LayerSelector == nil || in.Spec.LayerSelector.Operation == "" {
return OCILayerExtract
}
return in.Spec.LayerSelector.Operation
}
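A sketch of the selection rule these two accessors encode: the first layer whose media type matches GetLayerMediaType is used, and an empty selector means the first layer in the artifact. The layer type and helper name are illustrative stand-ins, not the controller's OCI descriptor handling.

package sketch

import sourcev1beta2 "github.com/fluxcd/source-controller/api/v1beta2"

// layer is a stand-in for an OCI layer descriptor; only the media type
// matters for this sketch.
type layer struct{ MediaType string }

// selectLayer returns the first layer matching the repository's selector.
func selectLayer(repo *sourcev1beta2.OCIRepository, layers []layer) (layer, bool) {
	want := repo.GetLayerMediaType()
	for _, l := range layers {
		if want == "" || l.MediaType == want {
			return l, true
		}
	}
	return layer{}, false
}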
// +genclient
// +kubebuilder:object:root=true
// +kubebuilder:resource:shortName=ocirepo
// +kubebuilder:subresource:status
// +kubebuilder:deprecatedversion:warning="v1beta2 OCIRepository is deprecated, upgrade to v1"
// +kubebuilder:printcolumn:name="URL",type=string,JSONPath=`.spec.url`
// +kubebuilder:printcolumn:name="Ready",type="string",JSONPath=".status.conditions[?(@.type==\"Ready\")].status",description=""
// +kubebuilder:printcolumn:name="Status",type="string",JSONPath=".status.conditions[?(@.type==\"Ready\")].message",description=""
// +kubebuilder:printcolumn:name="Age",type="date",JSONPath=".metadata.creationTimestamp",description=""
// OCIRepository is the Schema for the ocirepositories API
type OCIRepository struct {
metav1.TypeMeta `json:",inline"`
metav1.ObjectMeta `json:"metadata,omitempty"`
Spec OCIRepositorySpec `json:"spec,omitempty"`
// +kubebuilder:default={"observedGeneration":-1}
Status OCIRepositoryStatus `json:"status,omitempty"`
}
// OCIRepositoryList contains a list of OCIRepository
// +kubebuilder:object:root=true
type OCIRepositoryList struct {
metav1.TypeMeta `json:",inline"`
metav1.ListMeta `json:"metadata,omitempty"`
Items []OCIRepository `json:"items"`
}
func init() {
SchemeBuilder.Register(&OCIRepository{}, &OCIRepositoryList{})
}
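A compilable sketch of what the init above enables: once the kinds are registered, a scheme built from this package can decode OCIRepository objects for clients and codecs. AddToScheme is the standard companion generated alongside SchemeBuilder.

package sketch

import (
	"k8s.io/apimachinery/pkg/runtime"

	sourcev1beta2 "github.com/fluxcd/source-controller/api/v1beta2"
)

// newScheme registers the v1beta2 kinds on a fresh runtime scheme.
func newScheme() (*runtime.Scheme, error) {
	scheme := runtime.NewScheme()
	if err := sourcev1beta2.AddToScheme(scheme); err != nil {
		return nil, err
	}
	return scheme, nil
}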


@ -33,9 +33,6 @@ const (
// interval. It must be supported by all kinds of the source.toolkit.fluxcd.io
// API group.
//
- // Deprecated: use the Source interface from api/v1 instead. This type will be
- // removed in a future release.
- //
// +k8s:deepcopy-gen=false
type Source interface {
runtime.Object
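A sketch of why the Source interface exists: code can handle every source kind polymorphically. This compiles against either side of the diff, since both define GetArtifact on the interface; the function name artifactURL is illustrative.

package sketch

import sourcev1beta2 "github.com/fluxcd/source-controller/api/v1beta2"

// artifactURL works on any kind implementing Source, e.g. GitRepository,
// HelmRepository or Bucket, without knowing the concrete type.
func artifactURL(src sourcev1beta2.Source) string {
	if a := src.GetArtifact(); a != nil {
		return a.URL
	}
	return ""
}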


@ -1,7 +1,8 @@
//go:build !ignore_autogenerated
+ // +build !ignore_autogenerated
/*
- Copyright 2024 The Flux authors
+ Copyright 2022 The Flux authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
@ -23,7 +24,6 @@ package v1beta2
import (
"github.com/fluxcd/pkg/apis/acl"
"github.com/fluxcd/pkg/apis/meta"
- apiv1 "github.com/fluxcd/source-controller/api/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
)
@ -37,13 +37,6 @@ func (in *Artifact) DeepCopyInto(out *Artifact) {
*out = new(int64)
**out = **in
}
- if in.Metadata != nil {
- in, out := &in.Metadata, &out.Metadata
- *out = make(map[string]string, len(*in))
- for key, val := range *in {
- (*out)[key] = val
- }
- }
}
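The Metadata block removed above copies the map key by key because assigning a Go map copies only its header. A runnable demonstration of the aliasing that element-wise copying avoids:

package main

import "fmt"

func main() {
	orig := map[string]string{"org.opencontainers.image.revision": "abc123"}
	alias := orig // shallow: both variables share the same storage
	deep := make(map[string]string, len(orig))
	for k, v := range orig { // element-wise, as in the generated code
		deep[k] = v
	}
	alias["extra"] = "1"
	fmt.Println(len(orig), len(deep)) // 2 1: the deep copy is unaffected
}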
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Artifact. // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Artifact.
@ -115,54 +108,14 @@ func (in *BucketList) DeepCopyObject() runtime.Object {
return nil
}
- // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
- func (in *BucketSTSSpec) DeepCopyInto(out *BucketSTSSpec) {
- *out = *in
- if in.SecretRef != nil {
- in, out := &in.SecretRef, &out.SecretRef
- *out = new(meta.LocalObjectReference)
- **out = **in
- }
- if in.CertSecretRef != nil {
- in, out := &in.CertSecretRef, &out.CertSecretRef
- *out = new(meta.LocalObjectReference)
- **out = **in
- }
- }
- // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BucketSTSSpec.
- func (in *BucketSTSSpec) DeepCopy() *BucketSTSSpec {
- if in == nil {
- return nil
- }
- out := new(BucketSTSSpec)
- in.DeepCopyInto(out)
- return out
- }
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *BucketSpec) DeepCopyInto(out *BucketSpec) {
*out = *in
- if in.STS != nil {
- in, out := &in.STS, &out.STS
- *out = new(BucketSTSSpec)
- (*in).DeepCopyInto(*out)
- }
if in.SecretRef != nil {
in, out := &in.SecretRef, &out.SecretRef
*out = new(meta.LocalObjectReference)
**out = **in
}
- if in.CertSecretRef != nil {
- in, out := &in.CertSecretRef, &out.CertSecretRef
- *out = new(meta.LocalObjectReference)
- **out = **in
- }
- if in.ProxySecretRef != nil {
- in, out := &in.ProxySecretRef, &out.ProxySecretRef
- *out = new(meta.LocalObjectReference)
- **out = **in
- }
out.Interval = in.Interval
if in.Timeout != nil {
in, out := &in.Timeout, &out.Timeout
@ -203,14 +156,9 @@ func (in *BucketStatus) DeepCopyInto(out *BucketStatus) {
}
if in.Artifact != nil {
in, out := &in.Artifact, &out.Artifact
- *out = new(apiv1.Artifact)
+ *out = new(Artifact)
(*in).DeepCopyInto(*out)
}
- if in.ObservedIgnore != nil {
- in, out := &in.ObservedIgnore, &out.ObservedIgnore
- *out = new(string)
- **out = **in
- }
out.ReconcileRequestStatus = in.ReconcileRequestStatus
}
@ -377,30 +325,20 @@ func (in *GitRepositoryStatus) DeepCopyInto(out *GitRepositoryStatus) {
}
if in.Artifact != nil {
in, out := &in.Artifact, &out.Artifact
- *out = new(apiv1.Artifact)
+ *out = new(Artifact)
(*in).DeepCopyInto(*out)
}
if in.IncludedArtifacts != nil {
in, out := &in.IncludedArtifacts, &out.IncludedArtifacts
- *out = make([]*apiv1.Artifact, len(*in))
+ *out = make([]*Artifact, len(*in))
for i := range *in {
if (*in)[i] != nil {
in, out := &(*in)[i], &(*out)[i]
- *out = new(apiv1.Artifact)
+ *out = new(Artifact)
(*in).DeepCopyInto(*out)
}
}
}
- if in.ObservedIgnore != nil {
- in, out := &in.ObservedIgnore, &out.ObservedIgnore
- *out = new(string)
- **out = **in
- }
- if in.ObservedInclude != nil {
- in, out := &in.ObservedInclude, &out.ObservedInclude
- *out = make([]GitRepositoryInclude, len(*in))
- copy(*out, *in)
- }
out.ReconcileRequestStatus = in.ReconcileRequestStatus
}
@ -504,11 +442,6 @@ func (in *HelmChartSpec) DeepCopyInto(out *HelmChartSpec) {
*out = new(acl.AccessFrom)
(*in).DeepCopyInto(*out)
}
- if in.Verify != nil {
- in, out := &in.Verify, &out.Verify
- *out = new(apiv1.OCIRepositoryVerification)
- (*in).DeepCopyInto(*out)
- }
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HelmChartSpec.
@ -524,11 +457,6 @@ func (in *HelmChartSpec) DeepCopy() *HelmChartSpec {
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *HelmChartStatus) DeepCopyInto(out *HelmChartStatus) {
*out = *in
- if in.ObservedValuesFiles != nil {
- in, out := &in.ObservedValuesFiles, &out.ObservedValuesFiles
- *out = make([]string, len(*in))
- copy(*out, *in)
- }
if in.Conditions != nil {
in, out := &in.Conditions, &out.Conditions
*out = make([]v1.Condition, len(*in))
@ -538,7 +466,7 @@ func (in *HelmChartStatus) DeepCopyInto(out *HelmChartStatus) {
}
if in.Artifact != nil {
in, out := &in.Artifact, &out.Artifact
- *out = new(apiv1.Artifact)
+ *out = new(Artifact)
(*in).DeepCopyInto(*out)
}
out.ReconcileRequestStatus = in.ReconcileRequestStatus
@ -621,11 +549,6 @@ func (in *HelmRepositorySpec) DeepCopyInto(out *HelmRepositorySpec) {
*out = new(meta.LocalObjectReference)
**out = **in
}
- if in.CertSecretRef != nil {
- in, out := &in.CertSecretRef, &out.CertSecretRef
- *out = new(meta.LocalObjectReference)
- **out = **in
- }
out.Interval = in.Interval
if in.Timeout != nil {
in, out := &in.Timeout, &out.Timeout
@ -661,7 +584,7 @@ func (in *HelmRepositoryStatus) DeepCopyInto(out *HelmRepositoryStatus) {
}
if in.Artifact != nil {
in, out := &in.Artifact, &out.Artifact
- *out = new(apiv1.Artifact)
+ *out = new(Artifact)
(*in).DeepCopyInto(*out)
}
out.ReconcileRequestStatus = in.ReconcileRequestStatus
@ -691,186 +614,3 @@ func (in *LocalHelmChartSourceReference) DeepCopy() *LocalHelmChartSourceReferen
in.DeepCopyInto(out)
return out
}
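A sketch of the usual reason all of these generated methods exist: objects read from a shared informer cache must be deep-copied before mutation. The function name withSuspend is illustrative.

package sketch

import sourcev1beta2 "github.com/fluxcd/source-controller/api/v1beta2"

// withSuspend returns a suspended copy, leaving the cached object untouched.
func withSuspend(in *sourcev1beta2.HelmRepository) *sourcev1beta2.HelmRepository {
	out := in.DeepCopy() // never mutate the object owned by the cache
	out.Spec.Suspend = true
	return out
}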
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *OCILayerSelector) DeepCopyInto(out *OCILayerSelector) {
*out = *in
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OCILayerSelector.
func (in *OCILayerSelector) DeepCopy() *OCILayerSelector {
if in == nil {
return nil
}
out := new(OCILayerSelector)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *OCIRepository) DeepCopyInto(out *OCIRepository) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
in.Spec.DeepCopyInto(&out.Spec)
in.Status.DeepCopyInto(&out.Status)
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OCIRepository.
func (in *OCIRepository) DeepCopy() *OCIRepository {
if in == nil {
return nil
}
out := new(OCIRepository)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *OCIRepository) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *OCIRepositoryList) DeepCopyInto(out *OCIRepositoryList) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ListMeta.DeepCopyInto(&out.ListMeta)
if in.Items != nil {
in, out := &in.Items, &out.Items
*out = make([]OCIRepository, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OCIRepositoryList.
func (in *OCIRepositoryList) DeepCopy() *OCIRepositoryList {
if in == nil {
return nil
}
out := new(OCIRepositoryList)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *OCIRepositoryList) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *OCIRepositoryRef) DeepCopyInto(out *OCIRepositoryRef) {
*out = *in
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OCIRepositoryRef.
func (in *OCIRepositoryRef) DeepCopy() *OCIRepositoryRef {
if in == nil {
return nil
}
out := new(OCIRepositoryRef)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *OCIRepositorySpec) DeepCopyInto(out *OCIRepositorySpec) {
*out = *in
if in.Reference != nil {
in, out := &in.Reference, &out.Reference
*out = new(OCIRepositoryRef)
**out = **in
}
if in.LayerSelector != nil {
in, out := &in.LayerSelector, &out.LayerSelector
*out = new(OCILayerSelector)
**out = **in
}
if in.SecretRef != nil {
in, out := &in.SecretRef, &out.SecretRef
*out = new(meta.LocalObjectReference)
**out = **in
}
if in.Verify != nil {
in, out := &in.Verify, &out.Verify
*out = new(apiv1.OCIRepositoryVerification)
(*in).DeepCopyInto(*out)
}
if in.CertSecretRef != nil {
in, out := &in.CertSecretRef, &out.CertSecretRef
*out = new(meta.LocalObjectReference)
**out = **in
}
if in.ProxySecretRef != nil {
in, out := &in.ProxySecretRef, &out.ProxySecretRef
*out = new(meta.LocalObjectReference)
**out = **in
}
out.Interval = in.Interval
if in.Timeout != nil {
in, out := &in.Timeout, &out.Timeout
*out = new(v1.Duration)
**out = **in
}
if in.Ignore != nil {
in, out := &in.Ignore, &out.Ignore
*out = new(string)
**out = **in
}
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OCIRepositorySpec.
func (in *OCIRepositorySpec) DeepCopy() *OCIRepositorySpec {
if in == nil {
return nil
}
out := new(OCIRepositorySpec)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *OCIRepositoryStatus) DeepCopyInto(out *OCIRepositoryStatus) {
*out = *in
if in.Conditions != nil {
in, out := &in.Conditions, &out.Conditions
*out = make([]v1.Condition, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
if in.Artifact != nil {
in, out := &in.Artifact, &out.Artifact
*out = new(apiv1.Artifact)
(*in).DeepCopyInto(*out)
}
if in.ObservedIgnore != nil {
in, out := &in.ObservedIgnore, &out.ObservedIgnore
*out = new(string)
**out = **in
}
if in.ObservedLayerSelector != nil {
in, out := &in.ObservedLayerSelector, &out.ObservedLayerSelector
*out = new(OCILayerSelector)
**out = **in
}
out.ReconcileRequestStatus = in.ReconcileRequestStatus
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OCIRepositoryStatus.
func (in *OCIRepositoryStatus) DeepCopy() *OCIRepositoryStatus {
if in == nil {
return nil
}
out := new(OCIRepositoryStatus)
in.DeepCopyInto(out)
return out
}

File diff suppressed because it is too large

File diff suppressed because it is too large


@ -1,9 +1,11 @@
---
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
annotations:
- controller-gen.kubebuilder.io/version: v0.16.1
+ controller-gen.kubebuilder.io/version: v0.7.0
+ creationTimestamp: null
name: helmrepositories.source.toolkit.fluxcd.io
spec:
group: source.toolkit.fluxcd.io
@ -16,308 +18,6 @@ spec:
singular: helmrepository
scope: Namespaced
versions:
- additionalPrinterColumns:
- jsonPath: .spec.url
name: URL
type: string
- jsonPath: .metadata.creationTimestamp
name: Age
type: date
- jsonPath: .status.conditions[?(@.type=="Ready")].status
name: Ready
type: string
- jsonPath: .status.conditions[?(@.type=="Ready")].message
name: Status
type: string
name: v1
schema:
openAPIV3Schema:
description: HelmRepository is the Schema for the helmrepositories API.
properties:
apiVersion:
description: |-
APIVersion defines the versioned schema of this representation of an object.
Servers should convert recognized schemas to the latest internal value, and
may reject unrecognized values.
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources
type: string
kind:
description: |-
Kind is a string value representing the REST resource this object represents.
Servers may infer this from the endpoint the client submits requests to.
Cannot be updated.
In CamelCase.
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
type: string
metadata:
type: object
spec:
description: |-
HelmRepositorySpec specifies the required configuration to produce an
Artifact for a Helm repository index YAML.
properties:
accessFrom:
description: |-
AccessFrom specifies an Access Control List for allowing cross-namespace
references to this object.
NOTE: Not implemented, provisional as of https://github.com/fluxcd/flux2/pull/2092
properties:
namespaceSelectors:
description: |-
NamespaceSelectors is the list of namespace selectors to which this ACL applies.
Items in this list are evaluated using a logical OR operation.
items:
description: |-
NamespaceSelector selects the namespaces to which this ACL applies.
An empty map of MatchLabels matches all namespaces in a cluster.
properties:
matchLabels:
additionalProperties:
type: string
description: |-
MatchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels
map is equivalent to an element of matchExpressions, whose key field is "key", the
operator is "In", and the values array contains only "value". The requirements are ANDed.
type: object
type: object
type: array
required:
- namespaceSelectors
type: object
certSecretRef:
description: |-
CertSecretRef can be given the name of a Secret containing
either or both of
- a PEM-encoded client certificate (`tls.crt`) and private
key (`tls.key`);
- a PEM-encoded CA certificate (`ca.crt`)
and whichever are supplied, will be used for connecting to the
registry. The client cert and key are useful if you are
authenticating with a certificate; the CA cert is useful if
you are using a self-signed server certificate. The Secret must
be of type `Opaque` or `kubernetes.io/tls`.
It takes precedence over the values specified in the Secret referred
to by `.spec.secretRef`.
properties:
name:
description: Name of the referent.
type: string
required:
- name
type: object
insecure:
description: |-
Insecure allows connecting to a non-TLS HTTP container registry.
This field is only taken into account if the .spec.type field is set to 'oci'.
type: boolean
interval:
description: |-
Interval at which the HelmRepository URL is checked for updates.
This interval is approximate and may be subject to jitter to ensure
efficient use of resources.
pattern: ^([0-9]+(\.[0-9]+)?(ms|s|m|h))+$
type: string
passCredentials:
description: |-
PassCredentials allows the credentials from the SecretRef to be passed
on to a host that does not match the host as defined in URL.
This may be required if the host of the advertised chart URLs in the
index differ from the defined URL.
Enabling this should be done with caution, as it can potentially result
in credentials getting stolen in a MITM-attack.
type: boolean
provider:
default: generic
description: |-
Provider used for authentication, can be 'aws', 'azure', 'gcp' or 'generic'.
This field is optional, and only taken into account if the .spec.type field is set to 'oci'.
When not specified, defaults to 'generic'.
enum:
- generic
- aws
- azure
- gcp
type: string
secretRef:
description: |-
SecretRef specifies the Secret containing authentication credentials
for the HelmRepository.
For HTTP/S basic auth the secret must contain 'username' and 'password'
fields.
Support for TLS auth using the 'certFile' and 'keyFile', and/or 'caFile'
keys is deprecated. Please use `.spec.certSecretRef` instead.
properties:
name:
description: Name of the referent.
type: string
required:
- name
type: object
suspend:
description: |-
Suspend tells the controller to suspend the reconciliation of this
HelmRepository.
type: boolean
timeout:
description: |-
Timeout is used for the index fetch operation for an HTTPS helm repository,
and for remote OCI Repository operations like pulling for an OCI helm
chart by the associated HelmChart.
Its default value is 60s.
pattern: ^([0-9]+(\.[0-9]+)?(ms|s|m))+$
type: string
type:
description: |-
Type of the HelmRepository.
When this field is set to "oci", the URL field value must be prefixed with "oci://".
enum:
- default
- oci
type: string
url:
description: |-
URL of the Helm repository, a valid URL contains at least a protocol and
host.
pattern: ^(http|https|oci)://.*$
type: string
required:
- url
type: object
status:
default:
observedGeneration: -1
description: HelmRepositoryStatus records the observed state of the HelmRepository.
properties:
artifact:
description: Artifact represents the last successful HelmRepository
reconciliation.
properties:
digest:
description: Digest is the digest of the file in the form of '<algorithm>:<checksum>'.
pattern: ^[a-z0-9]+(?:[.+_-][a-z0-9]+)*:[a-zA-Z0-9=_-]+$
type: string
lastUpdateTime:
description: |-
LastUpdateTime is the timestamp corresponding to the last update of the
Artifact.
format: date-time
type: string
metadata:
additionalProperties:
type: string
description: Metadata holds upstream information such as OCI annotations.
type: object
path:
description: |-
Path is the relative file path of the Artifact. It can be used to locate
the file in the root of the Artifact storage on the local file system of
the controller managing the Source.
type: string
revision:
description: |-
Revision is a human-readable identifier traceable in the origin source
system. It can be a Git commit SHA, Git tag, a Helm chart version, etc.
type: string
size:
description: Size is the number of bytes in the file.
format: int64
type: integer
url:
description: |-
URL is the HTTP address of the Artifact as exposed by the controller
managing the Source. It can be used to retrieve the Artifact for
consumption, e.g. by another controller applying the Artifact contents.
type: string
required:
- lastUpdateTime
- path
- revision
- url
type: object
conditions:
description: Conditions holds the conditions for the HelmRepository.
items:
description: Condition contains details for one aspect of the current
state of this API Resource.
properties:
lastTransitionTime:
description: |-
lastTransitionTime is the last time the condition transitioned from one status to another.
This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable.
format: date-time
type: string
message:
description: |-
message is a human readable message indicating details about the transition.
This may be an empty string.
maxLength: 32768
type: string
observedGeneration:
description: |-
observedGeneration represents the .metadata.generation that the condition was set based upon.
For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date
with respect to the current state of the instance.
format: int64
minimum: 0
type: integer
reason:
description: |-
reason contains a programmatic identifier indicating the reason for the condition's last transition.
Producers of specific condition types may define expected values and meanings for this field,
and whether the values are considered a guaranteed API.
The value should be a CamelCase string.
This field may not be empty.
maxLength: 1024
minLength: 1
pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$
type: string
status:
description: status of the condition, one of True, False, Unknown.
enum:
- "True"
- "False"
- Unknown
type: string
type:
description: type of condition in CamelCase or in foo.example.com/CamelCase.
maxLength: 316
pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$
type: string
required:
- lastTransitionTime
- message
- reason
- status
- type
type: object
type: array
lastHandledReconcileAt:
description: |-
LastHandledReconcileAt holds the value of the most recent
reconcile request value, so a change of the annotation value
can be detected.
type: string
observedGeneration:
description: |-
ObservedGeneration is the last observed generation of the HelmRepository
object.
format: int64
type: integer
url:
description: |-
URL is the dynamic fetch link for the latest Artifact.
It is provided on a "best effort" basis, and using the precise
HelmRepositoryStatus.Artifact data is recommended.
type: string
type: object
type: object
served: true
storage: true
subresources:
status: {}
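Per the removed v1 schema above, spec.url is the only required property of a v1 HelmRepository. A sketch of the smallest conforming object, built with unstructured so no typed v1 package is needed; the name and URL are illustrative values.

package sketch

import "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"

// minimalV1HelmRepository builds the smallest object the v1 schema accepts.
func minimalV1HelmRepository() (*unstructured.Unstructured, error) {
	u := &unstructured.Unstructured{}
	u.SetAPIVersion("source.toolkit.fluxcd.io/v1")
	u.SetKind("HelmRepository")
	u.SetName("example")
	err := unstructured.SetNestedField(u.Object, "https://example.com/charts", "spec", "url")
	return u, err
}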
- additionalPrinterColumns:
- jsonPath: .spec.url
name: URL
@ -331,27 +31,20 @@ spec:
- jsonPath: .metadata.creationTimestamp
name: Age
type: date
- deprecated: true
- deprecationWarning: v1beta1 HelmRepository is deprecated, upgrade to v1
name: v1beta1
schema:
openAPIV3Schema:
description: HelmRepository is the Schema for the helmrepositories API
properties:
apiVersion:
- description: |-
- APIVersion defines the versioned schema of this representation of an object.
- Servers should convert recognized schemas to the latest internal value, and
- may reject unrecognized values.
- More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources
+ description: 'APIVersion defines the versioned schema of this representation
+ of an object. Servers should convert recognized schemas to the latest
+ internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
type: string
kind:
- description: |-
- Kind is a string value representing the REST resource this object represents.
- Servers may infer this from the endpoint the client submits requests to.
- Cannot be updated.
- In CamelCase.
- More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
+ description: 'Kind is a string value representing the REST resource this
+ object represents. Servers may infer this from the endpoint the client
+ submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
type: string
metadata:
type: object
@ -363,21 +56,22 @@ spec:
cross-namespace references to this object.
properties:
namespaceSelectors:
- description: |-
- NamespaceSelectors is the list of namespace selectors to which this ACL applies.
- Items in this list are evaluated using a logical OR operation.
+ description: NamespaceSelectors is the list of namespace selectors
+ to which this ACL applies. Items in this list are evaluated
+ using a logical OR operation.
items:
- description: |-
- NamespaceSelector selects the namespaces to which this ACL applies.
- An empty map of MatchLabels matches all namespaces in a cluster.
+ description: NamespaceSelector selects the namespaces to which
+ this ACL applies. An empty map of MatchLabels matches all
+ namespaces in a cluster.
properties:
matchLabels:
additionalProperties:
type: string
- description: |-
- MatchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels
- map is equivalent to an element of matchExpressions, whose key field is "key", the
- operator is "In", and the values array contains only "value". The requirements are ANDed.
+ description: MatchLabels is a map of {key,value} pairs.
+ A single {key,value} in the matchLabels map is equivalent
+ to an element of matchExpressions, whose key field is
+ "key", the operator is "In", and the values array contains
+ only "value". The requirements are ANDed.
type: object
type: object
type: array
@ -388,22 +82,18 @@ spec:
description: The interval at which to check the upstream for updates.
type: string
passCredentials:
- description: |-
- PassCredentials allows the credentials from the SecretRef to be passed on to
- a host that does not match the host as defined in URL.
- This may be required if the host of the advertised chart URLs in the index
- differ from the defined URL.
- Enabling this should be done with caution, as it can potentially result in
- credentials getting stolen in a MITM-attack.
+ description: PassCredentials allows the credentials from the SecretRef
+ to be passed on to a host that does not match the host as defined
+ in URL. This may be required if the host of the advertised chart
+ URLs in the index differ from the defined URL. Enabling this should
+ be done with caution, as it can potentially result in credentials
+ getting stolen in a MITM-attack.
type: boolean
secretRef:
- description: |-
- The name of the secret containing authentication credentials for the Helm
- repository.
- For HTTP/S basic auth the secret must contain username and
- password fields.
- For TLS the secret must contain a certFile and keyFile, and/or
- caFile fields.
+ description: The name of the secret containing authentication credentials
+ for the Helm repository. For HTTP/S basic auth the secret must contain
+ username and password fields. For TLS the secret must contain a
+ certFile and keyFile, and/or caCert fields.
properties:
name:
description: Name of the referent.
@ -440,60 +130,66 @@ spec:
description: Checksum is the SHA256 checksum of the artifact.
type: string
lastUpdateTime:
- description: |-
- LastUpdateTime is the timestamp corresponding to the last update of this
- artifact.
+ description: LastUpdateTime is the timestamp corresponding to
+ the last update of this artifact.
format: date-time
type: string
path:
description: Path is the relative file path of this artifact.
type: string
revision:
- description: |-
- Revision is a human readable identifier traceable in the origin source
- system. It can be a Git commit SHA, Git tag, a Helm index timestamp, a Helm
- chart version, etc.
+ description: Revision is a human readable identifier traceable
+ in the origin source system. It can be a Git commit SHA, Git
+ tag, a Helm index timestamp, a Helm chart version, etc.
type: string
url:
description: URL is the HTTP address of this artifact.
type: string
required:
- - lastUpdateTime
- path
- url
type: object
conditions:
description: Conditions holds the conditions for the HelmRepository.
items:
- description: Condition contains details for one aspect of the current
- state of this API Resource.
+ description: "Condition contains details for one aspect of the current
+ state of this API Resource. --- This struct is intended for direct
+ use as an array at the field path .status.conditions. For example,
+ type FooStatus struct{ // Represents the observations of a
+ foo's current state. // Known .status.conditions.type are:
+ \"Available\", \"Progressing\", and \"Degraded\" // +patchMergeKey=type
+ \ // +patchStrategy=merge // +listType=map // +listMapKey=type
+ \ Conditions []metav1.Condition `json:\"conditions,omitempty\"
+ patchStrategy:\"merge\" patchMergeKey:\"type\" protobuf:\"bytes,1,rep,name=conditions\"`
+ \n // other fields }"
properties:
lastTransitionTime:
- description: |-
- lastTransitionTime is the last time the condition transitioned from one status to another.
- This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable.
+ description: lastTransitionTime is the last time the condition
+ transitioned from one status to another. This should be when
+ the underlying condition changed. If that is not known, then
+ using the time when the API field changed is acceptable.
format: date-time
type: string
message:
- description: |-
- message is a human readable message indicating details about the transition.
- This may be an empty string.
+ description: message is a human readable message indicating
+ details about the transition. This may be an empty string.
maxLength: 32768
type: string
observedGeneration:
- description: |-
- observedGeneration represents the .metadata.generation that the condition was set based upon.
- For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date
- with respect to the current state of the instance.
+ description: observedGeneration represents the .metadata.generation
+ that the condition was set based upon. For instance, if .metadata.generation
+ is currently 12, but the .status.conditions[x].observedGeneration
+ is 9, the condition is out of date with respect to the current
+ state of the instance.
format: int64
minimum: 0
type: integer
reason:
- description: |-
- reason contains a programmatic identifier indicating the reason for the condition's last transition.
- Producers of specific condition types may define expected values and meanings for this field,
- and whether the values are considered a guaranteed API.
- The value should be a CamelCase string.
- This field may not be empty.
+ description: reason contains a programmatic identifier indicating
+ the reason for the condition's last transition. Producers
+ of specific condition types may define expected values and
+ meanings for this field, and whether the values are considered
+ a guaranteed API. The value should be a CamelCase string.
+ This field may not be empty.
maxLength: 1024
minLength: 1
@ -508,6 +204,10 @@ spec:
type: string
type:
- description: type of condition in CamelCase or in foo.example.com/CamelCase.
+ description: type of condition in CamelCase or in foo.example.com/CamelCase.
+ --- Many .condition.type values are consistent across resources
+ like Available, but because arbitrary conditions can be useful
+ (see .node.status.conditions), the ability to deconflict is
+ important. The regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt)
maxLength: 316
pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$
type: string
@ -520,10 +220,9 @@ spec:
type: object
type: array
lastHandledReconcileAt:
- description: |-
- LastHandledReconcileAt holds the value of the most recent
- reconcile request value, so a change of the annotation value
- can be detected.
+ description: LastHandledReconcileAt holds the value of the most recent
+ reconcile request value, so a change of the annotation value can
+ be detected.
type: string
observedGeneration:
description: ObservedGeneration is the last observed generation.
@ -551,128 +250,71 @@ spec:
- jsonPath: .status.conditions[?(@.type=="Ready")].message
name: Status
type: string
- deprecated: true
- deprecationWarning: v1beta2 HelmRepository is deprecated, upgrade to v1
name: v1beta2
schema:
openAPIV3Schema:
description: HelmRepository is the Schema for the helmrepositories API.
properties:
apiVersion:
- description: |-
- APIVersion defines the versioned schema of this representation of an object.
- Servers should convert recognized schemas to the latest internal value, and
- may reject unrecognized values.
- More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources
+ description: 'APIVersion defines the versioned schema of this representation
+ of an object. Servers should convert recognized schemas to the latest
+ internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
type: string
kind:
- description: |-
- Kind is a string value representing the REST resource this object represents.
- Servers may infer this from the endpoint the client submits requests to.
- Cannot be updated.
- In CamelCase.
- More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
+ description: 'Kind is a string value representing the REST resource this
+ object represents. Servers may infer this from the endpoint the client
+ submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
type: string
metadata:
type: object
spec:
- description: |-
- HelmRepositorySpec specifies the required configuration to produce an
- Artifact for a Helm repository index YAML.
+ description: HelmRepositorySpec specifies the required configuration to
+ produce an Artifact for a Helm repository index YAML.
properties:
accessFrom:
- description: |-
- AccessFrom specifies an Access Control List for allowing cross-namespace
- references to this object.
- NOTE: Not implemented, provisional as of https://github.com/fluxcd/flux2/pull/2092
+ description: 'AccessFrom specifies an Access Control List for allowing
+ cross-namespace references to this object. NOTE: Not implemented,
+ provisional as of https://github.com/fluxcd/flux2/pull/2092'
properties:
namespaceSelectors:
- description: |-
- NamespaceSelectors is the list of namespace selectors to which this ACL applies.
- Items in this list are evaluated using a logical OR operation.
+ description: NamespaceSelectors is the list of namespace selectors
+ to which this ACL applies. Items in this list are evaluated
+ using a logical OR operation.
items:
- description: |-
- NamespaceSelector selects the namespaces to which this ACL applies.
- An empty map of MatchLabels matches all namespaces in a cluster.
+ description: NamespaceSelector selects the namespaces to which
+ this ACL applies. An empty map of MatchLabels matches all
+ namespaces in a cluster.
properties:
matchLabels:
additionalProperties:
type: string
- description: |-
- MatchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels
- map is equivalent to an element of matchExpressions, whose key field is "key", the
- operator is "In", and the values array contains only "value". The requirements are ANDed.
+ description: MatchLabels is a map of {key,value} pairs.
+ A single {key,value} in the matchLabels map is equivalent
+ to an element of matchExpressions, whose key field is
+ "key", the operator is "In", and the values array contains
+ only "value". The requirements are ANDed.
type: object
type: object
type: array
required:
- namespaceSelectors
type: object
certSecretRef:
description: |-
CertSecretRef can be given the name of a Secret containing
either or both of
- a PEM-encoded client certificate (`tls.crt`) and private
key (`tls.key`);
- a PEM-encoded CA certificate (`ca.crt`)
and whichever are supplied, will be used for connecting to the
registry. The client cert and key are useful if you are
authenticating with a certificate; the CA cert is useful if
you are using a self-signed server certificate. The Secret must
be of type `Opaque` or `kubernetes.io/tls`.
It takes precedence over the values specified in the Secret referred
to by `.spec.secretRef`.
properties:
name:
description: Name of the referent.
type: string
required:
- name
type: object
insecure:
description: |-
Insecure allows connecting to a non-TLS HTTP container registry.
This field is only taken into account if the .spec.type field is set to 'oci'.
type: boolean
interval: interval:
description: |- description: Interval at which to check the URL for updates.
Interval at which the HelmRepository URL is checked for updates.
This interval is approximate and may be subject to jitter to ensure
efficient use of resources.
pattern: ^([0-9]+(\.[0-9]+)?(ms|s|m|h))+$
type: string type: string
passCredentials: passCredentials:
description: |- description: PassCredentials allows the credentials from the SecretRef
PassCredentials allows the credentials from the SecretRef to be passed to be passed on to a host that does not match the host as defined
on to a host that does not match the host as defined in URL. in URL. This may be required if the host of the advertised chart
This may be required if the host of the advertised chart URLs in the URLs in the index differ from the defined URL. Enabling this should
index differ from the defined URL. be done with caution, as it can potentially result in credentials
Enabling this should be done with caution, as it can potentially result getting stolen in a MITM-attack.
in credentials getting stolen in a MITM-attack.
type: boolean type: boolean
provider:
default: generic
description: |-
Provider used for authentication, can be 'aws', 'azure', 'gcp' or 'generic'.
This field is optional, and only taken into account if the .spec.type field is set to 'oci'.
When not specified, defaults to 'generic'.
enum:
- generic
- aws
- azure
- gcp
type: string
secretRef: secretRef:
description: |- description: SecretRef specifies the Secret containing authentication
SecretRef specifies the Secret containing authentication credentials credentials for the HelmRepository. For HTTP/S basic auth the secret
for the HelmRepository. must contain 'username' and 'password' fields. For TLS the secret
For HTTP/S basic auth the secret must contain 'username' and 'password' must contain a 'certFile' and 'keyFile', and/or 'caCert' fields.
fields.
Support for TLS auth using the 'certFile' and 'keyFile', and/or 'caFile'
keys is deprecated. Please use `.spec.certSecretRef` instead.
properties: properties:
name: name:
description: Name of the referent. description: Name of the referent.
@ -681,33 +323,19 @@ spec:
- name - name
type: object type: object
suspend: suspend:
description: |- description: Suspend tells the controller to suspend the reconciliation
Suspend tells the controller to suspend the reconciliation of this of this HelmRepository.
HelmRepository.
type: boolean type: boolean
timeout: timeout:
description: |- default: 60s
Timeout is used for the index fetch operation for an HTTPS helm repository, description: Timeout of the index fetch operation, defaults to 60s.
and for remote OCI Repository operations like pulling for an OCI helm
chart by the associated HelmChart.
Its default value is 60s.
pattern: ^([0-9]+(\.[0-9]+)?(ms|s|m))+$
type: string
type:
description: |-
Type of the HelmRepository.
When this field is set to "oci", the URL field value must be prefixed with "oci://".
enum:
- default
- oci
type: string type: string
url: url:
description: |- description: URL of the Helm repository, a valid URL contains at least
URL of the Helm repository, a valid URL contains at least a protocol and a protocol and host.
host.
pattern: ^(http|https|oci)://.*$
type: string type: string
required: required:
- interval
- url - url
type: object type: object
status: status:
@ -719,80 +347,79 @@ spec:
description: Artifact represents the last successful HelmRepository description: Artifact represents the last successful HelmRepository
reconciliation. reconciliation.
properties: properties:
digest: checksum:
description: Digest is the digest of the file in the form of '<algorithm>:<checksum>'. description: Checksum is the SHA256 checksum of the Artifact file.
pattern: ^[a-z0-9]+(?:[.+_-][a-z0-9]+)*:[a-zA-Z0-9=_-]+$
type: string type: string
lastUpdateTime: lastUpdateTime:
description: |- description: LastUpdateTime is the timestamp corresponding to
LastUpdateTime is the timestamp corresponding to the last update of the the last update of the Artifact.
Artifact.
format: date-time format: date-time
type: string type: string
metadata:
additionalProperties:
type: string
description: Metadata holds upstream information such as OCI annotations.
type: object
path: path:
description: |- description: Path is the relative file path of the Artifact. It
Path is the relative file path of the Artifact. It can be used to locate can be used to locate the file in the root of the Artifact storage
the file in the root of the Artifact storage on the local file system of on the local file system of the controller managing the Source.
the controller managing the Source.
type: string type: string
revision: revision:
description: |- description: Revision is a human-readable identifier traceable
Revision is a human-readable identifier traceable in the origin source in the origin source system. It can be a Git commit SHA, Git
system. It can be a Git commit SHA, Git tag, a Helm chart version, etc. tag, a Helm chart version, etc.
type: string type: string
size: size:
description: Size is the number of bytes in the file. description: Size is the number of bytes in the file.
format: int64 format: int64
type: integer type: integer
url: url:
description: |- description: URL is the HTTP address of the Artifact as exposed
URL is the HTTP address of the Artifact as exposed by the controller by the controller managing the Source. It can be used to retrieve
managing the Source. It can be used to retrieve the Artifact for the Artifact for consumption, e.g. by another controller applying
consumption, e.g. by another controller applying the Artifact contents. the Artifact contents.
type: string type: string
required: required:
- lastUpdateTime
- path - path
- revision
- url - url
type: object type: object
conditions: conditions:
description: Conditions holds the conditions for the HelmRepository. description: Conditions holds the conditions for the HelmRepository.
items: items:
description: Condition contains details for one aspect of the current description: "Condition contains details for one aspect of the current
state of this API Resource. state of this API Resource. --- This struct is intended for direct
use as an array at the field path .status.conditions. For example,
type FooStatus struct{ // Represents the observations of a
foo's current state. // Known .status.conditions.type are:
\"Available\", \"Progressing\", and \"Degraded\" // +patchMergeKey=type
\ // +patchStrategy=merge // +listType=map // +listMapKey=type
\ Conditions []metav1.Condition `json:\"conditions,omitempty\"
patchStrategy:\"merge\" patchMergeKey:\"type\" protobuf:\"bytes,1,rep,name=conditions\"`
\n // other fields }"
properties: properties:
lastTransitionTime: lastTransitionTime:
description: |- description: lastTransitionTime is the last time the condition
lastTransitionTime is the last time the condition transitioned from one status to another. transitioned from one status to another. This should be when
This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable. the underlying condition changed. If that is not known, then
using the time when the API field changed is acceptable.
format: date-time format: date-time
type: string type: string
message: message:
description: |- description: message is a human readable message indicating
message is a human readable message indicating details about the transition. details about the transition. This may be an empty string.
This may be an empty string.
maxLength: 32768 maxLength: 32768
type: string type: string
observedGeneration: observedGeneration:
description: |- description: observedGeneration represents the .metadata.generation
observedGeneration represents the .metadata.generation that the condition was set based upon. that the condition was set based upon. For instance, if .metadata.generation
For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date is currently 12, but the .status.conditions[x].observedGeneration
with respect to the current state of the instance. is 9, the condition is out of date with respect to the current
state of the instance.
format: int64 format: int64
minimum: 0 minimum: 0
type: integer type: integer
reason: reason:
description: |- description: reason contains a programmatic identifier indicating
reason contains a programmatic identifier indicating the reason for the condition's last transition. the reason for the condition's last transition. Producers
Producers of specific condition types may define expected values and meanings for this field, of specific condition types may define expected values and
and whether the values are considered a guaranteed API. meanings for this field, and whether the values are considered
The value should be a CamelCase string. a guaranteed API. The value should be a CamelCase string.
This field may not be empty. This field may not be empty.
maxLength: 1024 maxLength: 1024
minLength: 1 minLength: 1
@ -807,6 +434,10 @@ spec:
type: string type: string
type: type:
description: type of condition in CamelCase or in foo.example.com/CamelCase. description: type of condition in CamelCase or in foo.example.com/CamelCase.
--- Many .condition.type values are consistent across resources
like Available, but because arbitrary conditions can be useful
(see .node.status.conditions), the ability to deconflict is
important. The regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt)
maxLength: 316 maxLength: 316
pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$
type: string type: string
@ -819,26 +450,29 @@ spec:
type: object type: object
type: array type: array
lastHandledReconcileAt: lastHandledReconcileAt:
description: |- description: LastHandledReconcileAt holds the value of the most recent
LastHandledReconcileAt holds the value of the most recent reconcile request value, so a change of the annotation value can
reconcile request value, so a change of the annotation value be detected.
can be detected.
type: string type: string
observedGeneration: observedGeneration:
description: |- description: ObservedGeneration is the last observed generation of
ObservedGeneration is the last observed generation of the HelmRepository the HelmRepository object.
object.
format: int64 format: int64
type: integer type: integer
url: url:
description: |- description: URL is the dynamic fetch link for the latest Artifact.
URL is the dynamic fetch link for the latest Artifact. It is provided on a "best effort" basis, and using the precise HelmRepositoryStatus.Artifact
It is provided on a "best effort" basis, and using the precise data is recommended.
HelmRepositoryStatus.Artifact data is recommended.
type: string type: string
type: object type: object
type: object type: object
served: true served: true
storage: false storage: true
subresources: subresources:
status: {} status: {}
status:
acceptedNames:
kind: ""
plural: ""
conditions: []
storedVersions: []
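
To make the v1 certSecretRef field described in the schema above concrete, here is a minimal sketch pairing a HelmRepository with a kubernetes.io/tls Secret. All names, the URL, and the PEM placeholders are illustrative and not taken from this repository.

---
apiVersion: v1
kind: Secret
metadata:
  name: example-repo-tls               # illustrative name
type: kubernetes.io/tls
stringData:
  tls.crt: <PEM-encoded client certificate>   # placeholder
  tls.key: <PEM-encoded client private key>   # placeholder
  ca.crt: <PEM-encoded CA certificate>        # placeholder; useful for self-signed servers
---
apiVersion: source.toolkit.fluxcd.io/v1
kind: HelmRepository
metadata:
  name: example-repo                   # illustrative name
spec:
  interval: 5m
  url: https://charts.example.com      # placeholder URL
  # Per the schema above, this takes precedence over credentials in .spec.secretRef.
  certSecretRef:
    name: example-repo-tls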

View File

@ -1,821 +0,0 @@
---
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
annotations:
controller-gen.kubebuilder.io/version: v0.16.1
name: ocirepositories.source.toolkit.fluxcd.io
spec:
group: source.toolkit.fluxcd.io
names:
kind: OCIRepository
listKind: OCIRepositoryList
plural: ocirepositories
shortNames:
- ocirepo
singular: ocirepository
scope: Namespaced
versions:
- additionalPrinterColumns:
- jsonPath: .spec.url
name: URL
type: string
- jsonPath: .status.conditions[?(@.type=="Ready")].status
name: Ready
type: string
- jsonPath: .status.conditions[?(@.type=="Ready")].message
name: Status
type: string
- jsonPath: .metadata.creationTimestamp
name: Age
type: date
name: v1
schema:
openAPIV3Schema:
description: OCIRepository is the Schema for the ocirepositories API
properties:
apiVersion:
description: |-
APIVersion defines the versioned schema of this representation of an object.
Servers should convert recognized schemas to the latest internal value, and
may reject unrecognized values.
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources
type: string
kind:
description: |-
Kind is a string value representing the REST resource this object represents.
Servers may infer this from the endpoint the client submits requests to.
Cannot be updated.
In CamelCase.
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
type: string
metadata:
type: object
spec:
description: OCIRepositorySpec defines the desired state of OCIRepository
properties:
certSecretRef:
description: |-
CertSecretRef can be given the name of a Secret containing
either or both of
- a PEM-encoded client certificate (`tls.crt`) and private
key (`tls.key`);
- a PEM-encoded CA certificate (`ca.crt`)
and whichever are supplied, will be used for connecting to the
registry. The client cert and key are useful if you are
authenticating with a certificate; the CA cert is useful if
you are using a self-signed server certificate. The Secret must
be of type `Opaque` or `kubernetes.io/tls`.
properties:
name:
description: Name of the referent.
type: string
required:
- name
type: object
ignore:
description: |-
Ignore overrides the set of excluded patterns in the .sourceignore format
(which is the same as .gitignore). If not provided, a default will be used,
consult the documentation for your version to find out what those are.
type: string
insecure:
description: Insecure allows connecting to a non-TLS HTTP container
registry.
type: boolean
interval:
description: |-
Interval at which the OCIRepository URL is checked for updates.
This interval is approximate and may be subject to jitter to ensure
efficient use of resources.
pattern: ^([0-9]+(\.[0-9]+)?(ms|s|m|h))+$
type: string
layerSelector:
description: |-
LayerSelector specifies which layer should be extracted from the OCI artifact.
When not specified, the first layer found in the artifact is selected.
properties:
mediaType:
description: |-
MediaType specifies the OCI media type of the layer
which should be extracted from the OCI Artifact. The
first layer matching this type is selected.
type: string
operation:
description: |-
Operation specifies how the selected layer should be processed.
By default, the layer compressed content is extracted to storage.
When the operation is set to 'copy', the layer compressed content
is persisted to storage as it is.
enum:
- extract
- copy
type: string
type: object
provider:
default: generic
description: |-
The provider used for authentication, can be 'aws', 'azure', 'gcp' or 'generic'.
When not specified, defaults to 'generic'.
enum:
- generic
- aws
- azure
- gcp
type: string
proxySecretRef:
description: |-
ProxySecretRef specifies the Secret containing the proxy configuration
to use while communicating with the container registry.
properties:
name:
description: Name of the referent.
type: string
required:
- name
type: object
ref:
description: |-
The OCI reference to pull and monitor for changes,
defaults to the latest tag.
properties:
digest:
description: |-
Digest is the image digest to pull, takes precedence over SemVer.
The value should be in the format 'sha256:<HASH>'.
type: string
semver:
description: |-
SemVer is the range of tags to pull selecting the latest within
the range, takes precedence over Tag.
type: string
semverFilter:
description: SemverFilter is a regex pattern to filter the tags
within the SemVer range.
type: string
tag:
description: Tag is the image tag to pull, defaults to latest.
type: string
type: object
secretRef:
description: |-
SecretRef contains the secret name containing the registry login
credentials to resolve image metadata.
The secret must be of type kubernetes.io/dockerconfigjson.
properties:
name:
description: Name of the referent.
type: string
required:
- name
type: object
serviceAccountName:
description: |-
ServiceAccountName is the name of the Kubernetes ServiceAccount used to authenticate
the image pull if the service account has attached pull secrets. For more information:
https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/#add-imagepullsecrets-to-a-service-account
type: string
suspend:
description: This flag tells the controller to suspend the reconciliation
of this source.
type: boolean
timeout:
default: 60s
description: The timeout for remote OCI Repository operations like
pulling, defaults to 60s.
pattern: ^([0-9]+(\.[0-9]+)?(ms|s|m))+$
type: string
url:
description: |-
URL is a reference to an OCI artifact repository hosted
on a remote container registry.
pattern: ^oci://.*$
type: string
verify:
description: |-
Verify contains the secret name containing the trusted public keys
used to verify the signature and specifies which provider to use to check
whether the OCI image is authentic.
properties:
matchOIDCIdentity:
description: |-
MatchOIDCIdentity specifies the identity matching criteria to use
while verifying an OCI artifact which was signed using Cosign keyless
signing. The artifact's identity is deemed to be verified if any of the
specified matchers match against the identity.
items:
description: |-
OIDCIdentityMatch specifies options for verifying the certificate identity,
i.e. the issuer and the subject of the certificate.
properties:
issuer:
description: |-
Issuer specifies the regex pattern to match against to verify
the OIDC issuer in the Fulcio certificate. The pattern must be a
valid Go regular expression.
type: string
subject:
description: |-
Subject specifies the regex pattern to match against to verify
the identity subject in the Fulcio certificate. The pattern must
be a valid Go regular expression.
type: string
required:
- issuer
- subject
type: object
type: array
provider:
default: cosign
description: Provider specifies the technology used to sign the
OCI Artifact.
enum:
- cosign
- notation
type: string
secretRef:
description: |-
SecretRef specifies the Kubernetes Secret containing the
trusted public keys.
properties:
name:
description: Name of the referent.
type: string
required:
- name
type: object
required:
- provider
type: object
required:
- interval
- url
type: object
status:
default:
observedGeneration: -1
description: OCIRepositoryStatus defines the observed state of OCIRepository
properties:
artifact:
description: Artifact represents the output of the last successful
OCI Repository sync.
properties:
digest:
description: Digest is the digest of the file in the form of '<algorithm>:<checksum>'.
pattern: ^[a-z0-9]+(?:[.+_-][a-z0-9]+)*:[a-zA-Z0-9=_-]+$
type: string
lastUpdateTime:
description: |-
LastUpdateTime is the timestamp corresponding to the last update of the
Artifact.
format: date-time
type: string
metadata:
additionalProperties:
type: string
description: Metadata holds upstream information such as OCI annotations.
type: object
path:
description: |-
Path is the relative file path of the Artifact. It can be used to locate
the file in the root of the Artifact storage on the local file system of
the controller managing the Source.
type: string
revision:
description: |-
Revision is a human-readable identifier traceable in the origin source
system. It can be a Git commit SHA, Git tag, a Helm chart version, etc.
type: string
size:
description: Size is the number of bytes in the file.
format: int64
type: integer
url:
description: |-
URL is the HTTP address of the Artifact as exposed by the controller
managing the Source. It can be used to retrieve the Artifact for
consumption, e.g. by another controller applying the Artifact contents.
type: string
required:
- lastUpdateTime
- path
- revision
- url
type: object
conditions:
description: Conditions holds the conditions for the OCIRepository.
items:
description: Condition contains details for one aspect of the current
state of this API Resource.
properties:
lastTransitionTime:
description: |-
lastTransitionTime is the last time the condition transitioned from one status to another.
This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable.
format: date-time
type: string
message:
description: |-
message is a human readable message indicating details about the transition.
This may be an empty string.
maxLength: 32768
type: string
observedGeneration:
description: |-
observedGeneration represents the .metadata.generation that the condition was set based upon.
For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date
with respect to the current state of the instance.
format: int64
minimum: 0
type: integer
reason:
description: |-
reason contains a programmatic identifier indicating the reason for the condition's last transition.
Producers of specific condition types may define expected values and meanings for this field,
and whether the values are considered a guaranteed API.
The value should be a CamelCase string.
This field may not be empty.
maxLength: 1024
minLength: 1
pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$
type: string
status:
description: status of the condition, one of True, False, Unknown.
enum:
- "True"
- "False"
- Unknown
type: string
type:
description: type of condition in CamelCase or in foo.example.com/CamelCase.
maxLength: 316
pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$
type: string
required:
- lastTransitionTime
- message
- reason
- status
- type
type: object
type: array
lastHandledReconcileAt:
description: |-
LastHandledReconcileAt holds the value of the most recent
reconcile request value, so a change of the annotation value
can be detected.
type: string
observedGeneration:
description: ObservedGeneration is the last observed generation.
format: int64
type: integer
observedIgnore:
description: |-
ObservedIgnore is the observed exclusion patterns used for constructing
the source artifact.
type: string
observedLayerSelector:
description: |-
ObservedLayerSelector is the observed layer selector used for constructing
the source artifact.
properties:
mediaType:
description: |-
MediaType specifies the OCI media type of the layer
which should be extracted from the OCI Artifact. The
first layer matching this type is selected.
type: string
operation:
description: |-
Operation specifies how the selected layer should be processed.
By default, the layer compressed content is extracted to storage.
When the operation is set to 'copy', the layer compressed content
is persisted to storage as it is.
enum:
- extract
- copy
type: string
type: object
url:
description: URL is the download link for the artifact output of the
last OCI Repository sync.
type: string
type: object
type: object
served: true
storage: true
subresources:
status: {}
- additionalPrinterColumns:
- jsonPath: .spec.url
name: URL
type: string
- jsonPath: .status.conditions[?(@.type=="Ready")].status
name: Ready
type: string
- jsonPath: .status.conditions[?(@.type=="Ready")].message
name: Status
type: string
- jsonPath: .metadata.creationTimestamp
name: Age
type: date
deprecated: true
deprecationWarning: v1beta2 OCIRepository is deprecated, upgrade to v1
name: v1beta2
schema:
openAPIV3Schema:
description: OCIRepository is the Schema for the ocirepositories API
properties:
apiVersion:
description: |-
APIVersion defines the versioned schema of this representation of an object.
Servers should convert recognized schemas to the latest internal value, and
may reject unrecognized values.
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources
type: string
kind:
description: |-
Kind is a string value representing the REST resource this object represents.
Servers may infer this from the endpoint the client submits requests to.
Cannot be updated.
In CamelCase.
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
type: string
metadata:
type: object
spec:
description: OCIRepositorySpec defines the desired state of OCIRepository
properties:
certSecretRef:
description: |-
CertSecretRef can be given the name of a Secret containing
either or both of
- a PEM-encoded client certificate (`tls.crt`) and private
key (`tls.key`);
- a PEM-encoded CA certificate (`ca.crt`)
and whichever are supplied, will be used for connecting to the
registry. The client cert and key are useful if you are
authenticating with a certificate; the CA cert is useful if
you are using a self-signed server certificate. The Secret must
be of type `Opaque` or `kubernetes.io/tls`.
Note: Support for the `caFile`, `certFile` and `keyFile` keys has
been deprecated.
properties:
name:
description: Name of the referent.
type: string
required:
- name
type: object
ignore:
description: |-
Ignore overrides the set of excluded patterns in the .sourceignore format
(which is the same as .gitignore). If not provided, a default will be used,
consult the documentation for your version to find out what those are.
type: string
insecure:
description: Insecure allows connecting to a non-TLS HTTP container
registry.
type: boolean
interval:
description: |-
Interval at which the OCIRepository URL is checked for updates.
This interval is approximate and may be subject to jitter to ensure
efficient use of resources.
pattern: ^([0-9]+(\.[0-9]+)?(ms|s|m|h))+$
type: string
layerSelector:
description: |-
LayerSelector specifies which layer should be extracted from the OCI artifact.
When not specified, the first layer found in the artifact is selected.
properties:
mediaType:
description: |-
MediaType specifies the OCI media type of the layer
which should be extracted from the OCI Artifact. The
first layer matching this type is selected.
type: string
operation:
description: |-
Operation specifies how the selected layer should be processed.
By default, the layer compressed content is extracted to storage.
When the operation is set to 'copy', the layer compressed content
is persisted to storage as it is.
enum:
- extract
- copy
type: string
type: object
provider:
default: generic
description: |-
The provider used for authentication, can be 'aws', 'azure', 'gcp' or 'generic'.
When not specified, defaults to 'generic'.
enum:
- generic
- aws
- azure
- gcp
type: string
proxySecretRef:
description: |-
ProxySecretRef specifies the Secret containing the proxy configuration
to use while communicating with the container registry.
properties:
name:
description: Name of the referent.
type: string
required:
- name
type: object
ref:
description: |-
The OCI reference to pull and monitor for changes,
defaults to the latest tag.
properties:
digest:
description: |-
Digest is the image digest to pull, takes precedence over SemVer.
The value should be in the format 'sha256:<HASH>'.
type: string
semver:
description: |-
SemVer is the range of tags to pull selecting the latest within
the range, takes precedence over Tag.
type: string
semverFilter:
description: SemverFilter is a regex pattern to filter the tags
within the SemVer range.
type: string
tag:
description: Tag is the image tag to pull, defaults to latest.
type: string
type: object
secretRef:
description: |-
SecretRef contains the secret name containing the registry login
credentials to resolve image metadata.
The secret must be of type kubernetes.io/dockerconfigjson.
properties:
name:
description: Name of the referent.
type: string
required:
- name
type: object
serviceAccountName:
description: |-
ServiceAccountName is the name of the Kubernetes ServiceAccount used to authenticate
the image pull if the service account has attached pull secrets. For more information:
https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/#add-imagepullsecrets-to-a-service-account
type: string
suspend:
description: This flag tells the controller to suspend the reconciliation
of this source.
type: boolean
timeout:
default: 60s
description: The timeout for remote OCI Repository operations like
pulling, defaults to 60s.
pattern: ^([0-9]+(\.[0-9]+)?(ms|s|m))+$
type: string
url:
description: |-
URL is a reference to an OCI artifact repository hosted
on a remote container registry.
pattern: ^oci://.*$
type: string
verify:
description: |-
Verify contains the secret name containing the trusted public keys
used to verify the signature and specifies which provider to use to check
whether the OCI image is authentic.
properties:
matchOIDCIdentity:
description: |-
MatchOIDCIdentity specifies the identity matching criteria to use
while verifying an OCI artifact which was signed using Cosign keyless
signing. The artifact's identity is deemed to be verified if any of the
specified matchers match against the identity.
items:
description: |-
OIDCIdentityMatch specifies options for verifying the certificate identity,
i.e. the issuer and the subject of the certificate.
properties:
issuer:
description: |-
Issuer specifies the regex pattern to match against to verify
the OIDC issuer in the Fulcio certificate. The pattern must be a
valid Go regular expression.
type: string
subject:
description: |-
Subject specifies the regex pattern to match against to verify
the identity subject in the Fulcio certificate. The pattern must
be a valid Go regular expression.
type: string
required:
- issuer
- subject
type: object
type: array
provider:
default: cosign
description: Provider specifies the technology used to sign the
OCI Artifact.
enum:
- cosign
- notation
type: string
secretRef:
description: |-
SecretRef specifies the Kubernetes Secret containing the
trusted public keys.
properties:
name:
description: Name of the referent.
type: string
required:
- name
type: object
required:
- provider
type: object
required:
- interval
- url
type: object
status:
default:
observedGeneration: -1
description: OCIRepositoryStatus defines the observed state of OCIRepository
properties:
artifact:
description: Artifact represents the output of the last successful
OCI Repository sync.
properties:
digest:
description: Digest is the digest of the file in the form of '<algorithm>:<checksum>'.
pattern: ^[a-z0-9]+(?:[.+_-][a-z0-9]+)*:[a-zA-Z0-9=_-]+$
type: string
lastUpdateTime:
description: |-
LastUpdateTime is the timestamp corresponding to the last update of the
Artifact.
format: date-time
type: string
metadata:
additionalProperties:
type: string
description: Metadata holds upstream information such as OCI annotations.
type: object
path:
description: |-
Path is the relative file path of the Artifact. It can be used to locate
the file in the root of the Artifact storage on the local file system of
the controller managing the Source.
type: string
revision:
description: |-
Revision is a human-readable identifier traceable in the origin source
system. It can be a Git commit SHA, Git tag, a Helm chart version, etc.
type: string
size:
description: Size is the number of bytes in the file.
format: int64
type: integer
url:
description: |-
URL is the HTTP address of the Artifact as exposed by the controller
managing the Source. It can be used to retrieve the Artifact for
consumption, e.g. by another controller applying the Artifact contents.
type: string
required:
- lastUpdateTime
- path
- revision
- url
type: object
conditions:
description: Conditions holds the conditions for the OCIRepository.
items:
description: Condition contains details for one aspect of the current
state of this API Resource.
properties:
lastTransitionTime:
description: |-
lastTransitionTime is the last time the condition transitioned from one status to another.
This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable.
format: date-time
type: string
message:
description: |-
message is a human readable message indicating details about the transition.
This may be an empty string.
maxLength: 32768
type: string
observedGeneration:
description: |-
observedGeneration represents the .metadata.generation that the condition was set based upon.
For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date
with respect to the current state of the instance.
format: int64
minimum: 0
type: integer
reason:
description: |-
reason contains a programmatic identifier indicating the reason for the condition's last transition.
Producers of specific condition types may define expected values and meanings for this field,
and whether the values are considered a guaranteed API.
The value should be a CamelCase string.
This field may not be empty.
maxLength: 1024
minLength: 1
pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$
type: string
status:
description: status of the condition, one of True, False, Unknown.
enum:
- "True"
- "False"
- Unknown
type: string
type:
description: type of condition in CamelCase or in foo.example.com/CamelCase.
maxLength: 316
pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$
type: string
required:
- lastTransitionTime
- message
- reason
- status
- type
type: object
type: array
contentConfigChecksum:
description: |-
ContentConfigChecksum is a checksum of all the configurations related to
the content of the source artifact:
- .spec.ignore
- .spec.layerSelector
observed in .status.observedGeneration version of the object. This can
be used to determine if the content configuration has changed and the
artifact needs to be rebuilt.
It has the format of `<algo>:<checksum>`, for example: `sha256:<checksum>`.
Deprecated: Replaced with explicit fields for observed artifact content
config in the status.
type: string
lastHandledReconcileAt:
description: |-
LastHandledReconcileAt holds the value of the most recent
reconcile request value, so a change of the annotation value
can be detected.
type: string
observedGeneration:
description: ObservedGeneration is the last observed generation.
format: int64
type: integer
observedIgnore:
description: |-
ObservedIgnore is the observed exclusion patterns used for constructing
the source artifact.
type: string
observedLayerSelector:
description: |-
ObservedLayerSelector is the observed layer selector used for constructing
the source artifact.
properties:
mediaType:
description: |-
MediaType specifies the OCI media type of the layer
which should be extracted from the OCI Artifact. The
first layer matching this type is selected.
type: string
operation:
description: |-
Operation specifies how the selected layer should be processed.
By default, the layer compressed content is extracted to storage.
When the operation is set to 'copy', the layer compressed content
is persisted to storage as it is.
enum:
- extract
- copy
type: string
type: object
url:
description: URL is the download link for the artifact output of the
last OCI Repository sync.
type: string
type: object
type: object
served: true
storage: false
subresources:
status: {}
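
As a worked example of the ref, layerSelector and verify fields in the OCIRepository schema above, the following sketch tracks a SemVer range, copies the first matching layer as-is, and requires a Cosign keyless signature whose Fulcio identity matches the given patterns. The URL, media type and regex values are illustrative placeholders.

apiVersion: source.toolkit.fluxcd.io/v1
kind: OCIRepository
metadata:
  name: example-oci                    # illustrative name
spec:
  interval: 10m
  url: oci://registry.example.com/org/manifests   # placeholder registry
  ref:
    semver: "1.x"                      # the newest tag within the range is selected
  layerSelector:
    mediaType: application/vnd.example.layer.v1.tar+gzip   # illustrative media type
    operation: copy                    # persist the compressed layer content as-is
  verify:
    provider: cosign
    matchOIDCIdentity:
      # Both patterns must be valid Go regular expressions (placeholders here).
      - issuer: '^https://token\.actions\.githubusercontent\.com$'
        subject: '^https://github\.com/example-org/example-repo.*$'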

View File

@ -5,5 +5,4 @@ resources:
- bases/source.toolkit.fluxcd.io_helmrepositories.yaml - bases/source.toolkit.fluxcd.io_helmrepositories.yaml
- bases/source.toolkit.fluxcd.io_helmcharts.yaml - bases/source.toolkit.fluxcd.io_helmcharts.yaml
- bases/source.toolkit.fluxcd.io_buckets.yaml - bases/source.toolkit.fluxcd.io_buckets.yaml
- bases/source.toolkit.fluxcd.io_ocirepositories.yaml
# +kubebuilder:scaffold:crdkustomizeresource # +kubebuilder:scaffold:crdkustomizeresource

View File

@ -51,8 +51,6 @@ spec:
valueFrom: valueFrom:
fieldRef: fieldRef:
fieldPath: metadata.namespace fieldPath: metadata.namespace
- name: TUF_ROOT # store the Fulcio root CA file in tmp
value: "/tmp/.sigstore"
args: args:
- --watch-all-namespaces - --watch-all-namespaces
- --log-level=info - --log-level=info

View File

@ -6,4 +6,4 @@ resources:
images: images:
- name: fluxcd/source-controller - name: fluxcd/source-controller
newName: fluxcd/source-controller newName: fluxcd/source-controller
newTag: v1.6.0 newTag: v0.24.1

View File

@ -1,24 +0,0 @@
# permissions for end users to edit ocirepositories.
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: ocirepository-editor-role
rules:
- apiGroups:
- source.toolkit.fluxcd.io
resources:
- ocirepositories
verbs:
- create
- delete
- get
- list
- patch
- update
- watch
- apiGroups:
- source.toolkit.fluxcd.io
resources:
- ocirepositories/status
verbs:
- get

View File

@ -1,20 +0,0 @@
# permissions for end users to view ocirepositories.
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: ocirepository-viewer-role
rules:
- apiGroups:
- source.toolkit.fluxcd.io
resources:
- ocirepositories
verbs:
- get
- list
- watch
- apiGroups:
- source.toolkit.fluxcd.io
resources:
- ocirepositories/status
verbs:
- get
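
For context on how a role like the viewer role above is consumed, a minimal sketch of a binding that grants it to a single user; the binding and subject names are placeholders.

apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: ocirepository-viewer-binding   # illustrative name
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: ocirepository-viewer-role
subjects:
  - apiGroup: rbac.authorization.k8s.io
    kind: User
    name: jane                         # placeholder subject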

View File

@ -1,7 +1,9 @@
--- ---
apiVersion: rbac.authorization.k8s.io/v1 apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole kind: ClusterRole
metadata: metadata:
creationTimestamp: null
name: manager-role name: manager-role
rules: rules:
- apiGroups: - apiGroups:
@ -19,20 +21,10 @@ rules:
- get - get
- list - list
- watch - watch
- apiGroups:
- ""
resources:
- serviceaccounts/token
verbs:
- create
- apiGroups: - apiGroups:
- source.toolkit.fluxcd.io - source.toolkit.fluxcd.io
resources: resources:
- buckets - buckets
- gitrepositories
- helmcharts
- helmrepositories
- ocirepositories
verbs: verbs:
- create - create
- delete - delete
@ -45,10 +37,6 @@ rules:
- source.toolkit.fluxcd.io - source.toolkit.fluxcd.io
resources: resources:
- buckets/finalizers - buckets/finalizers
- gitrepositories/finalizers
- helmcharts/finalizers
- helmrepositories/finalizers
- ocirepositories/finalizers
verbs: verbs:
- create - create
- delete - delete
@ -59,10 +47,96 @@ rules:
- source.toolkit.fluxcd.io - source.toolkit.fluxcd.io
resources: resources:
- buckets/status - buckets/status
- gitrepositories/status verbs:
- helmcharts/status - get
- helmrepositories/status - patch
- ocirepositories/status - update
- apiGroups:
- source.toolkit.fluxcd.io
resources:
- gitrepositories
verbs:
- create
- delete
- get
- list
- patch
- update
- watch
- apiGroups:
- source.toolkit.fluxcd.io
resources:
- gitrepositories/finalizers
verbs:
- create
- delete
- get
- patch
- update
- apiGroups:
- source.toolkit.fluxcd.io
resources:
- gitrepositories/status
verbs:
- get
- patch
- update
- apiGroups:
- source.toolkit.fluxcd.io
resources:
- helmcharts
verbs:
- create
- delete
- get
- list
- patch
- update
- watch
- apiGroups:
- source.toolkit.fluxcd.io
resources:
- helmcharts/finalizers
verbs:
- create
- delete
- get
- patch
- update
- apiGroups:
- source.toolkit.fluxcd.io
resources:
- helmcharts/status
verbs:
- get
- patch
- update
- apiGroups:
- source.toolkit.fluxcd.io
resources:
- helmrepositories
verbs:
- create
- delete
- get
- list
- patch
- update
- watch
- apiGroups:
- source.toolkit.fluxcd.io
resources:
- helmrepositories/finalizers
verbs:
- create
- delete
- get
- patch
- update
- apiGroups:
- source.toolkit.fluxcd.io
resources:
- helmrepositories/status
verbs: verbs:
- get - get
- patch - patch

View File

@ -1,11 +0,0 @@
apiVersion: source.toolkit.fluxcd.io/v1
kind: HelmChart
metadata:
name: helmchart-sample-oci
spec:
chart: stefanprodan/charts/podinfo
version: '>=6.0.0 <7.0.0'
sourceRef:
kind: HelmRepository
name: helmrepository-sample-oci
interval: 1m

View File

@ -1,8 +0,0 @@
apiVersion: source.toolkit.fluxcd.io/v1
kind: HelmRepository
metadata:
name: helmrepository-sample-oci
spec:
interval: 1m
type: oci
url: oci://ghcr.io/

View File

@ -1,9 +0,0 @@
apiVersion: source.toolkit.fluxcd.io/v1
kind: OCIRepository
metadata:
name: ocirepository-sample
spec:
interval: 1m
url: oci://ghcr.io/stefanprodan/manifests/podinfo
ref:
tag: 6.1.6
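
The sample above pins a fixed tag; per the ref schema in the OCIRepository CRD, the same object can instead track a SemVer range, optionally narrowed by the semverFilter regex. A sketch, with the filter value purely illustrative:

apiVersion: source.toolkit.fluxcd.io/v1
kind: OCIRepository
metadata:
  name: ocirepository-sample-semver
spec:
  interval: 1m
  url: oci://ghcr.io/stefanprodan/manifests/podinfo
  ref:
    # Selects the newest tag in the range whose name also matches the filter.
    semver: "6.1.x"
    semverFilter: ".*"                 # placeholder regex; a real filter narrows the candidate tags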

View File

@ -1,4 +1,4 @@
apiVersion: source.toolkit.fluxcd.io/v1 apiVersion: source.toolkit.fluxcd.io/v1beta2
kind: Bucket kind: Bucket
metadata: metadata:
name: bucket-sample name: bucket-sample

View File

@ -1,4 +1,4 @@
apiVersion: source.toolkit.fluxcd.io/v1 apiVersion: source.toolkit.fluxcd.io/v1beta2
kind: GitRepository kind: GitRepository
metadata: metadata:
name: gitrepository-sample name: gitrepository-sample

View File

@ -1,4 +1,4 @@
apiVersion: source.toolkit.fluxcd.io/v1 apiVersion: source.toolkit.fluxcd.io/v1beta2
kind: HelmChart kind: HelmChart
metadata: metadata:
name: helmchart-git-sample name: helmchart-git-sample

View File

@ -1,12 +1,11 @@
apiVersion: source.toolkit.fluxcd.io/v1 apiVersion: source.toolkit.fluxcd.io/v1beta2
kind: HelmChart kind: HelmChart
metadata: metadata:
name: helmchart-sample name: helmchart-sample
spec: spec:
chart: podinfo chart: podinfo
version: '6.x' version: '>=2.0.0 <3.0.0'
sourceRef: sourceRef:
kind: HelmRepository kind: HelmRepository
name: helmrepository-sample name: helmrepository-sample
interval: 1m interval: 1m
ignoreMissingValuesFiles: true

View File

@ -1,4 +1,4 @@
apiVersion: source.toolkit.fluxcd.io/v1 apiVersion: source.toolkit.fluxcd.io/v1beta2
kind: HelmRepository kind: HelmRepository
metadata: metadata:
name: helmrepository-sample name: helmrepository-sample

View File

@ -1,5 +1,5 @@
--- ---
apiVersion: source.toolkit.fluxcd.io/v1 apiVersion: source.toolkit.fluxcd.io/v1beta1
kind: Bucket kind: Bucket
metadata: metadata:
name: podinfo name: podinfo

View File

@ -1,10 +1,29 @@
apiVersion: source.toolkit.fluxcd.io/v1 apiVersion: source.toolkit.fluxcd.io/v1beta1
kind: GitRepository kind: GitRepository
metadata: metadata:
name: large-repo name: large-repo-go-git
spec: spec:
gitImplementation: go-git
interval: 10m interval: 10m
timeout: 2m timeout: 2m
url: https://github.com/nodejs/node.git url: https://github.com/hashgraph/hedera-mirror-node.git
ref: ref:
branch: main branch: main
ignore: |
/*
!/charts
---
apiVersion: source.toolkit.fluxcd.io/v1beta1
kind: GitRepository
metadata:
name: large-repo-libgit2
spec:
gitImplementation: libgit2
interval: 10m
timeout: 2m
url: https://github.com/hashgraph/hedera-mirror-node.git
ref:
branch: main
ignore: |
/*
!/charts

View File

@ -1,5 +1,5 @@
--- ---
apiVersion: source.toolkit.fluxcd.io/v1 apiVersion: source.toolkit.fluxcd.io/v1beta1
kind: Bucket kind: Bucket
metadata: metadata:
name: charts name: charts
@ -13,7 +13,7 @@ spec:
secretRef: secretRef:
name: minio-credentials name: minio-credentials
--- ---
apiVersion: source.toolkit.fluxcd.io/v1 apiVersion: source.toolkit.fluxcd.io/v1beta1
kind: HelmChart kind: HelmChart
metadata: metadata:
name: helmchart-bucket name: helmchart-bucket

View File

@ -1,25 +0,0 @@
---
apiVersion: source.toolkit.fluxcd.io/v1
kind: HelmRepository
metadata:
name: podinfo-notation
spec:
url: oci://ghcr.io/stefanprodan/charts
type: "oci"
interval: 1m
---
apiVersion: source.toolkit.fluxcd.io/v1
kind: HelmChart
metadata:
name: podinfo-notation
spec:
chart: podinfo
sourceRef:
kind: HelmRepository
name: podinfo-notation
version: '6.6.0'
interval: 1m
verify:
provider: notation
secretRef:
name: notation-config

View File

@ -1,35 +0,0 @@
---
apiVersion: source.toolkit.fluxcd.io/v1
kind: HelmRepository
metadata:
name: podinfo
spec:
url: oci://ghcr.io/stefanprodan/charts
type: "oci"
interval: 1m
---
apiVersion: source.toolkit.fluxcd.io/v1
kind: HelmChart
metadata:
name: podinfo
spec:
chart: podinfo
sourceRef:
kind: HelmRepository
name: podinfo
version: '6.1.*'
interval: 1m
---
apiVersion: source.toolkit.fluxcd.io/v1
kind: HelmChart
metadata:
name: podinfo-keyless
spec:
chart: podinfo
sourceRef:
kind: HelmRepository
name: podinfo
version: '6.2.1'
interval: 1m
verify:
provider: cosign

View File

@ -1,14 +0,0 @@
---
apiVersion: source.toolkit.fluxcd.io/v1
kind: OCIRepository
metadata:
name: podinfo-deploy-signed-with-key
spec:
interval: 5m
url: oci://ghcr.io/stefanprodan/podinfo-deploy
ref:
semver: "6.2.x"
verify:
provider: cosign
secretRef:
name: cosign-key

View File

@ -1,12 +0,0 @@
---
apiVersion: source.toolkit.fluxcd.io/v1
kind: OCIRepository
metadata:
name: podinfo-deploy-signed-with-keyless
spec:
interval: 5m
url: oci://ghcr.io/stefanprodan/manifests/podinfo
ref:
semver: "6.2.x"
verify:
provider: cosign

View File

@ -1,14 +0,0 @@
---
apiVersion: source.toolkit.fluxcd.io/v1
kind: OCIRepository
metadata:
name: podinfo-deploy-signed-with-notation
spec:
interval: 5m
url: oci://ghcr.io/stefanprodan/podinfo-deploy
ref:
semver: "6.6.x"
verify:
provider: notation
secretRef:
name: notation-config

View File

@ -14,9 +14,9 @@ See the License for the specific language governing permissions and
limitations under the License. limitations under the License.
*/ */
package controller package controllers
import sourcev1 "github.com/fluxcd/source-controller/api/v1" import sourcev1 "github.com/fluxcd/source-controller/api/v1beta2"
type artifactSet []*sourcev1.Artifact type artifactSet []*sourcev1.Artifact
@ -37,3 +37,25 @@ outer:
} }
return false return false
} }
// hasArtifactUpdated returns true if any revision in the current artifacts
// does not match any revision in the updated artifacts.
// NOTE: artifactSet is a replacement for this. Remove this once it's not used
// anywhere.
func hasArtifactUpdated(current []*sourcev1.Artifact, updated []*sourcev1.Artifact) bool {
if len(current) != len(updated) {
return true
}
OUTER:
for _, c := range current {
for _, u := range updated {
if u.HasRevision(c.Revision) {
continue OUTER
}
}
return true
}
return false
}

View File

@ -14,12 +14,12 @@ See the License for the specific language governing permissions and
limitations under the License. limitations under the License.
*/ */
package controller package controllers
import ( import (
"fmt" "fmt"
sourcev1 "github.com/fluxcd/source-controller/api/v1" sourcev1 "github.com/fluxcd/source-controller/api/v1beta2"
. "github.com/onsi/gomega" . "github.com/onsi/gomega"
"github.com/onsi/gomega/types" "github.com/onsi/gomega/types"
) )
@ -51,6 +51,9 @@ func (m matchArtifact) Match(actual interface{}) (success bool, err error) {
if ok, err = Equal(m.expected.Revision).Match(actualArtifact.Revision); !ok { if ok, err = Equal(m.expected.Revision).Match(actualArtifact.Revision); !ok {
return ok, err return ok, err
} }
if ok, err = Equal(m.expected.Checksum).Match(actualArtifact.Checksum); !ok {
return ok, err
}
if ok, err = Equal(m.expected.Size).Match(actualArtifact.Size); !ok { if ok, err = Equal(m.expected.Size).Match(actualArtifact.Size); !ok {
return ok, err return ok, err
} }

View File

@ -14,7 +14,7 @@ See the License for the specific language governing permissions and
limitations under the License. limitations under the License.
*/ */
package controller package controllers
import ( import (
"testing" "testing"

View File

@ -14,54 +14,48 @@ See the License for the specific language governing permissions and
limitations under the License. limitations under the License.
*/ */
package controller package controllers
import ( import (
"context" "context"
stdtls "crypto/tls" "crypto/sha256"
"errors" "errors"
"fmt" "fmt"
"net/url"
"os" "os"
"path/filepath" "path/filepath"
"sort"
"strings" "strings"
"sync"
"time" "time"
"github.com/opencontainers/go-digest" "github.com/fluxcd/source-controller/pkg/azure"
"golang.org/x/sync/errgroup" "golang.org/x/sync/errgroup"
"golang.org/x/sync/semaphore" "golang.org/x/sync/semaphore"
corev1 "k8s.io/api/core/v1" corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/types" "k8s.io/apimachinery/pkg/types"
kuberecorder "k8s.io/client-go/tools/record" kuberecorder "k8s.io/client-go/tools/record"
"k8s.io/client-go/util/workqueue"
ctrl "sigs.k8s.io/controller-runtime" ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/controller" "sigs.k8s.io/controller-runtime/pkg/controller"
"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
"sigs.k8s.io/controller-runtime/pkg/predicate" "sigs.k8s.io/controller-runtime/pkg/predicate"
"sigs.k8s.io/controller-runtime/pkg/reconcile" "sigs.k8s.io/controller-runtime/pkg/ratelimiter"
eventv1 "github.com/fluxcd/pkg/apis/event/v1beta1"
"github.com/fluxcd/pkg/apis/meta" "github.com/fluxcd/pkg/apis/meta"
"github.com/fluxcd/pkg/runtime/conditions" "github.com/fluxcd/pkg/runtime/conditions"
helper "github.com/fluxcd/pkg/runtime/controller" helper "github.com/fluxcd/pkg/runtime/controller"
"github.com/fluxcd/pkg/runtime/jitter" "github.com/fluxcd/pkg/runtime/events"
"github.com/fluxcd/pkg/runtime/patch" "github.com/fluxcd/pkg/runtime/patch"
"github.com/fluxcd/pkg/runtime/predicates" "github.com/fluxcd/pkg/runtime/predicates"
rreconcile "github.com/fluxcd/pkg/runtime/reconcile"
"github.com/fluxcd/pkg/sourceignore"
sourcev1 "github.com/fluxcd/source-controller/api/v1" sourcev1 "github.com/fluxcd/source-controller/api/v1beta2"
intdigest "github.com/fluxcd/source-controller/internal/digest"
serror "github.com/fluxcd/source-controller/internal/error" serror "github.com/fluxcd/source-controller/internal/error"
"github.com/fluxcd/source-controller/internal/index"
sreconcile "github.com/fluxcd/source-controller/internal/reconcile" sreconcile "github.com/fluxcd/source-controller/internal/reconcile"
"github.com/fluxcd/source-controller/internal/reconcile/summarize" "github.com/fluxcd/source-controller/internal/reconcile/summarize"
"github.com/fluxcd/source-controller/internal/tls"
"github.com/fluxcd/source-controller/pkg/azure"
"github.com/fluxcd/source-controller/pkg/gcp" "github.com/fluxcd/source-controller/pkg/gcp"
"github.com/fluxcd/source-controller/pkg/minio" "github.com/fluxcd/source-controller/pkg/minio"
"github.com/fluxcd/source-controller/pkg/sourceignore"
) )
// maxConcurrentBucketFetches is the upper bound on the goroutines used to // maxConcurrentBucketFetches is the upper bound on the goroutines used to
@ -77,7 +71,7 @@ import (
const maxConcurrentBucketFetches = 100 const maxConcurrentBucketFetches = 100
// bucketReadyCondition contains the information required to summarize a // bucketReadyCondition contains the information required to summarize a
// v1.Bucket Ready Condition. // v1beta2.Bucket Ready Condition.
var bucketReadyCondition = summarize.Conditions{ var bucketReadyCondition = summarize.Conditions{
Target: meta.ReadyCondition, Target: meta.ReadyCondition,
Owned: []string{ Owned: []string{
@ -117,7 +111,7 @@ var bucketFailConditions = []string{
// +kubebuilder:rbac:groups=source.toolkit.fluxcd.io,resources=buckets/finalizers,verbs=get;create;update;patch;delete // +kubebuilder:rbac:groups=source.toolkit.fluxcd.io,resources=buckets/finalizers,verbs=get;create;update;patch;delete
// +kubebuilder:rbac:groups="",resources=secrets,verbs=get;list;watch // +kubebuilder:rbac:groups="",resources=secrets,verbs=get;list;watch
// BucketReconciler reconciles a v1.Bucket object. // BucketReconciler reconciles a v1beta2.Bucket object.
type BucketReconciler struct { type BucketReconciler struct {
client.Client client.Client
kuberecorder.EventRecorder kuberecorder.EventRecorder
@ -125,12 +119,11 @@ type BucketReconciler struct {
Storage *Storage Storage *Storage
ControllerName string ControllerName string
patchOptions []patch.Option
} }
type BucketReconcilerOptions struct { type BucketReconcilerOptions struct {
RateLimiter workqueue.TypedRateLimiter[reconcile.Request] MaxConcurrentReconciles int
RateLimiter ratelimiter.RateLimiter
} }
// BucketProvider is an interface for fetching objects from a storage provider // BucketProvider is an interface for fetching objects from a storage provider
@ -147,7 +140,7 @@ type BucketProvider interface {
// bucket, calling visit for every item. // bucket, calling visit for every item.
// If the underlying client or the visit callback returns an error, // If the underlying client or the visit callback returns an error,
// it returns early. // it returns early.
VisitObjects(ctx context.Context, bucketName string, prefix string, visit func(key, etag string) error) error VisitObjects(ctx context.Context, bucketName string, visit func(key, etag string) error) error
// ObjectIsNotFound returns true if the given error indicates an object // ObjectIsNotFound returns true if the given error indicates an object
// could not be found. // could not be found.
ObjectIsNotFound(error) bool ObjectIsNotFound(error) bool
@ -155,23 +148,98 @@ type BucketProvider interface {
Close(context.Context) Close(context.Context)
} }
// bucketReconcileFunc is the function type for all the v1.Bucket // bucketReconcileFunc is the function type for all the v1beta2.Bucket
// (sub)reconcile functions. The type implementations are grouped and // (sub)reconcile functions. The type implementations are grouped and
// executed serially to perform the complete reconcile of the object. // executed serially to perform the complete reconcile of the object.
type bucketReconcileFunc func(ctx context.Context, sp *patch.SerialPatcher, obj *sourcev1.Bucket, index *index.Digester, dir string) (sreconcile.Result, error) type bucketReconcileFunc func(ctx context.Context, obj *sourcev1.Bucket, index *etagIndex, dir string) (sreconcile.Result, error)
// etagIndex is an index of storage object keys and their Etag values.
type etagIndex struct {
sync.RWMutex
index map[string]string
}
// newEtagIndex returns a new etagIndex with an empty initialized index.
func newEtagIndex() *etagIndex {
return &etagIndex{
index: make(map[string]string),
}
}
func (i *etagIndex) Add(key, etag string) {
i.Lock()
defer i.Unlock()
i.index[key] = etag
}
func (i *etagIndex) Delete(key string) {
i.Lock()
defer i.Unlock()
delete(i.index, key)
}
func (i *etagIndex) Get(key string) string {
i.RLock()
defer i.RUnlock()
return i.index[key]
}
func (i *etagIndex) Has(key string) bool {
i.RLock()
defer i.RUnlock()
_, ok := i.index[key]
return ok
}
func (i *etagIndex) Index() map[string]string {
i.RLock()
defer i.RUnlock()
index := make(map[string]string)
for k, v := range i.index {
index[k] = v
}
return index
}
func (i *etagIndex) Len() int {
i.RLock()
defer i.RUnlock()
return len(i.index)
}
// Revision calculates the SHA256 checksum of the index.
// The keys are sorted, and the SHA256 sum is then calculated over the
// string representation of the key/value pairs, each pair on its own line
// with a space between key and value. The hex-encoded sum is returned as a string.
func (i *etagIndex) Revision() (string, error) {
i.RLock()
defer i.RUnlock()
keyIndex := make([]string, 0, len(i.index))
for k := range i.index {
keyIndex = append(keyIndex, k)
}
sort.Strings(keyIndex)
sum := sha256.New()
for _, k := range keyIndex {
if _, err := sum.Write([]byte(fmt.Sprintf("%s %s\n", k, i.index[k]))); err != nil {
return "", err
}
}
return fmt.Sprintf("%x", sum.Sum(nil)), nil
}
func (r *BucketReconciler) SetupWithManager(mgr ctrl.Manager) error { func (r *BucketReconciler) SetupWithManager(mgr ctrl.Manager) error {
return r.SetupWithManagerAndOptions(mgr, BucketReconcilerOptions{}) return r.SetupWithManagerAndOptions(mgr, BucketReconcilerOptions{})
} }
func (r *BucketReconciler) SetupWithManagerAndOptions(mgr ctrl.Manager, opts BucketReconcilerOptions) error { func (r *BucketReconciler) SetupWithManagerAndOptions(mgr ctrl.Manager, opts BucketReconcilerOptions) error {
r.patchOptions = getPatchOptions(bucketReadyCondition.Owned, r.ControllerName)
return ctrl.NewControllerManagedBy(mgr). return ctrl.NewControllerManagedBy(mgr).
For(&sourcev1.Bucket{}). For(&sourcev1.Bucket{}).
WithEventFilter(predicate.Or(predicate.GenerationChangedPredicate{}, predicates.ReconcileRequestedPredicate{})). WithEventFilter(predicate.Or(predicate.GenerationChangedPredicate{}, predicates.ReconcileRequestedPredicate{})).
WithOptions(controller.Options{ WithOptions(controller.Options{
RateLimiter: opts.RateLimiter, MaxConcurrentReconciles: opts.MaxConcurrentReconciles,
RateLimiter: opts.RateLimiter,
}). }).
Complete(r) Complete(r)
} }
@ -186,8 +254,20 @@ func (r *BucketReconciler) Reconcile(ctx context.Context, req ctrl.Request) (res
return ctrl.Result{}, client.IgnoreNotFound(err) return ctrl.Result{}, client.IgnoreNotFound(err)
} }
// Record suspended status metric
r.RecordSuspend(ctx, obj, obj.Spec.Suspend)
// Return early if the object is suspended
if obj.Spec.Suspend {
log.Info("reconciliation is suspended for this object")
return ctrl.Result{}, nil
}
// Initialize the patch helper with the current version of the object. // Initialize the patch helper with the current version of the object.
serialPatcher := patch.NewSerialPatcher(obj, r.Client) patchHelper, err := patch.NewHelper(obj, r.Client)
if err != nil {
return ctrl.Result{}, err
}
// recResult stores the abstracted reconcile result. // recResult stores the abstracted reconcile result.
var recResult sreconcile.Result var recResult sreconcile.Result
@ -195,47 +275,36 @@ func (r *BucketReconciler) Reconcile(ctx context.Context, req ctrl.Request) (res
// Always attempt to patch the object and status after each reconciliation // Always attempt to patch the object and status after each reconciliation
// NOTE: The final runtime result and error are set in this block. // NOTE: The final runtime result and error are set in this block.
defer func() { defer func() {
summarizeHelper := summarize.NewHelper(r.EventRecorder, serialPatcher) summarizeHelper := summarize.NewHelper(r.EventRecorder, patchHelper)
summarizeOpts := []summarize.Option{ summarizeOpts := []summarize.Option{
summarize.WithConditions(bucketReadyCondition), summarize.WithConditions(bucketReadyCondition),
summarize.WithReconcileResult(recResult), summarize.WithReconcileResult(recResult),
summarize.WithReconcileError(retErr), summarize.WithReconcileError(retErr),
summarize.WithIgnoreNotFound(), summarize.WithIgnoreNotFound(),
summarize.WithProcessors( summarize.WithProcessors(
summarize.ErrorActionHandler, summarize.RecordContextualError,
summarize.RecordReconcileReq, summarize.RecordReconcileReq,
), ),
summarize.WithResultBuilder(sreconcile.AlwaysRequeueResultBuilder{ summarize.WithResultBuilder(sreconcile.AlwaysRequeueResultBuilder{RequeueAfter: obj.GetRequeueAfter()}),
RequeueAfter: jitter.JitteredIntervalDuration(obj.GetRequeueAfter()),
}),
summarize.WithPatchFieldOwner(r.ControllerName), summarize.WithPatchFieldOwner(r.ControllerName),
} }
result, retErr = summarizeHelper.SummarizeAndPatch(ctx, obj, summarizeOpts...) result, retErr = summarizeHelper.SummarizeAndPatch(ctx, obj, summarizeOpts...)
// Always record duration metrics. // Always record readiness and duration metrics
r.Metrics.RecordReadiness(ctx, obj)
r.Metrics.RecordDuration(ctx, obj, start) r.Metrics.RecordDuration(ctx, obj, start)
}() }()
// Examine if the object is under deletion. // Add the finalizer first if it does not exist, to avoid a race condition between init and delete
if !obj.ObjectMeta.DeletionTimestamp.IsZero() {
recResult, retErr = r.reconcileDelete(ctx, obj)
return
}
// Add the finalizer first if it does not exist, to avoid a race condition
// between init and delete.
// Note: Finalizers in general can only be added when the deletionTimestamp
// is not set.
if !controllerutil.ContainsFinalizer(obj, sourcev1.SourceFinalizer) { if !controllerutil.ContainsFinalizer(obj, sourcev1.SourceFinalizer) {
controllerutil.AddFinalizer(obj, sourcev1.SourceFinalizer) controllerutil.AddFinalizer(obj, sourcev1.SourceFinalizer)
recResult = sreconcile.ResultRequeue recResult = sreconcile.ResultRequeue
return return
} }
// Return if the object is suspended. // Examine if the object is under deletion
if obj.Spec.Suspend { if !obj.ObjectMeta.DeletionTimestamp.IsZero() {
log.Info("reconciliation is suspended for this object") recResult, retErr = r.reconcileDelete(ctx, obj)
recResult, retErr = sreconcile.ResultEmpty, nil
return return
} }
@ -245,45 +314,29 @@ func (r *BucketReconciler) Reconcile(ctx context.Context, req ctrl.Request) (res
r.reconcileSource, r.reconcileSource,
r.reconcileArtifact, r.reconcileArtifact,
} }
recResult, retErr = r.reconcile(ctx, serialPatcher, obj, reconcilers) recResult, retErr = r.reconcile(ctx, obj, reconcilers)
return return
} }
// reconcile iterates through the bucketReconcileFunc tasks for the // reconcile iterates through the bucketReconcileFunc tasks for the
// object. It returns early on the first call that returns // object. It returns early on the first call that returns
// reconcile.ResultRequeue, or produces an error. // reconcile.ResultRequeue, or produces an error.
func (r *BucketReconciler) reconcile(ctx context.Context, sp *patch.SerialPatcher, obj *sourcev1.Bucket, reconcilers []bucketReconcileFunc) (sreconcile.Result, error) { func (r *BucketReconciler) reconcile(ctx context.Context, obj *sourcev1.Bucket, reconcilers []bucketReconcileFunc) (sreconcile.Result, error) {
oldObj := obj.DeepCopy() oldObj := obj.DeepCopy()
rreconcile.ProgressiveStatus(false, obj, meta.ProgressingReason, "reconciliation in progress") // Mark as reconciling if generation differs.
if obj.Generation != obj.Status.ObservedGeneration {
var recAtVal string conditions.MarkReconciling(obj, "NewGeneration", "reconciling new object generation (%d)", obj.Generation)
if v, ok := meta.ReconcileAnnotationValue(obj.GetAnnotations()); ok {
recAtVal = v
}
// Persist reconciling if generation differs or reconciliation is requested.
switch {
case obj.Generation != obj.Status.ObservedGeneration:
rreconcile.ProgressiveStatus(false, obj, meta.ProgressingReason,
"processing object: new generation %d -> %d", obj.Status.ObservedGeneration, obj.Generation)
if err := sp.Patch(ctx, obj, r.patchOptions...); err != nil {
return sreconcile.ResultEmpty, serror.NewGeneric(err, sourcev1.PatchOperationFailedReason)
}
case recAtVal != obj.Status.GetLastHandledReconcileRequest():
if err := sp.Patch(ctx, obj, r.patchOptions...); err != nil {
return sreconcile.ResultEmpty, serror.NewGeneric(err, sourcev1.PatchOperationFailedReason)
}
} }
// Create temp working dir // Create temp working dir
tmpDir, err := os.MkdirTemp("", fmt.Sprintf("%s-%s-%s-", obj.Kind, obj.Namespace, obj.Name)) tmpDir, err := os.MkdirTemp("", fmt.Sprintf("%s-%s-%s-", obj.Kind, obj.Namespace, obj.Name))
if err != nil { if err != nil {
e := serror.NewGeneric( e := &serror.Event{
fmt.Errorf("failed to create temporary working directory: %w", err), Err: fmt.Errorf("failed to create temporary working directory: %w", err),
sourcev1.DirCreationFailedReason, Reason: sourcev1.DirCreationFailedReason,
) }
conditions.MarkTrue(obj, sourcev1.StorageOperationFailedCondition, e.Reason, "%s", e) conditions.MarkTrue(obj, sourcev1.StorageOperationFailedCondition, e.Reason, e.Err.Error())
return sreconcile.ResultEmpty, e return sreconcile.ResultEmpty, e
} }
defer func() { defer func() {
@ -297,11 +350,11 @@ func (r *BucketReconciler) reconcile(ctx context.Context, sp *patch.SerialPatche
var ( var (
res sreconcile.Result res sreconcile.Result
resErr error resErr error
index = index.NewDigester() index = newEtagIndex()
) )
for _, rec := range reconcilers { for _, rec := range reconcilers {
recResult, err := rec(ctx, sp, obj, index, tmpDir) recResult, err := rec(ctx, obj, index, tmpDir)
// Exit immediately on ResultRequeue. // Exit immediately on ResultRequeue.
if recResult == sreconcile.ResultRequeue { if recResult == sreconcile.ResultRequeue {
return sreconcile.ResultRequeue, nil return sreconcile.ResultRequeue, nil
@ -317,33 +370,36 @@ func (r *BucketReconciler) reconcile(ctx context.Context, sp *patch.SerialPatche
res = sreconcile.LowestRequeuingResult(res, recResult) res = sreconcile.LowestRequeuingResult(res, recResult)
} }
r.notify(ctx, oldObj, obj, index, res, resErr) r.notify(oldObj, obj, index, res, resErr)
return res, resErr return res, resErr
} }
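The loop above composes the sub-reconcilers serially; below is a stripped-down, self-contained sketch of that control flow (the result type and the "lowest result wins" rule are illustrative stand-ins, not the controller's actual sreconcile package):

package main

import (
	"errors"
	"fmt"
)

type result int

const (
	resultEmpty result = iota
	resultRequeue
	resultSuccess
)

type reconcileFunc func() (result, error)

// run mirrors the reconcile loop above: a requeue exits immediately,
// an error aborts the chain, and otherwise the most conservative
// (lowest) result of all sub-reconcilers is kept.
func run(fns []reconcileFunc) (result, error) {
	res := resultSuccess
	for _, fn := range fns {
		r, err := fn()
		if r == resultRequeue {
			return resultRequeue, nil
		}
		if err != nil {
			return resultEmpty, err
		}
		if r < res {
			res = r
		}
	}
	return res, nil
}

func main() {
	res, err := run([]reconcileFunc{
		func() (result, error) { return resultSuccess, nil },
		func() (result, error) { return resultEmpty, errors.New("fetch failed") },
		func() (result, error) { return resultSuccess, nil }, // never reached
	})
	fmt.Println(res, err) // 0 fetch failed
}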
// notify emits notifications related to the reconciliation. // notify emits notifications related to the reconciliation.
func (r *BucketReconciler) notify(ctx context.Context, oldObj, newObj *sourcev1.Bucket, index *index.Digester, res sreconcile.Result, resErr error) { func (r *BucketReconciler) notify(oldObj, newObj *sourcev1.Bucket, index *etagIndex, res sreconcile.Result, resErr error) {
// Notify successful reconciliation for new artifact and recovery from any // Notify successful reconciliation for new artifact and recovery from any
// failure. // failure.
if resErr == nil && res == sreconcile.ResultSuccess && newObj.Status.Artifact != nil { if resErr == nil && res == sreconcile.ResultSuccess && newObj.Status.Artifact != nil {
annotations := map[string]string{ annotations := map[string]string{
fmt.Sprintf("%s/%s", sourcev1.GroupVersion.Group, eventv1.MetaRevisionKey): newObj.Status.Artifact.Revision, sourcev1.GroupVersion.Group + "/revision": newObj.Status.Artifact.Revision,
fmt.Sprintf("%s/%s", sourcev1.GroupVersion.Group, eventv1.MetaDigestKey): newObj.Status.Artifact.Digest, sourcev1.GroupVersion.Group + "/checksum": newObj.Status.Artifact.Checksum,
}
var oldChecksum string
if oldObj.GetArtifact() != nil {
oldChecksum = oldObj.GetArtifact().Checksum
} }
message := fmt.Sprintf("stored artifact with %d fetched files from '%s' bucket", index.Len(), newObj.Spec.BucketName) message := fmt.Sprintf("stored artifact with %d fetched files from '%s' bucket", index.Len(), newObj.Spec.BucketName)
// Notify on new artifact and failure recovery. // Notify on new artifact and failure recovery.
if !oldObj.GetArtifact().HasDigest(newObj.GetArtifact().Digest) { if oldChecksum != newObj.GetArtifact().Checksum {
r.AnnotatedEventf(newObj, annotations, corev1.EventTypeNormal, r.AnnotatedEventf(newObj, annotations, corev1.EventTypeNormal,
"NewArtifact", message) "NewArtifact", message)
ctrl.LoggerFrom(ctx).Info(message)
} else { } else {
if sreconcile.FailureRecovery(oldObj, newObj, bucketFailConditions) { if sreconcile.FailureRecovery(oldObj, newObj, bucketFailConditions) {
r.AnnotatedEventf(newObj, annotations, corev1.EventTypeNormal, r.AnnotatedEventf(newObj, annotations, corev1.EventTypeNormal,
meta.SucceededReason, message) meta.SucceededReason, message)
ctrl.LoggerFrom(ctx).Info(message)
} }
} }
} }
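For reference, the annotation keys built in notify expand to plain strings; a minimal sketch, assuming eventv1.MetaRevisionKey == "revision" and eventv1.MetaDigestKey == "digest" (their values in fluxcd/pkg; treat them as assumptions here):

package main

import "fmt"

func main() {
	group := "source.toolkit.fluxcd.io" // sourcev1.GroupVersion.Group
	// Assumed values of eventv1.MetaRevisionKey and eventv1.MetaDigestKey.
	for _, key := range []string{"revision", "digest"} {
		fmt.Println(fmt.Sprintf("%s/%s", group, key))
	}
	// Output:
	// source.toolkit.fluxcd.io/revision
	// source.toolkit.fluxcd.io/digest
}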
@ -352,58 +408,30 @@ func (r *BucketReconciler) notify(ctx context.Context, oldObj, newObj *sourcev1.
// reconcileStorage ensures the current state of the storage matches the // reconcileStorage ensures the current state of the storage matches the
// desired and previously observed state. // desired and previously observed state.
// //
// The garbage collection is executed based on the flag-configured settings and // All Artifacts for the object except for the current one in the Status are
// may remove files that are beyond their TTL or the maximum number of files // garbage collected from the Storage.
// to survive a collection cycle.
// If the Artifact in the Status of the object disappeared from the Storage, // If the Artifact in the Status of the object disappeared from the Storage,
// it is removed from the object. // it is removed from the object.
// If the object does not have an Artifact in its Status, a Reconciling // If the object does not have an Artifact in its Status, a Reconciling
// condition is added. // condition is added.
// The hostname of any URL in the Status of the object is updated, to ensure // The hostname of any URL in the Status of the object is updated, to ensure
// it matches the Storage server hostname of the current runtime. // it matches the Storage server hostname of the current runtime.
func (r *BucketReconciler) reconcileStorage(ctx context.Context, sp *patch.SerialPatcher, obj *sourcev1.Bucket, _ *index.Digester, _ string) (sreconcile.Result, error) { func (r *BucketReconciler) reconcileStorage(ctx context.Context, obj *sourcev1.Bucket, _ *etagIndex, _ string) (sreconcile.Result, error) {
// Garbage collect previous advertised artifact(s) from storage // Garbage collect previous advertised artifact(s) from storage
_ = r.garbageCollect(ctx, obj) _ = r.garbageCollect(ctx, obj)
var artifactMissing bool // Determine if the advertised artifact is still in storage
if artifact := obj.GetArtifact(); artifact != nil { if artifact := obj.GetArtifact(); artifact != nil && !r.Storage.ArtifactExist(*artifact) {
// Determine if the advertised artifact is still in storage obj.Status.Artifact = nil
if !r.Storage.ArtifactExist(*artifact) { obj.Status.URL = ""
artifactMissing = true // Remove the condition as the artifact doesn't exist.
} conditions.Delete(obj, sourcev1.ArtifactInStorageCondition)
// If the artifact is in storage, verify if the advertised digest still
// matches the actual artifact
if !artifactMissing {
if err := r.Storage.VerifyArtifact(*artifact); err != nil {
r.Eventf(obj, corev1.EventTypeWarning, "ArtifactVerificationFailed", "failed to verify integrity of artifact: %s", err.Error())
if err = r.Storage.Remove(*artifact); err != nil {
return sreconcile.ResultEmpty, fmt.Errorf("failed to remove artifact after digest mismatch: %w", err)
}
artifactMissing = true
}
}
// If the artifact is missing, remove it from the object
if artifactMissing {
obj.Status.Artifact = nil
obj.Status.URL = ""
}
} }
// Record that we do not have an artifact // Record that we do not have an artifact
if obj.GetArtifact() == nil { if obj.GetArtifact() == nil {
msg := "building artifact" conditions.MarkReconciling(obj, "NoArtifact", "no artifact for resource in storage")
if artifactMissing {
msg += ": disappeared from storage"
}
rreconcile.ProgressiveStatus(true, obj, meta.ProgressingReason, "%s", msg)
conditions.Delete(obj, sourcev1.ArtifactInStorageCondition) conditions.Delete(obj, sourcev1.ArtifactInStorageCondition)
if err := sp.Patch(ctx, obj, r.patchOptions...); err != nil {
return sreconcile.ResultEmpty, serror.NewGeneric(err, sourcev1.PatchOperationFailedReason)
}
return sreconcile.ResultSuccess, nil return sreconcile.ResultSuccess, nil
} }
@ -418,164 +446,92 @@ func (r *BucketReconciler) reconcileStorage(ctx context.Context, sp *patch.Seria
// reconcileSource fetches the upstream bucket contents with the client for the // reconcileSource fetches the upstream bucket contents with the client for the
// given object's Provider, and returns the result. // given object's Provider, and returns the result.
// When a SecretRef is defined, it attempts to fetch the Secret before calling // When a SecretRef is defined, it attempts to fetch the Secret before calling
// the provider. If this fails, it records v1.FetchFailedCondition=True on // the provider. If this fails, it records v1beta2.FetchFailedCondition=True on
// the object and returns early. // the object and returns early.
func (r *BucketReconciler) reconcileSource(ctx context.Context, sp *patch.SerialPatcher, obj *sourcev1.Bucket, index *index.Digester, dir string) (sreconcile.Result, error) { func (r *BucketReconciler) reconcileSource(ctx context.Context, obj *sourcev1.Bucket, index *etagIndex, dir string) (sreconcile.Result, error) {
secret, err := r.getSecret(ctx, obj.Spec.SecretRef, obj.GetNamespace()) secret, err := r.getBucketSecret(ctx, obj)
if err != nil { if err != nil {
e := serror.NewGeneric(err, sourcev1.AuthenticationFailedReason) e := &serror.Event{Err: err, Reason: sourcev1.AuthenticationFailedReason}
conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, e.Reason, "%s", e) conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, e.Reason, e.Error())
// Return error as the world as observed may change // Return error as the world as observed may change
return sreconcile.ResultEmpty, e return sreconcile.ResultEmpty, e
} }
proxyURL, err := r.getProxyURL(ctx, obj)
if err != nil {
e := serror.NewGeneric(err, sourcev1.AuthenticationFailedReason)
conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, e.Reason, "%s", e)
return sreconcile.ResultEmpty, e
}
// Construct provider client // Construct provider client
var provider BucketProvider var provider BucketProvider
switch obj.Spec.Provider { switch obj.Spec.Provider {
case sourcev1.BucketProviderGoogle: case sourcev1.GoogleBucketProvider:
if err = gcp.ValidateSecret(secret); err != nil { if err = gcp.ValidateSecret(secret); err != nil {
e := serror.NewGeneric(err, sourcev1.AuthenticationFailedReason) e := &serror.Event{Err: err, Reason: sourcev1.AuthenticationFailedReason}
conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, e.Reason, "%s", e) conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, e.Reason, e.Error())
return sreconcile.ResultEmpty, e return sreconcile.ResultEmpty, e
} }
var opts []gcp.Option if provider, err = gcp.NewClient(ctx, secret); err != nil {
if secret != nil { e := &serror.Event{Err: err, Reason: "ClientError"}
opts = append(opts, gcp.WithSecret(secret)) conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, e.Reason, e.Error())
}
if proxyURL != nil {
opts = append(opts, gcp.WithProxyURL(proxyURL))
}
if provider, err = gcp.NewClient(ctx, opts...); err != nil {
e := serror.NewGeneric(err, "ClientError")
conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, e.Reason, "%s", e)
return sreconcile.ResultEmpty, e return sreconcile.ResultEmpty, e
} }
case sourcev1.BucketProviderAzure: case sourcev1.AzureBucketProvider:
if err = azure.ValidateSecret(secret); err != nil { if err = azure.ValidateSecret(secret); err != nil {
e := serror.NewGeneric(err, sourcev1.AuthenticationFailedReason) e := &serror.Event{Err: err, Reason: sourcev1.AuthenticationFailedReason}
conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, e.Reason, "%s", e) conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, e.Reason, e.Error())
return sreconcile.ResultEmpty, e return sreconcile.ResultEmpty, e
} }
var opts []azure.Option if provider, err = azure.NewClient(obj, secret); err != nil {
if secret != nil { e := &serror.Event{Err: err, Reason: "ClientError"}
opts = append(opts, azure.WithSecret(secret)) conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, e.Reason, e.Error())
}
if proxyURL != nil {
opts = append(opts, azure.WithProxyURL(proxyURL))
}
if provider, err = azure.NewClient(obj, opts...); err != nil {
e := serror.NewGeneric(err, "ClientError")
conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, e.Reason, "%s", e)
return sreconcile.ResultEmpty, e return sreconcile.ResultEmpty, e
} }
default: default:
if err = minio.ValidateSecret(secret); err != nil { if err = minio.ValidateSecret(secret); err != nil {
e := serror.NewGeneric(err, sourcev1.AuthenticationFailedReason) e := &serror.Event{Err: err, Reason: sourcev1.AuthenticationFailedReason}
conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, e.Reason, "%s", e) conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, e.Reason, e.Error())
return sreconcile.ResultEmpty, e return sreconcile.ResultEmpty, e
} }
tlsConfig, err := r.getTLSConfig(ctx, obj.Spec.CertSecretRef, obj.GetNamespace(), obj.Spec.Endpoint) if provider, err = minio.NewClient(obj, secret); err != nil {
if err != nil { e := &serror.Event{Err: err, Reason: "ClientError"}
e := serror.NewGeneric(err, sourcev1.AuthenticationFailedReason) conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, e.Reason, e.Error())
conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, e.Reason, "%s", e)
return sreconcile.ResultEmpty, e
}
stsSecret, err := r.getSTSSecret(ctx, obj)
if err != nil {
e := serror.NewGeneric(err, sourcev1.AuthenticationFailedReason)
conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, e.Reason, "%s", e)
return sreconcile.ResultEmpty, e
}
stsTLSConfig, err := r.getSTSTLSConfig(ctx, obj)
if err != nil {
err := fmt.Errorf("failed to get STS TLS config: %w", err)
e := serror.NewGeneric(err, sourcev1.AuthenticationFailedReason)
conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, e.Reason, "%s", e)
return sreconcile.ResultEmpty, e
}
if sts := obj.Spec.STS; sts != nil {
if err := minio.ValidateSTSProvider(obj.Spec.Provider, sts); err != nil {
e := serror.NewStalling(err, sourcev1.InvalidSTSConfigurationReason)
conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, e.Reason, "%s", e)
return sreconcile.ResultEmpty, e
}
if _, err := url.Parse(sts.Endpoint); err != nil {
err := fmt.Errorf("failed to parse STS endpoint '%s': %w", sts.Endpoint, err)
e := serror.NewStalling(err, sourcev1.URLInvalidReason)
conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, e.Reason, "%s", e)
return sreconcile.ResultEmpty, e
}
if err := minio.ValidateSTSSecret(sts.Provider, stsSecret); err != nil {
e := serror.NewGeneric(err, sourcev1.AuthenticationFailedReason)
conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, e.Reason, "%s", e)
return sreconcile.ResultEmpty, e
}
}
var opts []minio.Option
if secret != nil {
opts = append(opts, minio.WithSecret(secret))
}
if tlsConfig != nil {
opts = append(opts, minio.WithTLSConfig(tlsConfig))
}
if proxyURL != nil {
opts = append(opts, minio.WithProxyURL(proxyURL))
}
if stsSecret != nil {
opts = append(opts, minio.WithSTSSecret(stsSecret))
}
if stsTLSConfig != nil {
opts = append(opts, minio.WithSTSTLSConfig(stsTLSConfig))
}
if provider, err = minio.NewClient(obj, opts...); err != nil {
e := serror.NewGeneric(err, "ClientError")
conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, e.Reason, "%s", e)
return sreconcile.ResultEmpty, e return sreconcile.ResultEmpty, e
} }
} }
// Fetch etag index // Fetch etag index
if err = fetchEtagIndex(ctx, provider, obj, index, dir); err != nil { if err = fetchEtagIndex(ctx, provider, obj, index, dir); err != nil {
e := serror.NewGeneric(err, sourcev1.BucketOperationFailedReason) e := &serror.Event{Err: err, Reason: sourcev1.BucketOperationFailedReason}
conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, e.Reason, "%s", e) conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, e.Reason, e.Error())
return sreconcile.ResultEmpty, e return sreconcile.ResultEmpty, e
} }
// Check if index has changed compared to current Artifact revision. // Calculate revision
var changed bool revision, err := index.Revision()
if artifact := obj.Status.Artifact; artifact != nil && artifact.Revision != "" { if err != nil {
curRev := digest.Digest(artifact.Revision) return sreconcile.ResultEmpty, &serror.Event{
changed = curRev.Validate() != nil || curRev != index.Digest(curRev.Algorithm()) Err: fmt.Errorf("failed to calculate revision: %w", err),
Reason: meta.FailedReason,
}
} }
// Fetch the bucket objects if required to. // Mark observations about the revision on the object
if artifact := obj.GetArtifact(); artifact == nil || changed { defer func() {
// Mark observations about the revision on the object // As fetchIndexFiles can make last-minute modifications to the etag
defer func() { // index, we need to re-calculate the revision at the end
// As fetchIndexFiles can make last-minute modifications to the etag revision, err := index.Revision()
// index, we need to re-calculate the revision at the end if err != nil {
revision := index.Digest(intdigest.Canonical) ctrl.LoggerFrom(ctx).Error(err, "failed to calculate revision after fetching etag index")
return
}
if !obj.GetArtifact().HasRevision(revision) {
message := fmt.Sprintf("new upstream revision '%s'", revision) message := fmt.Sprintf("new upstream revision '%s'", revision)
if obj.GetArtifact() != nil { conditions.MarkTrue(obj, sourcev1.ArtifactOutdatedCondition, "NewRevision", message)
conditions.MarkTrue(obj, sourcev1.ArtifactOutdatedCondition, "NewRevision", "%s", message) conditions.MarkReconciling(obj, "NewRevision", message)
} }
rreconcile.ProgressiveStatus(true, obj, meta.ProgressingReason, "building artifact: %s", message) }()
if err := sp.Patch(ctx, obj, r.patchOptions...); err != nil {
ctrl.LoggerFrom(ctx).Error(err, "failed to patch")
return
}
}()
if !obj.GetArtifact().HasRevision(revision) {
if err = fetchIndexFiles(ctx, provider, obj, index, dir); err != nil { if err = fetchIndexFiles(ctx, provider, obj, index, dir); err != nil {
e := serror.NewGeneric(err, sourcev1.BucketOperationFailedReason) e := &serror.Event{Err: err, Reason: sourcev1.BucketOperationFailedReason}
conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, e.Reason, "%s", e) conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, e.Reason, e.Error())
return sreconcile.ResultEmpty, e return sreconcile.ResultEmpty, e
} }
} }
@ -588,92 +544,91 @@ func (r *BucketReconciler) reconcileSource(ctx context.Context, sp *patch.Serial
// (Status) data on the object does not match the given. // (Status) data on the object does not match the given.
// //
// The inspection of the given data to the object is deferred, ensuring any // The inspection of the given data to the object is deferred, ensuring any
// stale observations like v1.ArtifactOutdatedCondition are removed. // stale observations like v1beta2.ArtifactOutdatedCondition are removed.
// If the given Artifact does not differ from the object's current, it returns // If the given Artifact does not differ from the object's current, it returns
// early. // early.
// On a successful archive, the Artifact in the Status of the object is set, // On a successful archive, the Artifact in the Status of the object is set,
// and the symlink in the Storage is updated to its path. // and the symlink in the Storage is updated to its path.
func (r *BucketReconciler) reconcileArtifact(ctx context.Context, sp *patch.SerialPatcher, obj *sourcev1.Bucket, index *index.Digester, dir string) (sreconcile.Result, error) { func (r *BucketReconciler) reconcileArtifact(ctx context.Context, obj *sourcev1.Bucket, index *etagIndex, dir string) (sreconcile.Result, error) {
// Calculate revision // Calculate revision
revision := index.Digest(intdigest.Canonical) revision, err := index.Revision()
if err != nil {
return sreconcile.ResultEmpty, &serror.Event{
Err: fmt.Errorf("failed to calculate revision of new artifact: %w", err),
Reason: meta.FailedReason,
}
}
// Create artifact // Create artifact
artifact := r.Storage.NewArtifactFor(obj.Kind, obj, revision.String(), fmt.Sprintf("%s.tar.gz", revision.Encoded())) artifact := r.Storage.NewArtifactFor(obj.Kind, obj, revision, fmt.Sprintf("%s.tar.gz", revision))
// Set the ArtifactInStorageCondition if there's no drift. // Set the ArtifactInStorageCondition if there's no drift.
defer func() { defer func() {
if curArtifact := obj.GetArtifact(); curArtifact != nil && curArtifact.Revision != "" { if obj.GetArtifact().HasRevision(artifact.Revision) {
curRev := digest.Digest(curArtifact.Revision) conditions.Delete(obj, sourcev1.ArtifactOutdatedCondition)
if curRev.Validate() == nil && index.Digest(curRev.Algorithm()) == curRev { conditions.MarkTrue(obj, sourcev1.ArtifactInStorageCondition, meta.SucceededReason,
conditions.Delete(obj, sourcev1.ArtifactOutdatedCondition) "stored artifact for revision '%s'", artifact.Revision)
conditions.MarkTrue(obj, sourcev1.ArtifactInStorageCondition, meta.SucceededReason,
"stored artifact: revision '%s'", artifact.Revision)
}
} }
}() }()
// The artifact is up-to-date // The artifact is up-to-date
if curArtifact := obj.GetArtifact(); curArtifact != nil && curArtifact.Revision != "" { if obj.GetArtifact().HasRevision(artifact.Revision) {
curRev := digest.Digest(curArtifact.Revision) r.eventLogf(ctx, obj, events.EventTypeTrace, sourcev1.ArtifactUpToDateReason, "artifact up-to-date with remote revision: '%s'", artifact.Revision)
if curRev.Validate() == nil && index.Digest(curRev.Algorithm()) == curRev { return sreconcile.ResultSuccess, nil
r.eventLogf(ctx, obj, eventv1.EventTypeTrace, sourcev1.ArtifactUpToDateReason, "artifact up-to-date with remote revision: '%s'", artifact.Revision)
return sreconcile.ResultSuccess, nil
}
} }
// Ensure target path exists and is a directory // Ensure target path exists and is a directory
if f, err := os.Stat(dir); err != nil { if f, err := os.Stat(dir); err != nil {
e := serror.NewGeneric( e := &serror.Event{
fmt.Errorf("failed to stat source path: %w", err), Err: fmt.Errorf("failed to stat source path: %w", err),
sourcev1.StatOperationFailedReason, Reason: sourcev1.StatOperationFailedReason,
) }
conditions.MarkTrue(obj, sourcev1.StorageOperationFailedCondition, e.Reason, "%s", e) conditions.MarkTrue(obj, sourcev1.StorageOperationFailedCondition, e.Reason, e.Err.Error())
return sreconcile.ResultEmpty, e return sreconcile.ResultEmpty, e
} else if !f.IsDir() { } else if !f.IsDir() {
e := serror.NewGeneric( e := &serror.Event{
fmt.Errorf("source path '%s' is not a directory", dir), Err: fmt.Errorf("source path '%s' is not a directory", dir),
sourcev1.InvalidPathReason, Reason: sourcev1.InvalidPathReason,
) }
conditions.MarkTrue(obj, sourcev1.StorageOperationFailedCondition, e.Reason, "%s", e) conditions.MarkTrue(obj, sourcev1.StorageOperationFailedCondition, e.Reason, e.Err.Error())
return sreconcile.ResultEmpty, e return sreconcile.ResultEmpty, e
} }
// Ensure artifact directory exists and acquire lock // Ensure artifact directory exists and acquire lock
if err := r.Storage.MkdirAll(artifact); err != nil { if err := r.Storage.MkdirAll(artifact); err != nil {
e := serror.NewGeneric( e := &serror.Event{
fmt.Errorf("failed to create artifact directory: %w", err), Err: fmt.Errorf("failed to create artifact directory: %w", err),
sourcev1.DirCreationFailedReason, Reason: sourcev1.DirCreationFailedReason,
) }
conditions.MarkTrue(obj, sourcev1.StorageOperationFailedCondition, e.Reason, "%s", e) conditions.MarkTrue(obj, sourcev1.StorageOperationFailedCondition, e.Reason, e.Err.Error())
return sreconcile.ResultEmpty, e return sreconcile.ResultEmpty, e
} }
unlock, err := r.Storage.Lock(artifact) unlock, err := r.Storage.Lock(artifact)
if err != nil { if err != nil {
return sreconcile.ResultEmpty, serror.NewGeneric( return sreconcile.ResultEmpty, &serror.Event{
fmt.Errorf("failed to acquire lock for artifact: %w", err), Err: fmt.Errorf("failed to acquire lock for artifact: %w", err),
meta.FailedReason, Reason: meta.FailedReason,
) }
} }
defer unlock() defer unlock()
// Archive directory to storage // Archive directory to storage
if err := r.Storage.Archive(&artifact, dir, nil); err != nil { if err := r.Storage.Archive(&artifact, dir, nil); err != nil {
e := serror.NewGeneric( e := &serror.Event{
fmt.Errorf("unable to archive artifact to storage: %s", err), Err: fmt.Errorf("unable to archive artifact to storage: %s", err),
sourcev1.ArchiveOperationFailedReason, Reason: sourcev1.ArchiveOperationFailedReason,
) }
conditions.MarkTrue(obj, sourcev1.StorageOperationFailedCondition, e.Reason, "%s", e) conditions.MarkTrue(obj, sourcev1.StorageOperationFailedCondition, e.Reason, e.Err.Error())
return sreconcile.ResultEmpty, e return sreconcile.ResultEmpty, e
} }
// Record it on the object // Record it on the object
obj.Status.Artifact = artifact.DeepCopy() obj.Status.Artifact = artifact.DeepCopy()
obj.Status.ObservedIgnore = obj.Spec.Ignore
// Update symlink on a "best effort" basis // Update symlink on a "best effort" basis
url, err := r.Storage.Symlink(artifact, "latest.tar.gz") url, err := r.Storage.Symlink(artifact, "latest.tar.gz")
if err != nil { if err != nil {
r.eventLogf(ctx, obj, eventv1.EventTypeTrace, sourcev1.SymlinkUpdateFailedReason, r.eventLogf(ctx, obj, events.EventTypeTrace, sourcev1.SymlinkUpdateFailedReason,
"failed to update status URL symlink: %s", err) "failed to update status URL symlink: %s", err)
} }
if url != "" { if url != "" {
@ -708,12 +663,12 @@ func (r *BucketReconciler) reconcileDelete(ctx context.Context, obj *sourcev1.Bu
func (r *BucketReconciler) garbageCollect(ctx context.Context, obj *sourcev1.Bucket) error { func (r *BucketReconciler) garbageCollect(ctx context.Context, obj *sourcev1.Bucket) error {
if !obj.DeletionTimestamp.IsZero() { if !obj.DeletionTimestamp.IsZero() {
if deleted, err := r.Storage.RemoveAll(r.Storage.NewArtifactFor(obj.Kind, obj.GetObjectMeta(), "", "*")); err != nil { if deleted, err := r.Storage.RemoveAll(r.Storage.NewArtifactFor(obj.Kind, obj.GetObjectMeta(), "", "*")); err != nil {
return serror.NewGeneric( return &serror.Event{
fmt.Errorf("garbage collection for deleted resource failed: %s", err), Err: fmt.Errorf("garbage collection for deleted resource failed: %s", err),
"GarbageCollectionFailed", Reason: "GarbageCollectionFailed",
) }
} else if deleted != "" { } else if deleted != "" {
r.eventLogf(ctx, obj, eventv1.EventTypeTrace, "GarbageCollectionSucceeded", r.eventLogf(ctx, obj, events.EventTypeTrace, "GarbageCollectionSucceeded",
"garbage collected artifacts for deleted resource") "garbage collected artifacts for deleted resource")
} }
obj.Status.Artifact = nil obj.Status.Artifact = nil
@ -722,29 +677,29 @@ func (r *BucketReconciler) garbageCollect(ctx context.Context, obj *sourcev1.Buc
if obj.GetArtifact() != nil { if obj.GetArtifact() != nil {
delFiles, err := r.Storage.GarbageCollect(ctx, *obj.GetArtifact(), time.Second*5) delFiles, err := r.Storage.GarbageCollect(ctx, *obj.GetArtifact(), time.Second*5)
if err != nil { if err != nil {
return serror.NewGeneric( return &serror.Event{
fmt.Errorf("garbage collection of artifacts failed: %w", err), Err: fmt.Errorf("garbage collection of artifacts failed: %w", err),
"GarbageCollectionFailed", Reason: "GarbageCollectionFailed",
) }
} }
if len(delFiles) > 0 { if len(delFiles) > 0 {
r.eventLogf(ctx, obj, eventv1.EventTypeTrace, "GarbageCollectionSucceeded", r.eventLogf(ctx, obj, events.EventTypeTrace, "GarbageCollectionSucceeded",
"garbage collected %d artifacts", len(delFiles)) fmt.Sprintf("garbage collected %d artifacts", len(delFiles)))
return nil return nil
} }
} }
return nil return nil
} }
// getSecret attempts to fetch a Secret reference if specified. It returns any client error. // getBucketSecret attempts to fetch the Secret reference if specified on the
func (r *BucketReconciler) getSecret(ctx context.Context, secretRef *meta.LocalObjectReference, // obj. It returns any client error.
namespace string) (*corev1.Secret, error) { func (r *BucketReconciler) getBucketSecret(ctx context.Context, obj *sourcev1.Bucket) (*corev1.Secret, error) {
if secretRef == nil { if obj.Spec.SecretRef == nil {
return nil, nil return nil, nil
} }
secretName := types.NamespacedName{ secretName := types.NamespacedName{
Namespace: namespace, Namespace: obj.GetNamespace(),
Name: secretRef.Name, Name: obj.Spec.SecretRef.Name,
} }
secret := &corev1.Secret{} secret := &corev1.Secret{}
if err := r.Get(ctx, secretName, secret); err != nil { if err := r.Get(ctx, secretName, secret); err != nil {
@ -753,68 +708,6 @@ func (r *BucketReconciler) getSecret(ctx context.Context, secretRef *meta.LocalO
return secret, nil return secret, nil
} }
// getTLSConfig attempts to fetch a TLS configuration from the given
// Secret reference, namespace and endpoint.
func (r *BucketReconciler) getTLSConfig(ctx context.Context,
secretRef *meta.LocalObjectReference, namespace, endpoint string) (*stdtls.Config, error) {
certSecret, err := r.getSecret(ctx, secretRef, namespace)
if err != nil || certSecret == nil {
return nil, err
}
tlsConfig, _, err := tls.KubeTLSClientConfigFromSecret(*certSecret, endpoint)
if err != nil {
return nil, fmt.Errorf("failed to create TLS config: %w", err)
}
if tlsConfig == nil {
return nil, fmt.Errorf("certificate secret does not contain any TLS configuration")
}
return tlsConfig, nil
}
// getProxyURL attempts to fetch a proxy URL from the object's proxy secret
// reference.
func (r *BucketReconciler) getProxyURL(ctx context.Context, obj *sourcev1.Bucket) (*url.URL, error) {
namespace := obj.GetNamespace()
proxySecret, err := r.getSecret(ctx, obj.Spec.ProxySecretRef, namespace)
if err != nil || proxySecret == nil {
return nil, err
}
proxyData := proxySecret.Data
address, ok := proxyData["address"]
if !ok {
return nil, fmt.Errorf("invalid proxy secret '%s/%s': key 'address' is missing",
namespace, obj.Spec.ProxySecretRef.Name)
}
proxyURL, err := url.Parse(string(address))
if err != nil {
return nil, fmt.Errorf("failed to parse proxy address '%s': %w", address, err)
}
user, hasUser := proxyData["username"]
password, hasPassword := proxyData["password"]
if hasUser || hasPassword {
proxyURL.User = url.UserPassword(string(user), string(password))
}
return proxyURL, nil
}
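To illustrate the Secret shape getProxyURL expects, a small standalone sketch; the data keys 'address', 'username' and 'password' come from the function above, while the concrete values are hypothetical:

package main

import (
	"fmt"
	"net/url"
)

func main() {
	// Hypothetical contents of a proxy Secret's data.
	data := map[string][]byte{
		"address":  []byte("http://proxy.example.com:8080"),
		"username": []byte("flux"),
		"password": []byte("s3cret"),
	}
	proxyURL, err := url.Parse(string(data["address"]))
	if err != nil {
		panic(err)
	}
	// Credentials are attached when a username and/or password is present,
	// mirroring the hasUser || hasPassword check in getProxyURL above.
	user, hasUser := data["username"]
	password, hasPassword := data["password"]
	if hasUser || hasPassword {
		proxyURL.User = url.UserPassword(string(user), string(password))
	}
	fmt.Println(proxyURL) // http://flux:s3cret@proxy.example.com:8080
}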
// getSTSSecret attempts to fetch the secret from the object's STS secret
// reference.
func (r *BucketReconciler) getSTSSecret(ctx context.Context, obj *sourcev1.Bucket) (*corev1.Secret, error) {
if obj.Spec.STS == nil {
return nil, nil
}
return r.getSecret(ctx, obj.Spec.STS.SecretRef, obj.GetNamespace())
}
// getSTSTLSConfig attempts to fetch the certificate secret from the object's
// STS configuration.
func (r *BucketReconciler) getSTSTLSConfig(ctx context.Context, obj *sourcev1.Bucket) (*stdtls.Config, error) {
if obj.Spec.STS == nil {
return nil, nil
}
return r.getTLSConfig(ctx, obj.Spec.STS.CertSecretRef, obj.GetNamespace(), obj.Spec.STS.Endpoint)
}
// eventLogf records events, and logs at the same time. // eventLogf records events, and logs at the same time.
// //
// This log is different from the debug log in the EventRecorder, in the sense // This log is different from the debug log in the EventRecorder, in the sense
@ -845,7 +738,7 @@ func (r *BucketReconciler) annotatedEventLogf(ctx context.Context,
// bucket using the given provider, while filtering them using .sourceignore // bucket using the given provider, while filtering them using .sourceignore
// rules. After fetching an object, the etag value in the index is updated to // rules. After fetching an object, the etag value in the index is updated to
// the current value to ensure accuracy. // the current value to ensure accuracy.
func fetchEtagIndex(ctx context.Context, provider BucketProvider, obj *sourcev1.Bucket, index *index.Digester, tempDir string) error { func fetchEtagIndex(ctx context.Context, provider BucketProvider, obj *sourcev1.Bucket, index *etagIndex, tempDir string) error {
ctxTimeout, cancel := context.WithTimeout(ctx, obj.Spec.Timeout.Duration) ctxTimeout, cancel := context.WithTimeout(ctx, obj.Spec.Timeout.Duration)
defer cancel() defer cancel()
@ -863,7 +756,7 @@ func fetchEtagIndex(ctx context.Context, provider BucketProvider, obj *sourcev1.
path := filepath.Join(tempDir, sourceignore.IgnoreFile) path := filepath.Join(tempDir, sourceignore.IgnoreFile)
if _, err := provider.FGetObject(ctxTimeout, obj.Spec.BucketName, sourceignore.IgnoreFile, path); err != nil { if _, err := provider.FGetObject(ctxTimeout, obj.Spec.BucketName, sourceignore.IgnoreFile, path); err != nil {
if !provider.ObjectIsNotFound(err) { if !provider.ObjectIsNotFound(err) {
return fmt.Errorf("failed to get Etag for '%s' object: %w", sourceignore.IgnoreFile, serror.SanitizeError(err)) return err
} }
} }
ps, err := sourceignore.ReadIgnoreFile(path, nil) ps, err := sourceignore.ReadIgnoreFile(path, nil)
@ -877,7 +770,7 @@ func fetchEtagIndex(ctx context.Context, provider BucketProvider, obj *sourcev1.
matcher := sourceignore.NewMatcher(ps) matcher := sourceignore.NewMatcher(ps)
// Build up index // Build up index
err = provider.VisitObjects(ctxTimeout, obj.Spec.BucketName, obj.Spec.Prefix, func(key, etag string) error { err = provider.VisitObjects(ctxTimeout, obj.Spec.BucketName, func(key, etag string) error {
if strings.HasSuffix(key, "/") || key == sourceignore.IgnoreFile { if strings.HasSuffix(key, "/") || key == sourceignore.IgnoreFile {
return nil return nil
} }
@ -899,7 +792,7 @@ func fetchEtagIndex(ctx context.Context, provider BucketProvider, obj *sourcev1.
// using the given provider, and stores them into tempDir. It downloads in // using the given provider, and stores them into tempDir. It downloads in
// parallel, limited to maxConcurrentBucketFetches concurrent downloads. // parallel, limited to maxConcurrentBucketFetches concurrent downloads.
// Because an index is provided, the bucket is assumed to exist. // Because an index is provided, the bucket is assumed to exist.
func fetchIndexFiles(ctx context.Context, provider BucketProvider, obj *sourcev1.Bucket, index *index.Digester, tempDir string) error { func fetchIndexFiles(ctx context.Context, provider BucketProvider, obj *sourcev1.Bucket, index *etagIndex, tempDir string) error {
ctxTimeout, cancel := context.WithTimeout(ctx, obj.Spec.Timeout.Duration) ctxTimeout, cancel := context.WithTimeout(ctx, obj.Spec.Timeout.Duration)
defer cancel() defer cancel()
@ -927,7 +820,7 @@ func fetchIndexFiles(ctx context.Context, provider BucketProvider, obj *sourcev1
index.Delete(k) index.Delete(k)
return nil return nil
} }
return fmt.Errorf("failed to get '%s' object: %w", k, serror.SanitizeError(err)) return fmt.Errorf("failed to get '%s' object: %w", k, err)
} }
if t != etag { if t != etag {
index.Add(k, etag) index.Add(k, etag)
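fetchIndexFiles above bounds its parallelism as its doc comment describes; here is a self-contained sketch of that counting-semaphore pattern (the channel-based semaphore and the fake fetch are illustrative, not the controller's exact implementation, which limits itself to maxConcurrentBucketFetches):

package main

import (
	"fmt"
	"sync"
)

const maxConcurrentFetches = 4 // stand-in for maxConcurrentBucketFetches

func main() {
	keys := []string{"a.yaml", "b.yaml", "c.yaml", "d.yaml", "e.yaml", "f.yaml"}
	sem := make(chan struct{}, maxConcurrentFetches) // counting semaphore
	var wg sync.WaitGroup
	for _, k := range keys {
		wg.Add(1)
		sem <- struct{}{} // acquire a slot; blocks once the limit is reached
		go func(k string) {
			defer wg.Done()
			defer func() { <-sem }() // release the slot
			fmt.Println("fetching", k) // stand-in for provider.FGetObject
		}(k)
	}
	wg.Wait()
}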


@ -14,7 +14,7 @@ See the License for the specific language governing permissions and
limitations under the License. limitations under the License.
*/ */
package controller package controllers
import ( import (
"context" "context"
@ -27,8 +27,7 @@ import (
"gotest.tools/assert" "gotest.tools/assert"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
sourcev1 "github.com/fluxcd/source-controller/api/v1" sourcev1 "github.com/fluxcd/source-controller/api/v1beta2"
"github.com/fluxcd/source-controller/internal/index"
) )
type mockBucketObject struct { type mockBucketObject struct {
@ -41,7 +40,7 @@ type mockBucketClient struct {
objects map[string]mockBucketObject objects map[string]mockBucketObject
} }
var errMockNotFound = fmt.Errorf("not found") var mockNotFound = fmt.Errorf("not found")
func (m mockBucketClient) BucketExists(_ context.Context, name string) (bool, error) { func (m mockBucketClient) BucketExists(_ context.Context, name string) (bool, error) {
return name == m.bucketName, nil return name == m.bucketName, nil
@ -57,7 +56,7 @@ func (m mockBucketClient) FGetObject(_ context.Context, bucket, obj, path string
} }
object, ok := m.objects[obj] object, ok := m.objects[obj]
if !ok { if !ok {
return "", errMockNotFound return "", mockNotFound
} }
if err := os.WriteFile(path, []byte(object.data), os.FileMode(0660)); err != nil { if err := os.WriteFile(path, []byte(object.data), os.FileMode(0660)); err != nil {
return "", err return "", err
@ -66,10 +65,10 @@ func (m mockBucketClient) FGetObject(_ context.Context, bucket, obj, path string
} }
func (m mockBucketClient) ObjectIsNotFound(e error) bool { func (m mockBucketClient) ObjectIsNotFound(e error) bool {
return e == errMockNotFound return e == mockNotFound
} }
func (m mockBucketClient) VisitObjects(_ context.Context, _ string, _ string, f func(key, etag string) error) error { func (m mockBucketClient) VisitObjects(_ context.Context, _ string, f func(key, etag string) error) error {
for key, obj := range m.objects { for key, obj := range m.objects {
if err := f(key, obj.etag); err != nil { if err := f(key, obj.etag); err != nil {
return err return err
@ -78,7 +77,9 @@ func (m mockBucketClient) VisitObjects(_ context.Context, _ string, _ string, f
return nil return nil
} }
func (m mockBucketClient) Close(_ context.Context) {} func (m mockBucketClient) Close(_ context.Context) {
return
}
func (m *mockBucketClient) addObject(key string, object mockBucketObject) { func (m *mockBucketClient) addObject(key string, object mockBucketObject) {
if m.objects == nil { if m.objects == nil {
@ -87,8 +88,8 @@ func (m *mockBucketClient) addObject(key string, object mockBucketObject) {
m.objects[key] = object m.objects[key] = object
} }
func (m *mockBucketClient) objectsToDigestIndex() *index.Digester { func (m *mockBucketClient) objectsToEtagIndex() *etagIndex {
i := index.NewDigester() i := newEtagIndex()
for k, v := range m.objects { for k, v := range m.objects {
i.Add(k, v.etag) i.Add(k, v.etag)
} }
@ -106,15 +107,19 @@ func Test_fetchEtagIndex(t *testing.T) {
} }
t.Run("fetches etag index", func(t *testing.T) { t.Run("fetches etag index", func(t *testing.T) {
tmp := t.TempDir() tmp, err := os.MkdirTemp("", "test-bucket")
if err != nil {
t.Fatal(err)
}
defer os.RemoveAll(tmp)
client := mockBucketClient{bucketName: bucketName} client := mockBucketClient{bucketName: bucketName}
client.addObject("foo.yaml", mockBucketObject{data: "foo.yaml", etag: "etag1"}) client.addObject("foo.yaml", mockBucketObject{data: "foo.yaml", etag: "etag1"})
client.addObject("bar.yaml", mockBucketObject{data: "bar.yaml", etag: "etag2"}) client.addObject("bar.yaml", mockBucketObject{data: "bar.yaml", etag: "etag2"})
client.addObject("baz.yaml", mockBucketObject{data: "baz.yaml", etag: "etag3"}) client.addObject("baz.yaml", mockBucketObject{data: "baz.yaml", etag: "etag3"})
index := index.NewDigester() index := newEtagIndex()
err := fetchEtagIndex(context.TODO(), client, bucket.DeepCopy(), index, tmp) err = fetchEtagIndex(context.TODO(), client, bucket.DeepCopy(), index, tmp)
if err != nil { if err != nil {
t.Fatal(err) t.Fatal(err)
} }
@ -123,25 +128,33 @@ func Test_fetchEtagIndex(t *testing.T) {
}) })
t.Run("an error while bucket does not exist", func(t *testing.T) { t.Run("an error while bucket does not exist", func(t *testing.T) {
tmp := t.TempDir() tmp, err := os.MkdirTemp("", "test-bucket")
if err != nil {
t.Fatal(err)
}
defer os.RemoveAll(tmp)
client := mockBucketClient{bucketName: "other-bucket-name"} client := mockBucketClient{bucketName: "other-bucket-name"}
index := index.NewDigester() index := newEtagIndex()
err := fetchEtagIndex(context.TODO(), client, bucket.DeepCopy(), index, tmp) err = fetchEtagIndex(context.TODO(), client, bucket.DeepCopy(), index, tmp)
assert.ErrorContains(t, err, "not found") assert.ErrorContains(t, err, "not found")
}) })
t.Run("filters with .sourceignore rules", func(t *testing.T) { t.Run("filters with .sourceignore rules", func(t *testing.T) {
tmp := t.TempDir() tmp, err := os.MkdirTemp("", "test-bucket")
if err != nil {
t.Fatal(err)
}
defer os.RemoveAll(tmp)
client := mockBucketClient{bucketName: bucketName} client := mockBucketClient{bucketName: bucketName}
client.addObject(".sourceignore", mockBucketObject{etag: "sourceignore1", data: `*.txt`}) client.addObject(".sourceignore", mockBucketObject{etag: "sourceignore1", data: `*.txt`})
client.addObject("foo.yaml", mockBucketObject{etag: "etag1", data: "foo.yaml"}) client.addObject("foo.yaml", mockBucketObject{etag: "etag1", data: "foo.yaml"})
client.addObject("foo.txt", mockBucketObject{etag: "etag2", data: "foo.txt"}) client.addObject("foo.txt", mockBucketObject{etag: "etag2", data: "foo.txt"})
index := index.NewDigester() index := newEtagIndex()
err := fetchEtagIndex(context.TODO(), client, bucket.DeepCopy(), index, tmp) err = fetchEtagIndex(context.TODO(), client, bucket.DeepCopy(), index, tmp)
if err != nil { if err != nil {
t.Fatal(err) t.Fatal(err)
} }
@ -157,7 +170,11 @@ func Test_fetchEtagIndex(t *testing.T) {
}) })
t.Run("filters with ignore rules from object", func(t *testing.T) { t.Run("filters with ignore rules from object", func(t *testing.T) {
tmp := t.TempDir() tmp, err := os.MkdirTemp("", "test-bucket")
if err != nil {
t.Fatal(err)
}
defer os.RemoveAll(tmp)
client := mockBucketClient{bucketName: bucketName} client := mockBucketClient{bucketName: bucketName}
client.addObject(".sourceignore", mockBucketObject{etag: "sourceignore1", data: `*.txt`}) client.addObject(".sourceignore", mockBucketObject{etag: "sourceignore1", data: `*.txt`})
@ -167,8 +184,8 @@ func Test_fetchEtagIndex(t *testing.T) {
bucket := bucket.DeepCopy() bucket := bucket.DeepCopy()
bucket.Spec.Ignore = &ignore bucket.Spec.Ignore = &ignore
index := index.NewDigester() index := newEtagIndex()
err := fetchEtagIndex(context.TODO(), client, bucket.DeepCopy(), index, tmp) err = fetchEtagIndex(context.TODO(), client, bucket.DeepCopy(), index, tmp)
if err != nil { if err != nil {
t.Fatal(err) t.Fatal(err)
} }
@ -195,16 +212,20 @@ func Test_fetchFiles(t *testing.T) {
} }
t.Run("fetches files", func(t *testing.T) { t.Run("fetches files", func(t *testing.T) {
tmp := t.TempDir() tmp, err := os.MkdirTemp("", "test-bucket")
if err != nil {
t.Fatal(err)
}
defer os.RemoveAll(tmp)
client := mockBucketClient{bucketName: bucketName} client := mockBucketClient{bucketName: bucketName}
client.addObject("foo.yaml", mockBucketObject{data: "foo.yaml", etag: "etag1"}) client.addObject("foo.yaml", mockBucketObject{data: "foo.yaml", etag: "etag1"})
client.addObject("bar.yaml", mockBucketObject{data: "bar.yaml", etag: "etag2"}) client.addObject("bar.yaml", mockBucketObject{data: "bar.yaml", etag: "etag2"})
client.addObject("baz.yaml", mockBucketObject{data: "baz.yaml", etag: "etag3"}) client.addObject("baz.yaml", mockBucketObject{data: "baz.yaml", etag: "etag3"})
index := client.objectsToDigestIndex() index := client.objectsToEtagIndex()
err := fetchIndexFiles(context.TODO(), client, bucket.DeepCopy(), index, tmp) err = fetchIndexFiles(context.TODO(), client, bucket.DeepCopy(), index, tmp)
if err != nil { if err != nil {
t.Fatal(err) t.Fatal(err)
} }
@ -219,26 +240,34 @@ func Test_fetchFiles(t *testing.T) {
}) })
t.Run("an error while fetching returns an error for the whole procedure", func(t *testing.T) { t.Run("an error while fetching returns an error for the whole procedure", func(t *testing.T) {
tmp := t.TempDir() tmp, err := os.MkdirTemp("", "test-bucket")
if err != nil {
t.Fatal(err)
}
defer os.RemoveAll(tmp)
client := mockBucketClient{bucketName: bucketName, objects: map[string]mockBucketObject{}} client := mockBucketClient{bucketName: bucketName, objects: map[string]mockBucketObject{}}
client.objects["error"] = mockBucketObject{} client.objects["error"] = mockBucketObject{}
err := fetchIndexFiles(context.TODO(), client, bucket.DeepCopy(), client.objectsToDigestIndex(), tmp) err = fetchIndexFiles(context.TODO(), client, bucket.DeepCopy(), client.objectsToEtagIndex(), tmp)
if err == nil { if err == nil {
t.Fatal("expected error but got nil") t.Fatal("expected error but got nil")
} }
}) })
t.Run("a changed etag updates the index", func(t *testing.T) { t.Run("a changed etag updates the index", func(t *testing.T) {
tmp := t.TempDir() tmp, err := os.MkdirTemp("", "test-bucket")
if err != nil {
t.Fatal(err)
}
defer os.RemoveAll(tmp)
client := mockBucketClient{bucketName: bucketName} client := mockBucketClient{bucketName: bucketName}
client.addObject("foo.yaml", mockBucketObject{data: "foo.yaml", etag: "etag2"}) client.addObject("foo.yaml", mockBucketObject{data: "foo.yaml", etag: "etag2"})
index := index.NewDigester() index := newEtagIndex()
index.Add("foo.yaml", "etag1") index.Add("foo.yaml", "etag1")
err := fetchIndexFiles(context.TODO(), client, bucket.DeepCopy(), index, tmp) err = fetchIndexFiles(context.TODO(), client, bucket.DeepCopy(), index, tmp)
if err != nil { if err != nil {
t.Fatal(err) t.Fatal(err)
} }
@ -247,17 +276,21 @@ func Test_fetchFiles(t *testing.T) {
}) })
t.Run("a disappeared index entry is removed from the index", func(t *testing.T) { t.Run("a disappeared index entry is removed from the index", func(t *testing.T) {
tmp := t.TempDir() tmp, err := os.MkdirTemp("", "test-bucket")
if err != nil {
t.Fatal(err)
}
defer os.RemoveAll(tmp)
client := mockBucketClient{bucketName: bucketName} client := mockBucketClient{bucketName: bucketName}
client.addObject("foo.yaml", mockBucketObject{data: "foo.yaml", etag: "etag1"}) client.addObject("foo.yaml", mockBucketObject{data: "foo.yaml", etag: "etag1"})
index := index.NewDigester() index := newEtagIndex()
index.Add("foo.yaml", "etag1") index.Add("foo.yaml", "etag1")
// Does not exist on server // Does not exist on server
index.Add("bar.yaml", "etag2") index.Add("bar.yaml", "etag2")
err := fetchIndexFiles(context.TODO(), client, bucket.DeepCopy(), index, tmp) err = fetchIndexFiles(context.TODO(), client, bucket.DeepCopy(), index, tmp)
if err != nil { if err != nil {
t.Fatal(err) t.Fatal(err)
} }
@ -268,16 +301,20 @@ func Test_fetchFiles(t *testing.T) {
t.Run("can fetch more than maxConcurrentFetches", func(t *testing.T) { t.Run("can fetch more than maxConcurrentFetches", func(t *testing.T) {
// this will fail if, for example, the semaphore is not used correctly and blocks // this will fail if, for example, the semaphore is not used correctly and blocks
tmp := t.TempDir() tmp, err := os.MkdirTemp("", "test-bucket")
if err != nil {
t.Fatal(err)
}
defer os.RemoveAll(tmp)
client := mockBucketClient{bucketName: bucketName} client := mockBucketClient{bucketName: bucketName}
for i := 0; i < 2*maxConcurrentBucketFetches; i++ { for i := 0; i < 2*maxConcurrentBucketFetches; i++ {
f := fmt.Sprintf("file-%d", i) f := fmt.Sprintf("file-%d", i)
client.addObject(f, mockBucketObject{etag: f, data: f}) client.addObject(f, mockBucketObject{etag: f, data: f})
} }
index := client.objectsToDigestIndex() index := client.objectsToEtagIndex()
err := fetchIndexFiles(context.TODO(), client, bucket.DeepCopy(), index, tmp) err = fetchIndexFiles(context.TODO(), client, bucket.DeepCopy(), index, tmp)
if err != nil { if err != nil {
t.Fatal(err) t.Fatal(err)
} }

File diff suppressed because it is too large


@ -0,0 +1,784 @@
/*
Copyright 2020 The Flux authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package controllers
import (
"context"
"errors"
"fmt"
"os"
"path/filepath"
"strings"
"time"
securejoin "github.com/cyphar/filepath-securejoin"
"github.com/fluxcd/pkg/runtime/logger"
corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/types"
kuberecorder "k8s.io/client-go/tools/record"
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/builder"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/controller"
"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
"sigs.k8s.io/controller-runtime/pkg/predicate"
"sigs.k8s.io/controller-runtime/pkg/ratelimiter"
"github.com/fluxcd/pkg/apis/meta"
"github.com/fluxcd/pkg/runtime/conditions"
helper "github.com/fluxcd/pkg/runtime/controller"
"github.com/fluxcd/pkg/runtime/events"
"github.com/fluxcd/pkg/runtime/patch"
"github.com/fluxcd/pkg/runtime/predicates"
sourcev1 "github.com/fluxcd/source-controller/api/v1beta2"
serror "github.com/fluxcd/source-controller/internal/error"
sreconcile "github.com/fluxcd/source-controller/internal/reconcile"
"github.com/fluxcd/source-controller/internal/reconcile/summarize"
"github.com/fluxcd/source-controller/internal/util"
"github.com/fluxcd/source-controller/pkg/git"
"github.com/fluxcd/source-controller/pkg/git/libgit2/managed"
"github.com/fluxcd/source-controller/pkg/git/strategy"
"github.com/fluxcd/source-controller/pkg/sourceignore"
)
// gitRepositoryReadyCondition contains the information required to summarize a
// v1beta2.GitRepository Ready Condition.
var gitRepositoryReadyCondition = summarize.Conditions{
Target: meta.ReadyCondition,
Owned: []string{
sourcev1.StorageOperationFailedCondition,
sourcev1.FetchFailedCondition,
sourcev1.IncludeUnavailableCondition,
sourcev1.ArtifactOutdatedCondition,
sourcev1.ArtifactInStorageCondition,
sourcev1.SourceVerifiedCondition,
meta.ReadyCondition,
meta.ReconcilingCondition,
meta.StalledCondition,
},
Summarize: []string{
sourcev1.StorageOperationFailedCondition,
sourcev1.FetchFailedCondition,
sourcev1.IncludeUnavailableCondition,
sourcev1.ArtifactOutdatedCondition,
sourcev1.ArtifactInStorageCondition,
sourcev1.SourceVerifiedCondition,
meta.StalledCondition,
meta.ReconcilingCondition,
},
NegativePolarity: []string{
sourcev1.StorageOperationFailedCondition,
sourcev1.FetchFailedCondition,
sourcev1.IncludeUnavailableCondition,
sourcev1.ArtifactOutdatedCondition,
meta.StalledCondition,
meta.ReconcilingCondition,
},
}
// gitRepositoryFailConditions contains the conditions that represent a failure.
var gitRepositoryFailConditions = []string{
sourcev1.FetchFailedCondition,
sourcev1.IncludeUnavailableCondition,
sourcev1.StorageOperationFailedCondition,
}
// +kubebuilder:rbac:groups=source.toolkit.fluxcd.io,resources=gitrepositories,verbs=get;list;watch;create;update;patch;delete
// +kubebuilder:rbac:groups=source.toolkit.fluxcd.io,resources=gitrepositories/status,verbs=get;update;patch
// +kubebuilder:rbac:groups=source.toolkit.fluxcd.io,resources=gitrepositories/finalizers,verbs=get;create;update;patch;delete
// +kubebuilder:rbac:groups="",resources=events,verbs=create;patch
// GitRepositoryReconciler reconciles a v1beta2.GitRepository object.
type GitRepositoryReconciler struct {
client.Client
kuberecorder.EventRecorder
helper.Metrics
Storage *Storage
ControllerName string
requeueDependency time.Duration
}
type GitRepositoryReconcilerOptions struct {
MaxConcurrentReconciles int
DependencyRequeueInterval time.Duration
RateLimiter ratelimiter.RateLimiter
}
// gitRepositoryReconcileFunc is the function type for all the
// v1beta2.GitRepository (sub)reconcile functions.
type gitRepositoryReconcileFunc func(ctx context.Context, obj *sourcev1.GitRepository, commit *git.Commit, includes *artifactSet, dir string) (sreconcile.Result, error)
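// SetupWithManager sets up the controller with the Manager using the default
// reconciler options.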
func (r *GitRepositoryReconciler) SetupWithManager(mgr ctrl.Manager) error {
return r.SetupWithManagerAndOptions(mgr, GitRepositoryReconcilerOptions{})
}
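// SetupWithManagerAndOptions sets up the controller with the Manager using
// the given GitRepositoryReconcilerOptions.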
func (r *GitRepositoryReconciler) SetupWithManagerAndOptions(mgr ctrl.Manager, opts GitRepositoryReconcilerOptions) error {
r.requeueDependency = opts.DependencyRequeueInterval
return ctrl.NewControllerManagedBy(mgr).
For(&sourcev1.GitRepository{}, builder.WithPredicates(
predicate.Or(predicate.GenerationChangedPredicate{}, predicates.ReconcileRequestedPredicate{}),
)).
WithOptions(controller.Options{
MaxConcurrentReconciles: opts.MaxConcurrentReconciles,
RateLimiter: opts.RateLimiter,
}).
Complete(r)
}
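// An illustrative sketch of wiring the reconciler into a controller-runtime
// manager; "storage" is a hypothetical *Storage value, and scheme
// registration and error handling are elided:
//
//	mgr, _ := ctrl.NewManager(ctrl.GetConfigOrDie(), ctrl.Options{})
//	_ = (&GitRepositoryReconciler{
//		Client:         mgr.GetClient(),
//		Storage:        storage,
//		ControllerName: "source-controller",
//	}).SetupWithManagerAndOptions(mgr, GitRepositoryReconcilerOptions{
//		MaxConcurrentReconciles: 2,
//	})

// Reconcile fetches the GitRepository object, handles suspension, finalizers
// and deletion, and runs the sub-reconcilers, summarizing and patching the
// object's status on every return.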
func (r *GitRepositoryReconciler) Reconcile(ctx context.Context, req ctrl.Request) (result ctrl.Result, retErr error) {
start := time.Now()
log := ctrl.LoggerFrom(ctx)
// Fetch the GitRepository
obj := &sourcev1.GitRepository{}
if err := r.Get(ctx, req.NamespacedName, obj); err != nil {
return ctrl.Result{}, client.IgnoreNotFound(err)
}
// Record suspended status metric
r.RecordSuspend(ctx, obj, obj.Spec.Suspend)
// Return early if the object is suspended
if obj.Spec.Suspend {
log.Info("reconciliation is suspended for this object")
return ctrl.Result{}, nil
}
// Initialize the patch helper with the current version of the object.
patchHelper, err := patch.NewHelper(obj, r.Client)
if err != nil {
return ctrl.Result{}, err
}
// recResult stores the abstracted reconcile result.
var recResult sreconcile.Result
// Always attempt to patch the object and status after each reconciliation
// NOTE: The final runtime result and error are set in this block.
defer func() {
summarizeHelper := summarize.NewHelper(r.EventRecorder, patchHelper)
summarizeOpts := []summarize.Option{
summarize.WithConditions(gitRepositoryReadyCondition),
summarize.WithReconcileResult(recResult),
summarize.WithReconcileError(retErr),
summarize.WithIgnoreNotFound(),
summarize.WithProcessors(
summarize.RecordContextualError,
summarize.RecordReconcileReq,
),
summarize.WithResultBuilder(sreconcile.AlwaysRequeueResultBuilder{RequeueAfter: obj.GetRequeueAfter()}),
summarize.WithPatchFieldOwner(r.ControllerName),
}
result, retErr = summarizeHelper.SummarizeAndPatch(ctx, obj, summarizeOpts...)
// Always record readiness and duration metrics
r.Metrics.RecordReadiness(ctx, obj)
r.Metrics.RecordDuration(ctx, obj, start)
}()
// Add the finalizer first if it does not exist, to avoid the race condition
// between init and delete
if !controllerutil.ContainsFinalizer(obj, sourcev1.SourceFinalizer) {
controllerutil.AddFinalizer(obj, sourcev1.SourceFinalizer)
recResult = sreconcile.ResultRequeue
return
}
// Examine if the object is under deletion
if !obj.ObjectMeta.DeletionTimestamp.IsZero() {
recResult, retErr = r.reconcileDelete(ctx, obj)
return
}
// Reconcile actual object
reconcilers := []gitRepositoryReconcileFunc{
r.reconcileStorage,
r.reconcileSource,
r.reconcileInclude,
r.reconcileArtifact,
}
recResult, retErr = r.reconcile(ctx, obj, reconcilers)
return
}
// reconcile iterates through the gitRepositoryReconcileFunc tasks for the
// object. It returns early on the first call that returns
// reconcile.ResultRequeue, or produces an error.
func (r *GitRepositoryReconciler) reconcile(ctx context.Context, obj *sourcev1.GitRepository, reconcilers []gitRepositoryReconcileFunc) (sreconcile.Result, error) {
oldObj := obj.DeepCopy()
// Mark as reconciling if generation differs
if obj.Generation != obj.Status.ObservedGeneration {
conditions.MarkReconciling(obj, "NewGeneration", "reconciling new object generation (%d)", obj.Generation)
}
// Create temp dir for Git clone
tmpDir, err := util.TempDirForObj("", obj)
if err != nil {
e := &serror.Event{
Err: fmt.Errorf("failed to create temporary working directory: %w", err),
Reason: sourcev1.DirCreationFailedReason,
}
conditions.MarkTrue(obj, sourcev1.StorageOperationFailedCondition, e.Reason, e.Err.Error())
return sreconcile.ResultEmpty, e
}
defer func() {
if err = os.RemoveAll(tmpDir); err != nil {
ctrl.LoggerFrom(ctx).Error(err, "failed to remove temporary working directory")
}
}()
conditions.Delete(obj, sourcev1.StorageOperationFailedCondition)
// Run the sub-reconcilers and build the result of reconciliation.
var (
commit git.Commit
includes artifactSet
res sreconcile.Result
resErr error
)
for _, rec := range reconcilers {
recResult, err := rec(ctx, obj, &commit, &includes, tmpDir)
// Exit immediately on ResultRequeue.
if recResult == sreconcile.ResultRequeue {
return sreconcile.ResultRequeue, nil
}
// If an error is received, prioritize the returned results because an
// error also means immediate requeue.
if err != nil {
resErr = err
res = recResult
break
}
// Prioritize requeue request in the result.
res = sreconcile.LowestRequeuingResult(res, recResult)
}
r.notify(oldObj, obj, commit, res, resErr)
return res, resErr
}
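// A minimal sketch of the result prioritization assumed above; the actual
// sreconcile.LowestRequeuingResult implementation may differ:
//
//	func lowestRequeuingResult(i, j sreconcile.Result) sreconcile.Result {
//		switch {
//		case i == sreconcile.ResultEmpty:
//			return j
//		case j == sreconcile.ResultEmpty:
//			return i
//		case i == sreconcile.ResultRequeue || j == sreconcile.ResultRequeue:
//			// An immediate requeue takes precedence over a success that
//			// only requeues after the configured interval.
//			return sreconcile.ResultRequeue
//		default:
//			return j
//		}
//	}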
// notify emits notification related to the reconciliation.
func (r *GitRepositoryReconciler) notify(oldObj, newObj *sourcev1.GitRepository, commit git.Commit, res sreconcile.Result, resErr error) {
// Notify successful reconciliation for new artifact and recovery from any
// failure.
if resErr == nil && res == sreconcile.ResultSuccess && newObj.Status.Artifact != nil {
annotations := map[string]string{
sourcev1.GroupVersion.Group + "/revision": newObj.Status.Artifact.Revision,
sourcev1.GroupVersion.Group + "/checksum": newObj.Status.Artifact.Checksum,
}
var oldChecksum string
if oldObj.GetArtifact() != nil {
oldChecksum = oldObj.GetArtifact().Checksum
}
message := fmt.Sprintf("stored artifact for commit '%s'", commit.ShortMessage())
// Notify on new artifact and failure recovery.
if oldChecksum != newObj.GetArtifact().Checksum {
r.AnnotatedEventf(newObj, annotations, corev1.EventTypeNormal,
"NewArtifact", message)
} else {
if sreconcile.FailureRecovery(oldObj, newObj, gitRepositoryFailConditions) {
r.AnnotatedEventf(newObj, annotations, corev1.EventTypeNormal,
meta.SucceededReason, message)
}
}
}
}
// reconcileStorage ensures the current state of the storage matches the
// desired and previously observed state.
//
// All Artifacts for the object except for the current one in the Status are
// garbage collected from the Storage.
// If the Artifact in the Status of the object disappeared from the Storage,
// it is removed from the object.
// If the object does not have an Artifact in its Status, a Reconciling
// condition is added.
// The hostname of any URL in the Status of the object is updated, to ensure
// it matches the Storage server hostname of the current runtime.
func (r *GitRepositoryReconciler) reconcileStorage(ctx context.Context,
obj *sourcev1.GitRepository, _ *git.Commit, _ *artifactSet, _ string) (sreconcile.Result, error) {
// Garbage collect previous advertised artifact(s) from storage
_ = r.garbageCollect(ctx, obj)
// Determine if the advertised artifact is still in storage
if artifact := obj.GetArtifact(); artifact != nil && !r.Storage.ArtifactExist(*artifact) {
obj.Status.Artifact = nil
obj.Status.URL = ""
// Remove the condition as the artifact doesn't exist.
conditions.Delete(obj, sourcev1.ArtifactInStorageCondition)
}
// Record that we do not have an artifact
if obj.GetArtifact() == nil {
conditions.MarkReconciling(obj, "NoArtifact", "no artifact for resource in storage")
conditions.Delete(obj, sourcev1.ArtifactInStorageCondition)
return sreconcile.ResultSuccess, nil
}
// Always update URLs to ensure hostname is up-to-date
// TODO(hidde): we may want to send out an event only if we notice the URL has changed
r.Storage.SetArtifactURL(obj.GetArtifact())
obj.Status.URL = r.Storage.SetHostname(obj.Status.URL)
return sreconcile.ResultSuccess, nil
}
// reconcileSource ensures the upstream Git repository and reference can be
// cloned and checked out using the specified configuration, and observes its
// state.
//
// The repository is cloned to the given dir, using the specified configuration
// to check out the reference. In case of an error during this process
// (including transient errors), it records v1beta2.FetchFailedCondition=True
// and returns early.
// On a successful checkout, it removes v1beta2.FetchFailedCondition and
// compares the current revision of HEAD to the revision of the Artifact in the
// Status of the object. It records v1beta2.ArtifactOutdatedCondition=True when
// they differ.
// If specified, the signature of the Git commit is verified. If the signature
// can not be verified or the verification fails, it records
// v1beta2.SourceVerifiedCondition=False and returns early. When successful,
// it records v1beta2.SourceVerifiedCondition=True.
// When all the above is successful, the given Commit pointer is set to the
// commit of the checked out Git repository.
func (r *GitRepositoryReconciler) reconcileSource(ctx context.Context,
obj *sourcev1.GitRepository, commit *git.Commit, _ *artifactSet, dir string) (sreconcile.Result, error) {
// Configure authentication strategy to access the source
var authOpts *git.AuthOptions
var err error
if obj.Spec.SecretRef != nil {
// Attempt to retrieve secret
name := types.NamespacedName{
Namespace: obj.GetNamespace(),
Name: obj.Spec.SecretRef.Name,
}
var secret corev1.Secret
if err := r.Client.Get(ctx, name, &secret); err != nil {
e := &serror.Event{
Err: fmt.Errorf("failed to get secret '%s': %w", name.String(), err),
Reason: sourcev1.AuthenticationFailedReason,
}
conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, e.Reason, e.Err.Error())
// Return error as the world as observed may change
return sreconcile.ResultEmpty, e
}
// Configure strategy with secret
authOpts, err = git.AuthOptionsFromSecret(obj.Spec.URL, &secret)
} else {
// Set the minimal auth options for valid transport.
authOpts, err = git.AuthOptionsWithoutSecret(obj.Spec.URL)
}
if err != nil {
e := &serror.Event{
Err: fmt.Errorf("failed to configure auth strategy for Git implementation '%s': %w", obj.Spec.GitImplementation, err),
Reason: sourcev1.AuthenticationFailedReason,
}
conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, e.Reason, e.Err.Error())
// Return error as the contents of the secret may change
return sreconcile.ResultEmpty, e
}
// Configure checkout strategy
checkoutOpts := git.CheckoutOptions{RecurseSubmodules: obj.Spec.RecurseSubmodules}
if ref := obj.Spec.Reference; ref != nil {
checkoutOpts.Branch = ref.Branch
checkoutOpts.Commit = ref.Commit
checkoutOpts.Tag = ref.Tag
checkoutOpts.SemVer = ref.SemVer
}
checkoutStrategy, err := strategy.CheckoutStrategyForImplementation(ctx,
git.Implementation(obj.Spec.GitImplementation), checkoutOpts)
if err != nil {
e := &serror.Stalling{
Err: fmt.Errorf("failed to configure checkout strategy for Git implementation '%s': %w", obj.Spec.GitImplementation, err),
Reason: sourcev1.GitOperationFailedReason,
}
conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, e.Reason, e.Err.Error())
// Do not return err as recovery without changes is impossible
return sreconcile.ResultEmpty, e
}
repositoryURL := obj.Spec.URL
// managed GIT transport only affects the libgit2 implementation
if managed.Enabled() && obj.Spec.GitImplementation == sourcev1.LibGit2Implementation {
// At present only HTTP connections have the ability to define remote options.
// Although this can be easily extended by ensuring that the fake URL below uses the
// target ssh scheme, and the libgit2/managed/ssh.go pulls that information accordingly.
//
// This is due to the fact that the key libgit2 remote callbacks do not take place for HTTP
// whilst most still work for SSH.
if strings.HasPrefix(repositoryURL, "http") {
// Due to the lack of the callback feature, a fake target URL is created to allow
// for the smart sub transport be able to pick the options specific for this
// GitRepository object.
// The URL should use unique information that does not collide in a multi-tenant
// deployment.
repositoryURL = fmt.Sprintf("http://%s/%s/%d", obj.Name, obj.UID, obj.Generation)
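// Illustrative example with hypothetical values: an object named "podinfo"
// with UID "1f2a9c" at generation 3 yields "http://podinfo/1f2a9c/3",
// which is unique per object and generation.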
managed.AddTransportOptions(repositoryURL,
managed.TransportOptions{
TargetURL: obj.Spec.URL,
CABundle: authOpts.CAFile,
})
// We remove the options from memory, to avoid accumulating unused options over time.
defer managed.RemoveTransportOptions(repositoryURL)
}
}
// Checkout HEAD of reference in object
gitCtx, cancel := context.WithTimeout(ctx, obj.Spec.Timeout.Duration)
defer cancel()
c, err := checkoutStrategy.Checkout(gitCtx, dir, repositoryURL, authOpts)
if err != nil {
e := &serror.Event{
Err: fmt.Errorf("failed to checkout and determine revision: %w", err),
Reason: sourcev1.GitOperationFailedReason,
}
conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, e.Reason, e.Err.Error())
// Coin flip on transient or persistent error, return error and hope for the best
return sreconcile.ResultEmpty, e
}
// Assign the commit to the shared commit reference.
*commit = *c
ctrl.LoggerFrom(ctx).V(logger.DebugLevel).Info("git repository checked out", "url", obj.Spec.URL, "revision", commit.String())
conditions.Delete(obj, sourcev1.FetchFailedCondition)
// Verify commit signature
if result, err := r.verifyCommitSignature(ctx, obj, *commit); err != nil || result == sreconcile.ResultEmpty {
return result, err
}
// Mark observations about the revision on the object
if !obj.GetArtifact().HasRevision(commit.String()) {
message := fmt.Sprintf("new upstream revision '%s'", commit.String())
conditions.MarkTrue(obj, sourcev1.ArtifactOutdatedCondition, "NewRevision", message)
conditions.MarkReconciling(obj, "NewRevision", message)
}
return sreconcile.ResultSuccess, nil
}
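// An illustrative checkout configuration (hypothetical values): a reference
// with a SemVer range would produce
//
//	checkoutOpts := git.CheckoutOptions{
//		RecurseSubmodules: false,
//		SemVer:            ">=4.0.0 <5.0.0",
//	}
//
// for which the selected strategy checks out the highest matching tag.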
// reconcileArtifact archives a new Artifact to the Storage, if the current
// (Status) data on the object does not match the given.
//
// The inspection of the given data to the object is deferred, ensuring any
// stale observations like v1beta2.ArtifactOutdatedCondition are removed.
// If the given Artifact and/or artifactSet (includes) do not differ from the
// object's current, it returns early.
// Source ignore patterns are loaded, and the given directory is archived while
// taking these patterns into account.
// On a successful archive, the Artifact and Includes in the Status of the
// object are set, and the symlink in the Storage is updated to its path.
func (r *GitRepositoryReconciler) reconcileArtifact(ctx context.Context,
obj *sourcev1.GitRepository, commit *git.Commit, includes *artifactSet, dir string) (sreconcile.Result, error) {
// Create potential new artifact with current available metadata
artifact := r.Storage.NewArtifactFor(obj.Kind, obj.GetObjectMeta(), commit.String(), fmt.Sprintf("%s.tar.gz", commit.Hash.String()))
// Set the ArtifactInStorageCondition if there's no drift.
defer func() {
if obj.GetArtifact().HasRevision(artifact.Revision) && !includes.Diff(obj.Status.IncludedArtifacts) {
conditions.Delete(obj, sourcev1.ArtifactOutdatedCondition)
conditions.MarkTrue(obj, sourcev1.ArtifactInStorageCondition, meta.SucceededReason,
"stored artifact for revision '%s'", artifact.Revision)
}
}()
// The artifact is up-to-date
if obj.GetArtifact().HasRevision(artifact.Revision) && !includes.Diff(obj.Status.IncludedArtifacts) {
r.eventLogf(ctx, obj, events.EventTypeTrace, sourcev1.ArtifactUpToDateReason, "artifact up-to-date with remote revision: '%s'", artifact.Revision)
return sreconcile.ResultSuccess, nil
}
// Ensure target path exists and is a directory
if f, err := os.Stat(dir); err != nil {
e := &serror.Event{
Err: fmt.Errorf("failed to stat target artifact path: %w", err),
Reason: sourcev1.StatOperationFailedReason,
}
conditions.MarkTrue(obj, sourcev1.StorageOperationFailedCondition, e.Reason, e.Err.Error())
return sreconcile.ResultEmpty, e
} else if !f.IsDir() {
e := &serror.Event{
Err: fmt.Errorf("invalid target path: '%s' is not a directory", dir),
Reason: sourcev1.InvalidPathReason,
}
conditions.MarkTrue(obj, sourcev1.StorageOperationFailedCondition, e.Reason, e.Err.Error())
return sreconcile.ResultEmpty, e
}
// Ensure artifact directory exists and acquire lock
if err := r.Storage.MkdirAll(artifact); err != nil {
e := &serror.Event{
Err: fmt.Errorf("failed to create artifact directory: %w", err),
Reason: sourcev1.DirCreationFailedReason,
}
conditions.MarkTrue(obj, sourcev1.StorageOperationFailedCondition, e.Reason, e.Err.Error())
return sreconcile.ResultEmpty, e
}
unlock, err := r.Storage.Lock(artifact)
if err != nil {
return sreconcile.ResultEmpty, &serror.Event{
Err: fmt.Errorf("failed to acquire lock for artifact: %w", err),
Reason: meta.FailedReason,
}
}
defer unlock()
// Load ignore rules for archiving
ignoreDomain := strings.Split(dir, string(filepath.Separator))
ps, err := sourceignore.LoadIgnorePatterns(dir, ignoreDomain)
if err != nil {
return sreconcile.ResultEmpty, &serror.Event{
Err: fmt.Errorf("failed to load source ignore patterns from repository: %w", err),
Reason: "SourceIgnoreError",
}
}
if obj.Spec.Ignore != nil {
ps = append(ps, sourceignore.ReadPatterns(strings.NewReader(*obj.Spec.Ignore), ignoreDomain)...)
}
// Archive directory to storage
if err := r.Storage.Archive(&artifact, dir, SourceIgnoreFilter(ps, ignoreDomain)); err != nil {
e := &serror.Event{
Err: fmt.Errorf("unable to archive artifact to storage: %w", err),
Reason: sourcev1.ArchiveOperationFailedReason,
}
conditions.MarkTrue(obj, sourcev1.StorageOperationFailedCondition, e.Reason, e.Err.Error())
return sreconcile.ResultEmpty, e
}
// Record it on the object
obj.Status.Artifact = artifact.DeepCopy()
obj.Status.IncludedArtifacts = *includes
// Update symlink on a "best effort" basis
url, err := r.Storage.Symlink(artifact, "latest.tar.gz")
if err != nil {
r.eventLogf(ctx, obj, events.EventTypeTrace, sourcev1.SymlinkUpdateFailedReason,
"failed to update status URL symlink: %s", err)
}
if url != "" {
obj.Status.URL = url
}
conditions.Delete(obj, sourcev1.StorageOperationFailedCondition)
return sreconcile.ResultSuccess, nil
}
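// An illustrative sketch of how ignore rules compose, assuming
// .gitignore-style semantics in sourceignore and hypothetical patterns:
//
//	ps := sourceignore.ReadPatterns(strings.NewReader("/docs/\n*.md\n"), ignoreDomain)
//	filter := SourceIgnoreFilter(ps, ignoreDomain)
//	// Files matching the patterns are excluded from the archived artifact.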
// reconcileInclude reconciles the v1beta2.GitRepositoryInclude list specified
// on the object, by copying each include's Artifact (sub)contents to the
// specified path in the given directory.
//
// When one of the includes is unavailable, it marks the object with
// v1beta2.IncludeUnavailableCondition=True and returns early.
// When the copy operations are successful, it removes the
// v1beta2.IncludeUnavailableCondition from the object.
// When the composed artifactSet differs from the current set in the Status of
// the object, it marks the object with v1beta2.ArtifactOutdatedCondition=True.
func (r *GitRepositoryReconciler) reconcileInclude(ctx context.Context,
obj *sourcev1.GitRepository, _ *git.Commit, includes *artifactSet, dir string) (sreconcile.Result, error) {
artifacts := make(artifactSet, len(obj.Spec.Include))
for i, incl := range obj.Spec.Include {
// Do this first as it is much cheaper than copy operations
toPath, err := securejoin.SecureJoin(dir, incl.GetToPath())
if err != nil {
e := &serror.Event{
Err: fmt.Errorf("path calculation for include '%s' failed: %w", incl.GitRepositoryRef.Name, err),
Reason: "IllegalPath",
}
conditions.MarkTrue(obj, sourcev1.IncludeUnavailableCondition, e.Reason, e.Err.Error())
return sreconcile.ResultEmpty, e
}
// Retrieve the included GitRepository
dep := &sourcev1.GitRepository{}
if err := r.Get(ctx, types.NamespacedName{Namespace: obj.Namespace, Name: incl.GitRepositoryRef.Name}, dep); err != nil {
e := &serror.Event{
Err: fmt.Errorf("could not get resource for include '%s': %w", incl.GitRepositoryRef.Name, err),
Reason: "NotFound",
}
conditions.MarkTrue(obj, sourcev1.IncludeUnavailableCondition, e.Reason, e.Err.Error())
return sreconcile.ResultEmpty, e
}
// Confirm include has an artifact
if dep.GetArtifact() == nil {
e := &serror.Event{
Err: fmt.Errorf("no artifact available for include '%s'", incl.GitRepositoryRef.Name),
Reason: "NoArtifact",
}
conditions.MarkTrue(obj, sourcev1.IncludeUnavailableCondition, e.Reason, e.Err.Error())
return sreconcile.ResultEmpty, e
}
// Copy artifact (sub)contents to configured directory
if err := r.Storage.CopyToPath(dep.GetArtifact(), incl.GetFromPath(), toPath); err != nil {
e := &serror.Event{
Err: fmt.Errorf("failed to copy '%s' include from %s to %s: %w", incl.GitRepositoryRef.Name, incl.GetFromPath(), incl.GetToPath(), err),
Reason: "CopyFailure",
}
conditions.MarkTrue(obj, sourcev1.IncludeUnavailableCondition, e.Reason, e.Err.Error())
return sreconcile.ResultEmpty, e
}
artifacts[i] = dep.GetArtifact().DeepCopy()
}
// We now know all includes are available
conditions.Delete(obj, sourcev1.IncludeUnavailableCondition)
// Observe if the artifacts still match the previous included ones
if artifacts.Diff(obj.Status.IncludedArtifacts) {
message := "included artifacts differ from last observed includes"
conditions.MarkTrue(obj, sourcev1.ArtifactOutdatedCondition, "IncludeChange", message)
conditions.MarkReconciling(obj, "IncludeChange", message)
}
// Persist the artifactSet.
*includes = artifacts
return sreconcile.ResultSuccess, nil
}
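// An illustrative example of the path traversal protection SecureJoin
// provides for include paths:
//
//	toPath, _ := securejoin.SecureJoin("/tmp/workdir", "../../etc/passwd")
//	// toPath == "/tmp/workdir/etc/passwd"; the result is always contained
//	// within the root, so a malicious include path cannot escape the
//	// temporary working directory.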
// verifyCommitSignature verifies the signature of the given Git commit, if a
// verification mode is specified on the object.
// If the signature can not be verified or the verification fails, it records
// v1beta2.SourceVerifiedCondition=False and returns.
// When successful, it records v1beta2.SourceVerifiedCondition=True.
// If no verification mode is specified on the object, the
// v1beta2.SourceVerifiedCondition Condition is removed.
func (r *GitRepositoryReconciler) verifyCommitSignature(ctx context.Context, obj *sourcev1.GitRepository, commit git.Commit) (sreconcile.Result, error) {
// Check if commit verification is configured, and remove any old
// observations if it is not
if obj.Spec.Verification == nil || obj.Spec.Verification.Mode == "" {
conditions.Delete(obj, sourcev1.SourceVerifiedCondition)
return sreconcile.ResultSuccess, nil
}
// Get secret with GPG data
publicKeySecret := types.NamespacedName{
Namespace: obj.Namespace,
Name: obj.Spec.Verification.SecretRef.Name,
}
secret := &corev1.Secret{}
if err := r.Client.Get(ctx, publicKeySecret, secret); err != nil {
e := &serror.Event{
Err: fmt.Errorf("PGP public keys secret error: %w", err),
Reason: "VerificationError",
}
conditions.MarkFalse(obj, sourcev1.SourceVerifiedCondition, e.Reason, e.Err.Error())
return sreconcile.ResultEmpty, e
}
var keyRings []string
for _, v := range secret.Data {
keyRings = append(keyRings, string(v))
}
// Verify commit with GPG data from secret
if _, err := commit.Verify(keyRings...); err != nil {
e := &serror.Event{
Err: fmt.Errorf("signature verification of commit '%s' failed: %w", commit.Hash.String(), err),
Reason: "InvalidCommitSignature",
}
conditions.MarkFalse(obj, sourcev1.SourceVerifiedCondition, e.Reason, e.Err.Error())
// Return error in the hope the secret changes
return sreconcile.ResultEmpty, e
}
conditions.MarkTrue(obj, sourcev1.SourceVerifiedCondition, meta.SucceededReason,
"verified signature of commit '%s'", commit.Hash.String())
r.eventLogf(ctx, obj, events.EventTypeTrace, "VerifiedCommit",
"verified signature of commit '%s'", commit.Hash.String())
return sreconcile.ResultSuccess, nil
}
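// An illustrative Secret layout (hypothetical key name): every value in the
// Secret's data is treated as an armored PGP keyring and passed to
// commit.Verify:
//
//	secret.Data = map[string][]byte{
//		"author.asc": []byte("-----BEGIN PGP PUBLIC KEY BLOCK-----\n..."),
//	}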
// reconcileDelete handles the deletion of the object.
// It first garbage collects all Artifacts for the object from the Storage.
// If successful, it removes the finalizer from the object.
func (r *GitRepositoryReconciler) reconcileDelete(ctx context.Context, obj *sourcev1.GitRepository) (sreconcile.Result, error) {
// Garbage collect the resource's artifacts
if err := r.garbageCollect(ctx, obj); err != nil {
// Return the error so we retry the failed garbage collection
return sreconcile.ResultEmpty, err
}
// Remove our finalizer from the list
controllerutil.RemoveFinalizer(obj, sourcev1.SourceFinalizer)
// Stop reconciliation as the object is being deleted
return sreconcile.ResultEmpty, nil
}
// garbageCollect performs a garbage collection for the given object.
//
// It removes all but the current Artifact from the Storage, unless the
// deletion timestamp on the object is set, in which case all Artifacts for
// the object are removed.
func (r *GitRepositoryReconciler) garbageCollect(ctx context.Context, obj *sourcev1.GitRepository) error {
if !obj.DeletionTimestamp.IsZero() {
if deleted, err := r.Storage.RemoveAll(r.Storage.NewArtifactFor(obj.Kind, obj.GetObjectMeta(), "", "*")); err != nil {
return &serror.Event{
Err: fmt.Errorf("garbage collection for deleted resource failed: %w", err),
Reason: "GarbageCollectionFailed",
}
} else if deleted != "" {
r.eventLogf(ctx, obj, events.EventTypeTrace, "GarbageCollectionSucceeded",
"garbage collected artifacts for deleted resource")
}
obj.Status.Artifact = nil
return nil
}
if obj.GetArtifact() != nil {
delFiles, err := r.Storage.GarbageCollect(ctx, *obj.GetArtifact(), time.Second*5)
if err != nil {
return &serror.Event{
Err: fmt.Errorf("garbage collection of artifacts failed: %w", err),
Reason: "GarbageCollectionFailed",
}
}
if len(delFiles) > 0 {
r.eventLogf(ctx, obj, events.EventTypeTrace, "GarbageCollectionSucceeded",
fmt.Sprintf("garbage collected %d artifacts", len(delFiles)))
return nil
}
}
return nil
}
// eventLogf records events, and logs at the same time.
//
// This log is different from the debug log in the EventRecorder, in the sense
// that this is a simple log, while the debug log contains complete details
// about the event.
func (r *GitRepositoryReconciler) eventLogf(ctx context.Context, obj runtime.Object, eventType string, reason string, messageFmt string, args ...interface{}) {
msg := fmt.Sprintf(messageFmt, args...)
// Log and emit event.
if eventType == corev1.EventTypeWarning {
ctrl.LoggerFrom(ctx).Error(errors.New(reason), msg)
} else {
ctrl.LoggerFrom(ctx).Info(msg)
}
r.Eventf(obj, eventType, reason, msg)
}
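// An illustrative use of eventLogf: a warning is logged as an error and
// emitted as a Kubernetes event with the same reason and message:
//
//	r.eventLogf(ctx, obj, corev1.EventTypeWarning, "GarbageCollectionFailed",
//		"garbage collection of artifacts failed: %s", err)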

File diff suppressed because it is too large

File diff suppressed because it is too large

File diff suppressed because it is too large

View File

@ -14,54 +14,46 @@ See the License for the specific language governing permissions and
limitations under the License. limitations under the License.
*/ */
package controller package controllers
import ( import (
"bytes"
"context" "context"
"crypto/tls"
"errors" "errors"
"fmt" "fmt"
"net/url" "net/url"
"strings"
"time" "time"
"github.com/docker/go-units" "github.com/docker/go-units"
"github.com/opencontainers/go-digest"
helmgetter "helm.sh/helm/v3/pkg/getter" helmgetter "helm.sh/helm/v3/pkg/getter"
helmreg "helm.sh/helm/v3/pkg/registry"
corev1 "k8s.io/api/core/v1" corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/types"
kuberecorder "k8s.io/client-go/tools/record" kuberecorder "k8s.io/client-go/tools/record"
"k8s.io/client-go/util/workqueue"
ctrl "sigs.k8s.io/controller-runtime" ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/controller" "sigs.k8s.io/controller-runtime/pkg/controller"
"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
"sigs.k8s.io/controller-runtime/pkg/predicate" "sigs.k8s.io/controller-runtime/pkg/predicate"
"sigs.k8s.io/controller-runtime/pkg/reconcile" "sigs.k8s.io/controller-runtime/pkg/ratelimiter"
eventv1 "github.com/fluxcd/pkg/apis/event/v1beta1"
"github.com/fluxcd/pkg/apis/meta" "github.com/fluxcd/pkg/apis/meta"
"github.com/fluxcd/pkg/runtime/conditions" "github.com/fluxcd/pkg/runtime/conditions"
helper "github.com/fluxcd/pkg/runtime/controller" helper "github.com/fluxcd/pkg/runtime/controller"
"github.com/fluxcd/pkg/runtime/jitter" "github.com/fluxcd/pkg/runtime/events"
"github.com/fluxcd/pkg/runtime/patch" "github.com/fluxcd/pkg/runtime/patch"
"github.com/fluxcd/pkg/runtime/predicates" "github.com/fluxcd/pkg/runtime/predicates"
rreconcile "github.com/fluxcd/pkg/runtime/reconcile"
sourcev1 "github.com/fluxcd/source-controller/api/v1" sourcev1 "github.com/fluxcd/source-controller/api/v1beta2"
"github.com/fluxcd/source-controller/internal/cache"
intdigest "github.com/fluxcd/source-controller/internal/digest"
serror "github.com/fluxcd/source-controller/internal/error" serror "github.com/fluxcd/source-controller/internal/error"
"github.com/fluxcd/source-controller/internal/helm/getter" "github.com/fluxcd/source-controller/internal/helm/getter"
"github.com/fluxcd/source-controller/internal/helm/repository" "github.com/fluxcd/source-controller/internal/helm/repository"
intpredicates "github.com/fluxcd/source-controller/internal/predicates"
sreconcile "github.com/fluxcd/source-controller/internal/reconcile" sreconcile "github.com/fluxcd/source-controller/internal/reconcile"
"github.com/fluxcd/source-controller/internal/reconcile/summarize" "github.com/fluxcd/source-controller/internal/reconcile/summarize"
) )
// helmRepositoryReadyCondition contains the information required to summarize a // helmRepositoryReadyCondition contains the information required to summarize a
// v1.HelmRepository Ready Condition. // v1beta2.HelmRepository Ready Condition.
var helmRepositoryReadyCondition = summarize.Conditions{ var helmRepositoryReadyCondition = summarize.Conditions{
Target: meta.ReadyCondition, Target: meta.ReadyCondition,
Owned: []string{ Owned: []string{
@ -102,7 +94,7 @@ var helmRepositoryFailConditions = []string{
// +kubebuilder:rbac:groups=source.toolkit.fluxcd.io,resources=helmrepositories/finalizers,verbs=get;create;update;patch;delete // +kubebuilder:rbac:groups=source.toolkit.fluxcd.io,resources=helmrepositories/finalizers,verbs=get;create;update;patch;delete
// +kubebuilder:rbac:groups="",resources=events,verbs=create;patch // +kubebuilder:rbac:groups="",resources=events,verbs=create;patch
// HelmRepositoryReconciler reconciles a v1.HelmRepository object. // HelmRepositoryReconciler reconciles a v1beta2.HelmRepository object.
type HelmRepositoryReconciler struct { type HelmRepositoryReconciler struct {
client.Client client.Client
kuberecorder.EventRecorder kuberecorder.EventRecorder
@ -111,41 +103,30 @@ type HelmRepositoryReconciler struct {
Getters helmgetter.Providers Getters helmgetter.Providers
Storage *Storage Storage *Storage
ControllerName string ControllerName string
Cache *cache.Cache
TTL time.Duration
*cache.CacheRecorder
patchOptions []patch.Option
} }
type HelmRepositoryReconcilerOptions struct { type HelmRepositoryReconcilerOptions struct {
RateLimiter workqueue.TypedRateLimiter[reconcile.Request] MaxConcurrentReconciles int
RateLimiter ratelimiter.RateLimiter
} }
// helmRepositoryReconcileFunc is the function type for all the // helmRepositoryReconcileFunc is the function type for all the
// v1.HelmRepository (sub)reconcile functions. The type implementations // v1beta2.HelmRepository (sub)reconcile functions. The type implementations
// are grouped and executed serially to perform the complete reconcile of the // are grouped and executed serially to perform the complete reconcile of the
// object. // object.
type helmRepositoryReconcileFunc func(ctx context.Context, sp *patch.SerialPatcher, obj *sourcev1.HelmRepository, artifact *sourcev1.Artifact, repo *repository.ChartRepository) (sreconcile.Result, error) type helmRepositoryReconcileFunc func(ctx context.Context, obj *sourcev1.HelmRepository, artifact *sourcev1.Artifact, repo *repository.ChartRepository) (sreconcile.Result, error)
func (r *HelmRepositoryReconciler) SetupWithManager(mgr ctrl.Manager) error { func (r *HelmRepositoryReconciler) SetupWithManager(mgr ctrl.Manager) error {
return r.SetupWithManagerAndOptions(mgr, HelmRepositoryReconcilerOptions{}) return r.SetupWithManagerAndOptions(mgr, HelmRepositoryReconcilerOptions{})
} }
func (r *HelmRepositoryReconciler) SetupWithManagerAndOptions(mgr ctrl.Manager, opts HelmRepositoryReconcilerOptions) error { func (r *HelmRepositoryReconciler) SetupWithManagerAndOptions(mgr ctrl.Manager, opts HelmRepositoryReconcilerOptions) error {
r.patchOptions = getPatchOptions(helmRepositoryReadyCondition.Owned, r.ControllerName)
return ctrl.NewControllerManagedBy(mgr). return ctrl.NewControllerManagedBy(mgr).
For(&sourcev1.HelmRepository{}). For(&sourcev1.HelmRepository{}).
WithEventFilter( WithEventFilter(predicate.Or(predicate.GenerationChangedPredicate{}, predicates.ReconcileRequestedPredicate{})).
predicate.And(
intpredicates.HelmRepositoryOCIMigrationPredicate{},
predicate.Or(predicate.GenerationChangedPredicate{}, predicates.ReconcileRequestedPredicate{}),
),
).
WithOptions(controller.Options{ WithOptions(controller.Options{
RateLimiter: opts.RateLimiter, MaxConcurrentReconciles: opts.MaxConcurrentReconciles,
RateLimiter: opts.RateLimiter,
}). }).
Complete(r) Complete(r)
} }
@ -160,12 +141,19 @@ func (r *HelmRepositoryReconciler) Reconcile(ctx context.Context, req ctrl.Reque
return ctrl.Result{}, client.IgnoreNotFound(err) return ctrl.Result{}, client.IgnoreNotFound(err)
} }
// Initialize the patch helper with the current version of the object. // Record suspended status metric
serialPatcher := patch.NewSerialPatcher(obj, r.Client) r.RecordSuspend(ctx, obj, obj.Spec.Suspend)
// If it's of type OCI, migrate the object to static. // Return early if the object is suspended
if obj.Spec.Type == sourcev1.HelmRepositoryTypeOCI { if obj.Spec.Suspend {
return r.migrationToStatic(ctx, serialPatcher, obj) log.Info("reconciliation is suspended for this object")
return ctrl.Result{}, nil
}
// Initialize the patch helper with the current version of the object.
patchHelper, err := patch.NewHelper(obj, r.Client)
if err != nil {
return ctrl.Result{}, err
} }
// recResult stores the abstracted reconcile result. // recResult stores the abstracted reconcile result.
@ -174,47 +162,37 @@ func (r *HelmRepositoryReconciler) Reconcile(ctx context.Context, req ctrl.Reque
// Always attempt to patch the object after each reconciliation. // Always attempt to patch the object after each reconciliation.
// NOTE: The final runtime result and error are set in this block. // NOTE: The final runtime result and error are set in this block.
defer func() { defer func() {
summarizeHelper := summarize.NewHelper(r.EventRecorder, serialPatcher) summarizeHelper := summarize.NewHelper(r.EventRecorder, patchHelper)
summarizeOpts := []summarize.Option{ summarizeOpts := []summarize.Option{
summarize.WithConditions(helmRepositoryReadyCondition), summarize.WithConditions(helmRepositoryReadyCondition),
summarize.WithReconcileResult(recResult), summarize.WithReconcileResult(recResult),
summarize.WithReconcileError(retErr), summarize.WithReconcileError(retErr),
summarize.WithIgnoreNotFound(), summarize.WithIgnoreNotFound(),
summarize.WithProcessors( summarize.WithProcessors(
summarize.ErrorActionHandler, summarize.RecordContextualError,
summarize.RecordReconcileReq, summarize.RecordReconcileReq,
), ),
summarize.WithResultBuilder(sreconcile.AlwaysRequeueResultBuilder{ summarize.WithResultBuilder(sreconcile.AlwaysRequeueResultBuilder{RequeueAfter: obj.GetRequeueAfter()}),
RequeueAfter: jitter.JitteredIntervalDuration(obj.GetRequeueAfter()),
}),
summarize.WithPatchFieldOwner(r.ControllerName), summarize.WithPatchFieldOwner(r.ControllerName),
} }
result, retErr = summarizeHelper.SummarizeAndPatch(ctx, obj, summarizeOpts...) result, retErr = summarizeHelper.SummarizeAndPatch(ctx, obj, summarizeOpts...)
// Always record duration metrics. // Always record readiness and duration metrics
r.Metrics.RecordReadiness(ctx, obj)
r.Metrics.RecordDuration(ctx, obj, start) r.Metrics.RecordDuration(ctx, obj, start)
}() }()
// Examine if the object is under deletion.
if !obj.ObjectMeta.DeletionTimestamp.IsZero() {
recResult, retErr = r.reconcileDelete(ctx, obj)
return
}
// Add finalizer first if not exist to avoid the race condition // Add finalizer first if not exist to avoid the race condition
// between init and delete. // between init and delete
// Note: Finalizers in general can only be added when the deletionTimestamp
// is not set.
if !controllerutil.ContainsFinalizer(obj, sourcev1.SourceFinalizer) { if !controllerutil.ContainsFinalizer(obj, sourcev1.SourceFinalizer) {
controllerutil.AddFinalizer(obj, sourcev1.SourceFinalizer) controllerutil.AddFinalizer(obj, sourcev1.SourceFinalizer)
recResult = sreconcile.ResultRequeue recResult = sreconcile.ResultRequeue
return return
} }
// Return if the object is suspended. // Examine if the object is under deletion
if obj.Spec.Suspend { if !obj.ObjectMeta.DeletionTimestamp.IsZero() {
log.Info("reconciliation is suspended for this object") recResult, retErr = r.reconcileDelete(ctx, obj)
recResult, retErr = sreconcile.ResultEmpty, nil
return return
} }
@ -224,36 +202,19 @@ func (r *HelmRepositoryReconciler) Reconcile(ctx context.Context, req ctrl.Reque
r.reconcileSource, r.reconcileSource,
r.reconcileArtifact, r.reconcileArtifact,
} }
recResult, retErr = r.reconcile(ctx, serialPatcher, obj, reconcilers) recResult, retErr = r.reconcile(ctx, obj, reconcilers)
return return
} }
// reconcile iterates through the helmRepositoryReconcileFunc tasks for the // reconcile iterates through the helmRepositoryReconcileFunc tasks for the
// object. It returns early on the first call that returns // object. It returns early on the first call that returns
// reconcile.ResultRequeue, or produces an error. // reconcile.ResultRequeue, or produces an error.
func (r *HelmRepositoryReconciler) reconcile(ctx context.Context, sp *patch.SerialPatcher, func (r *HelmRepositoryReconciler) reconcile(ctx context.Context, obj *sourcev1.HelmRepository, reconcilers []helmRepositoryReconcileFunc) (sreconcile.Result, error) {
obj *sourcev1.HelmRepository, reconcilers []helmRepositoryReconcileFunc) (sreconcile.Result, error) {
oldObj := obj.DeepCopy() oldObj := obj.DeepCopy()
rreconcile.ProgressiveStatus(false, obj, meta.ProgressingReason, "reconciliation in progress") // Mark as reconciling if generation differs.
if obj.Generation != obj.Status.ObservedGeneration {
var reconcileAtVal string conditions.MarkReconciling(obj, "NewGeneration", "reconciling new object generation (%d)", obj.Generation)
if v, ok := meta.ReconcileAnnotationValue(obj.GetAnnotations()); ok {
reconcileAtVal = v
}
// Persist reconciling if generation differs or reconciliation is requested.
switch {
case obj.Generation != obj.Status.ObservedGeneration:
rreconcile.ProgressiveStatus(false, obj, meta.ProgressingReason,
"processing object: new generation %d -> %d", obj.Status.ObservedGeneration, obj.Generation)
if err := sp.Patch(ctx, obj, r.patchOptions...); err != nil {
return sreconcile.ResultEmpty, serror.NewGeneric(err, sourcev1.PatchOperationFailedReason)
}
case reconcileAtVal != obj.Status.GetLastHandledReconcileRequest():
if err := sp.Patch(ctx, obj, r.patchOptions...); err != nil {
return sreconcile.ResultEmpty, serror.NewGeneric(err, sourcev1.PatchOperationFailedReason)
}
} }
var chartRepo repository.ChartRepository var chartRepo repository.ChartRepository
@ -263,7 +224,7 @@ func (r *HelmRepositoryReconciler) reconcile(ctx context.Context, sp *patch.Seri
var res sreconcile.Result var res sreconcile.Result
var resErr error var resErr error
for _, rec := range reconcilers { for _, rec := range reconcilers {
recResult, err := rec(ctx, sp, obj, &artifact, &chartRepo) recResult, err := rec(ctx, obj, &artifact, &chartRepo)
// Exit immediately on ResultRequeue. // Exit immediately on ResultRequeue.
if recResult == sreconcile.ResultRequeue { if recResult == sreconcile.ResultRequeue {
return sreconcile.ResultRequeue, nil return sreconcile.ResultRequeue, nil
@ -279,19 +240,19 @@ func (r *HelmRepositoryReconciler) reconcile(ctx context.Context, sp *patch.Seri
res = sreconcile.LowestRequeuingResult(res, recResult) res = sreconcile.LowestRequeuingResult(res, recResult)
} }
r.notify(ctx, oldObj, obj, &chartRepo, res, resErr) r.notify(oldObj, obj, chartRepo, res, resErr)
return res, resErr return res, resErr
} }
// notify emits notification related to the reconciliation. // notify emits notification related to the reconciliation.
func (r *HelmRepositoryReconciler) notify(ctx context.Context, oldObj, newObj *sourcev1.HelmRepository, chartRepo *repository.ChartRepository, res sreconcile.Result, resErr error) { func (r *HelmRepositoryReconciler) notify(oldObj, newObj *sourcev1.HelmRepository, chartRepo repository.ChartRepository, res sreconcile.Result, resErr error) {
// Notify successful reconciliation for new artifact and recovery from any // Notify successful reconciliation for new artifact and recovery from any
// failure. // failure.
if resErr == nil && res == sreconcile.ResultSuccess && newObj.Status.Artifact != nil { if resErr == nil && res == sreconcile.ResultSuccess && newObj.Status.Artifact != nil {
annotations := map[string]string{ annotations := map[string]string{
fmt.Sprintf("%s/%s", sourcev1.GroupVersion.Group, eventv1.MetaRevisionKey): newObj.Status.Artifact.Revision, sourcev1.GroupVersion.Group + "/revision": newObj.Status.Artifact.Revision,
fmt.Sprintf("%s/%s", sourcev1.GroupVersion.Group, eventv1.MetaDigestKey): newObj.Status.Artifact.Digest, sourcev1.GroupVersion.Group + "/checksum": newObj.Status.Artifact.Checksum,
} }
humanReadableSize := "unknown size" humanReadableSize := "unknown size"
@ -299,18 +260,21 @@ func (r *HelmRepositoryReconciler) notify(ctx context.Context, oldObj, newObj *s
humanReadableSize = fmt.Sprintf("size %s", units.HumanSize(float64(*size))) humanReadableSize = fmt.Sprintf("size %s", units.HumanSize(float64(*size)))
} }
var oldChecksum string
if oldObj.GetArtifact() != nil {
oldChecksum = oldObj.GetArtifact().Checksum
}
message := fmt.Sprintf("stored fetched index of %s from '%s'", humanReadableSize, chartRepo.URL) message := fmt.Sprintf("stored fetched index of %s from '%s'", humanReadableSize, chartRepo.URL)
// Notify on new artifact and failure recovery. // Notify on new artifact and failure recovery.
if !oldObj.GetArtifact().HasDigest(newObj.GetArtifact().Digest) { if oldChecksum != newObj.GetArtifact().Checksum {
r.AnnotatedEventf(newObj, annotations, corev1.EventTypeNormal, r.AnnotatedEventf(newObj, annotations, corev1.EventTypeNormal,
"NewArtifact", message) "NewArtifact", message)
ctrl.LoggerFrom(ctx).Info(message)
} else { } else {
if sreconcile.FailureRecovery(oldObj, newObj, helmRepositoryFailConditions) { if sreconcile.FailureRecovery(oldObj, newObj, helmRepositoryFailConditions) {
r.AnnotatedEventf(newObj, annotations, corev1.EventTypeNormal, r.AnnotatedEventf(newObj, annotations, corev1.EventTypeNormal,
meta.SucceededReason, message) meta.SucceededReason, message)
ctrl.LoggerFrom(ctx).Info(message)
} }
} }
} }
@ -319,59 +283,30 @@ func (r *HelmRepositoryReconciler) notify(ctx context.Context, oldObj, newObj *s
// reconcileStorage ensures the current state of the storage matches the // reconcileStorage ensures the current state of the storage matches the
// desired and previously observed state. // desired and previously observed state.
// //
// The garbage collection is executed based on the flag configured settings and // All Artifacts for the object except for the current one in the Status are
// may remove files that are beyond their TTL or the maximum number of files // garbage collected from the Storage.
// to survive a collection cycle.
// If the Artifact in the Status of the object disappeared from the Storage, // If the Artifact in the Status of the object disappeared from the Storage,
// it is removed from the object. // it is removed from the object.
// If the object does not have an Artifact in its Status, a Reconciling // If the object does not have an Artifact in its Status, a Reconciling
// condition is added. // condition is added.
// The hostname of any URL in the Status of the object are updated, to ensure // The hostname of any URL in the Status of the object are updated, to ensure
// they match the Storage server hostname of current runtime. // they match the Storage server hostname of current runtime.
func (r *HelmRepositoryReconciler) reconcileStorage(ctx context.Context, sp *patch.SerialPatcher, func (r *HelmRepositoryReconciler) reconcileStorage(ctx context.Context, obj *sourcev1.HelmRepository, _ *sourcev1.Artifact, _ *repository.ChartRepository) (sreconcile.Result, error) {
obj *sourcev1.HelmRepository, _ *sourcev1.Artifact, _ *repository.ChartRepository) (sreconcile.Result, error) {
// Garbage collect previous advertised artifact(s) from storage // Garbage collect previous advertised artifact(s) from storage
_ = r.garbageCollect(ctx, obj) _ = r.garbageCollect(ctx, obj)
var artifactMissing bool // Determine if the advertised artifact is still in storage
if artifact := obj.GetArtifact(); artifact != nil { if artifact := obj.GetArtifact(); artifact != nil && !r.Storage.ArtifactExist(*artifact) {
// Determine if the advertised artifact is still in storage obj.Status.Artifact = nil
if !r.Storage.ArtifactExist(*artifact) { obj.Status.URL = ""
artifactMissing = true // Remove the condition as the artifact doesn't exist.
} conditions.Delete(obj, sourcev1.ArtifactInStorageCondition)
// If the artifact is in storage, verify if the advertised digest still
// matches the actual artifact
if !artifactMissing {
if err := r.Storage.VerifyArtifact(*artifact); err != nil {
r.Eventf(obj, corev1.EventTypeWarning, "ArtifactVerificationFailed", "failed to verify integrity of artifact: %s", err.Error())
if err = r.Storage.Remove(*artifact); err != nil {
return sreconcile.ResultEmpty, fmt.Errorf("failed to remove artifact after digest mismatch: %w", err)
}
artifactMissing = true
}
}
// If the artifact is missing, remove it from the object
if artifactMissing {
obj.Status.Artifact = nil
obj.Status.URL = ""
}
} }
// Record that we do not have an artifact // Record that we do not have an artifact
if obj.GetArtifact() == nil { if obj.GetArtifact() == nil {
msg := "building artifact" conditions.MarkReconciling(obj, "NoArtifact", "no artifact for resource in storage")
if artifactMissing {
msg += ": disappeared from storage"
}
rreconcile.ProgressiveStatus(true, obj, meta.ProgressingReason, "%s", msg)
conditions.Delete(obj, sourcev1.ArtifactInStorageCondition) conditions.Delete(obj, sourcev1.ArtifactInStorageCondition)
if err := sp.Patch(ctx, obj, r.patchOptions...); err != nil {
return sreconcile.ResultEmpty, serror.NewGeneric(err, sourcev1.PatchOperationFailedReason)
}
return sreconcile.ResultSuccess, nil return sreconcile.ResultSuccess, nil
} }
@ -384,138 +319,122 @@ func (r *HelmRepositoryReconciler) reconcileStorage(ctx context.Context, sp *pat
} }
// reconcileSource attempts to fetch the Helm repository index using the // reconcileSource attempts to fetch the Helm repository index using the
// specified configuration on the v1.HelmRepository object. // specified configuration on the v1beta2.HelmRepository object.
// //
// When the fetch fails, it records v1.FetchFailedCondition=True and // When the fetch fails, it records v1beta2.FetchFailedCondition=True and
// returns early. // returns early.
// If successful and the index is valid, any previous // If successful and the index is valid, any previous
// v1.FetchFailedCondition is removed, and the repository.ChartRepository // v1beta2.FetchFailedCondition is removed, and the repository.ChartRepository
// pointer is set to the newly fetched index. // pointer is set to the newly fetched index.
func (r *HelmRepositoryReconciler) reconcileSource(ctx context.Context, sp *patch.SerialPatcher, func (r *HelmRepositoryReconciler) reconcileSource(ctx context.Context, obj *sourcev1.HelmRepository, artifact *sourcev1.Artifact, chartRepo *repository.ChartRepository) (sreconcile.Result, error) {
obj *sourcev1.HelmRepository, artifact *sourcev1.Artifact, chartRepo *repository.ChartRepository) (sreconcile.Result, error) { var tlsConfig *tls.Config
// Ensure it's not an OCI URL. API validation ensures that only
// http/https/oci scheme are allowed. // Configure Helm client to access repository
if strings.HasPrefix(obj.Spec.URL, helmreg.OCIScheme) { clientOpts := []helmgetter.Option{
err := fmt.Errorf("'oci' URL scheme cannot be used with 'default' HelmRepository type") helmgetter.WithTimeout(obj.Spec.Timeout.Duration),
e := serror.NewStalling( helmgetter.WithURL(obj.Spec.URL),
fmt.Errorf("invalid Helm repository URL: %w", err), helmgetter.WithPassCredentialsAll(obj.Spec.PassCredentials),
sourcev1.URLInvalidReason,
)
conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, e.Reason, "%s", e)
return sreconcile.ResultEmpty, e
} }
normalizedURL, err := repository.NormalizeURL(obj.Spec.URL) // Configure any authentication related options
if err != nil { if obj.Spec.SecretRef != nil {
e := serror.NewStalling( // Attempt to retrieve secret
fmt.Errorf("invalid Helm repository URL: %w", err), name := types.NamespacedName{
sourcev1.URLInvalidReason, Namespace: obj.GetNamespace(),
) Name: obj.Spec.SecretRef.Name,
conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, e.Reason, "%s", e) }
return sreconcile.ResultEmpty, e var secret corev1.Secret
} if err := r.Client.Get(ctx, name, &secret); err != nil {
e := &serror.Event{
Err: fmt.Errorf("failed to get secret '%s': %w", name.String(), err),
Reason: sourcev1.AuthenticationFailedReason,
}
conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, e.Reason, e.Err.Error())
return sreconcile.ResultEmpty, e
}
clientOpts, _, err := getter.GetClientOpts(ctx, r.Client, obj, normalizedURL) // Construct actual options
if err != nil { opts, err := getter.ClientOptionsFromSecret(secret)
if errors.Is(err, getter.ErrDeprecatedTLSConfig) { if err != nil {
ctrl.LoggerFrom(ctx). e := &serror.Event{
Info("warning: specifying TLS authentication data via `.spec.secretRef` is deprecated, please use `.spec.certSecretRef` instead") Err: fmt.Errorf("failed to configure Helm client with secret data: %w", err),
} else { Reason: sourcev1.AuthenticationFailedReason,
e := serror.NewGeneric( }
err, conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, e.Reason, e.Err.Error())
sourcev1.AuthenticationFailedReason, // Return err as the content of the secret may change.
) return sreconcile.ResultEmpty, e
conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, e.Reason, "%s", e) }
clientOpts = append(clientOpts, opts...)
tlsConfig, err = getter.TLSClientConfigFromSecret(secret, obj.Spec.URL)
if err != nil {
e := &serror.Event{
Err: fmt.Errorf("failed to create TLS client config with secret data: %w", err),
Reason: sourcev1.AuthenticationFailedReason,
}
conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, e.Reason, e.Err.Error())
// Requeue as content of secret might change
return sreconcile.ResultEmpty, e return sreconcile.ResultEmpty, e
} }
} }
// Construct Helm chart repository with options and download index // Construct Helm chart repository with options and download index
newChartRepo, err := repository.NewChartRepository(obj.Spec.URL, "", r.Getters, clientOpts.TlsConfig, clientOpts.GetterOpts...) newChartRepo, err := repository.NewChartRepository(obj.Spec.URL, "", r.Getters, tlsConfig, clientOpts)
if err != nil { if err != nil {
switch err.(type) { switch err.(type) {
case *url.Error: case *url.Error:
e := serror.NewStalling( e := &serror.Stalling{
fmt.Errorf("invalid Helm repository URL: %w", err), Err: fmt.Errorf("invalid Helm repository URL: %w", err),
sourcev1.URLInvalidReason, Reason: sourcev1.URLInvalidReason,
) }
conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, e.Reason, "%s", e) conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, e.Reason, e.Err.Error())
return sreconcile.ResultEmpty, e return sreconcile.ResultEmpty, e
default: default:
e := serror.NewStalling( e := &serror.Stalling{
fmt.Errorf("failed to construct Helm client: %w", err), Err: fmt.Errorf("failed to construct Helm client: %w", err),
meta.FailedReason, Reason: meta.FailedReason,
) }
conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, e.Reason, "%s", e) conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, e.Reason, e.Err.Error())
return sreconcile.ResultEmpty, e return sreconcile.ResultEmpty, e
} }
} }
checksum, err := newChartRepo.CacheIndex()
// Fetch the repository index from remote. if err != nil {
if err := newChartRepo.CacheIndex(); err != nil { e := &serror.Event{
e := serror.NewGeneric( Err: fmt.Errorf("failed to fetch Helm repository index: %w", err),
fmt.Errorf("failed to fetch Helm repository index: %w", err), Reason: meta.FailedReason,
meta.FailedReason, }
) conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, e.Reason, e.Err.Error())
conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, e.Reason, "%s", e)
// Coin flip on transient or persistent error, return error and hope for the best // Coin flip on transient or persistent error, return error and hope for the best
return sreconcile.ResultEmpty, e return sreconcile.ResultEmpty, e
} }
*chartRepo = *newChartRepo *chartRepo = *newChartRepo
// Early comparison to current Artifact.
if curArtifact := obj.GetArtifact(); curArtifact != nil {
curRev := digest.Digest(curArtifact.Revision)
if curRev.Validate() == nil {
// Short-circuit based on the fetched index being an exact match to the
// stored Artifact.
if newRev := chartRepo.Digest(curRev.Algorithm()); newRev.Validate() == nil && (newRev == curRev) {
*artifact = *curArtifact
conditions.Delete(obj, sourcev1.FetchFailedCondition)
return sreconcile.ResultSuccess, nil
}
}
}
// Load the cached repository index to ensure it passes validation. // Load the cached repository index to ensure it passes validation.
if err := chartRepo.LoadFromPath(); err != nil { if err := chartRepo.LoadFromCache(); err != nil {
e := serror.NewGeneric( e := &serror.Event{
fmt.Errorf("failed to load Helm repository from index YAML: %w", err), Err: fmt.Errorf("failed to load Helm repository from cache: %w", err),
sourcev1.IndexationFailedReason, Reason: sourcev1.IndexationFailedReason,
) }
conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, e.Reason, "%s", e) conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, e.Reason, e.Err.Error())
return sreconcile.ResultEmpty, e
}
// Delete any stale failure observation
conditions.Delete(obj, sourcev1.FetchFailedCondition)
// Calculate revision.
revision := chartRepo.Digest(intdigest.Canonical)
if revision.Validate() != nil {
e := serror.NewGeneric(
fmt.Errorf("failed to calculate revision: %w", err),
sourcev1.IndexationFailedReason,
)
conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, e.Reason, "%s", e)
return sreconcile.ResultEmpty, e return sreconcile.ResultEmpty, e
} }
defer chartRepo.Unload()
// Mark observations about the revision on the object. // Mark observations about the revision on the object.
message := fmt.Sprintf("new index revision '%s'", revision) if !obj.GetArtifact().HasRevision(checksum) {
if obj.GetArtifact() != nil { message := fmt.Sprintf("new index revision '%s'", checksum)
conditions.MarkTrue(obj, sourcev1.ArtifactOutdatedCondition, "NewRevision", "%s", message) conditions.MarkTrue(obj, sourcev1.ArtifactOutdatedCondition, "NewRevision", message)
} conditions.MarkReconciling(obj, "NewRevision", message)
rreconcile.ProgressiveStatus(true, obj, meta.ProgressingReason, "building artifact: %s", message)
if err := sp.Patch(ctx, obj, r.patchOptions...); err != nil {
return sreconcile.ResultEmpty, serror.NewGeneric(err, sourcev1.PatchOperationFailedReason)
} }
conditions.Delete(obj, sourcev1.FetchFailedCondition)
// Create potential new artifact. // Create potential new artifact.
*artifact = r.Storage.NewArtifactFor(obj.Kind, *artifact = r.Storage.NewArtifactFor(obj.Kind,
obj.ObjectMeta.GetObjectMeta(), obj.ObjectMeta.GetObjectMeta(),
revision.String(), chartRepo.Checksum,
fmt.Sprintf("index-%s.yaml", revision.Encoded()), fmt.Sprintf("index-%s.yaml", checksum))
)
return sreconcile.ResultSuccess, nil return sreconcile.ResultSuccess, nil
} }
@@ -524,91 +443,67 @@ func (r *HelmRepositoryReconciler) reconcileSource(ctx context.Context, sp *patc
 // (Status) data on the object does not match the given.
 //
 // The inspection of the given data to the object is differed, ensuring any
-// stale observations like v1.ArtifactOutdatedCondition are removed.
+// stale observations like v1beta2.ArtifactOutdatedCondition are removed.
 // If the given Artifact does not differ from the object's current, it returns
 // early.
 // On a successful archive, the Artifact in the Status of the object is set,
 // and the symlink in the Storage is updated to its path.
-func (r *HelmRepositoryReconciler) reconcileArtifact(ctx context.Context, sp *patch.SerialPatcher, obj *sourcev1.HelmRepository, artifact *sourcev1.Artifact, chartRepo *repository.ChartRepository) (sreconcile.Result, error) {
+func (r *HelmRepositoryReconciler) reconcileArtifact(ctx context.Context, obj *sourcev1.HelmRepository, artifact *sourcev1.Artifact, chartRepo *repository.ChartRepository) (sreconcile.Result, error) {
 	// Set the ArtifactInStorageCondition if there's no drift.
 	defer func() {
 		if obj.GetArtifact().HasRevision(artifact.Revision) {
 			conditions.Delete(obj, sourcev1.ArtifactOutdatedCondition)
 			conditions.MarkTrue(obj, sourcev1.ArtifactInStorageCondition, meta.SucceededReason,
-				"stored artifact: revision '%s'", artifact.Revision)
+				"stored artifact for revision '%s'", artifact.Revision)
 		}
-		if err := chartRepo.Clear(); err != nil {
+		if err := chartRepo.RemoveCache(); err != nil {
 			ctrl.LoggerFrom(ctx).Error(err, "failed to remove temporary cached index file")
 		}
 	}()
-	if obj.GetArtifact().HasRevision(artifact.Revision) && obj.GetArtifact().HasDigest(artifact.Digest) {
-		// Extend TTL of the Index in the cache (if present).
-		if r.Cache != nil {
-			r.Cache.SetExpiration(artifact.Path, r.TTL)
-		}
-		r.eventLogf(ctx, obj, eventv1.EventTypeTrace, sourcev1.ArtifactUpToDateReason, "artifact up-to-date with remote revision: '%s'", artifact.Revision)
+	if obj.GetArtifact().HasRevision(artifact.Revision) {
+		r.eventLogf(ctx, obj, events.EventTypeTrace, sourcev1.ArtifactUpToDateReason, "artifact up-to-date with remote revision: '%s'", artifact.Revision)
 		return sreconcile.ResultSuccess, nil
 	}
 	// Create artifact dir
 	if err := r.Storage.MkdirAll(*artifact); err != nil {
-		e := serror.NewGeneric(
-			fmt.Errorf("failed to create artifact directory: %w", err),
-			sourcev1.DirCreationFailedReason,
-		)
-		conditions.MarkTrue(obj, sourcev1.StorageOperationFailedCondition, e.Reason, "%s", e)
+		e := &serror.Event{
+			Err:    fmt.Errorf("failed to create artifact directory: %w", err),
+			Reason: sourcev1.DirCreationFailedReason,
+		}
+		conditions.MarkTrue(obj, sourcev1.StorageOperationFailedCondition, e.Reason, e.Err.Error())
 		return sreconcile.ResultEmpty, e
 	}
 	// Acquire lock.
 	unlock, err := r.Storage.Lock(*artifact)
 	if err != nil {
-		return sreconcile.ResultEmpty, serror.NewGeneric(
-			fmt.Errorf("failed to acquire lock for artifact: %w", err),
-			meta.FailedReason,
-		)
+		return sreconcile.ResultEmpty, &serror.Event{
+			Err:    fmt.Errorf("failed to acquire lock for artifact: %w", err),
+			Reason: meta.FailedReason,
+		}
 	}
 	defer unlock()
-	// Save artifact to storage in JSON format.
-	b, err := chartRepo.ToJSON()
-	if err != nil {
-		e := serror.NewGeneric(
-			fmt.Errorf("unable to get JSON index from chart repo: %w", err),
-			sourcev1.ArchiveOperationFailedReason,
-		)
-		conditions.MarkTrue(obj, sourcev1.StorageOperationFailedCondition, e.Reason, "%s", e)
-		return sreconcile.ResultEmpty, e
-	}
-	if err = r.Storage.Copy(artifact, bytes.NewBuffer(b)); err != nil {
-		e := serror.NewGeneric(
-			fmt.Errorf("unable to save artifact to storage: %w", err),
-			sourcev1.ArchiveOperationFailedReason,
-		)
-		conditions.MarkTrue(obj, sourcev1.StorageOperationFailedCondition, e.Reason, "%s", e)
+	// Save artifact to storage.
+	if err = r.Storage.CopyFromPath(artifact, chartRepo.CachePath); err != nil {
+		e := &serror.Event{
+			Err:    fmt.Errorf("unable to save artifact to storage: %w", err),
+			Reason: sourcev1.ArchiveOperationFailedReason,
+		}
+		conditions.MarkTrue(obj, sourcev1.StorageOperationFailedCondition, e.Reason, e.Err.Error())
 		return sreconcile.ResultEmpty, e
 	}
 	// Record it on the object.
 	obj.Status.Artifact = artifact.DeepCopy()
-	// Cache the index if it was successfully retrieved.
-	if r.Cache != nil && chartRepo.Index != nil {
-		// The cache keys have to be safe in multi-tenancy environments, as
-		// otherwise it could be used as a vector to bypass the repository's
-		// authentication. Using the Artifact.Path is safe as the path is in
-		// the format of: /<repository-name>/<chart-name>/<filename>.
-		if err := r.Cache.Set(artifact.Path, chartRepo.Index, r.TTL); err != nil {
-			r.eventLogf(ctx, obj, eventv1.EventTypeTrace, sourcev1.CacheOperationFailedReason, "failed to cache index: %s", err)
-		}
-	}
 	// Update index symlink.
 	indexURL, err := r.Storage.Symlink(*artifact, "index.yaml")
 	if err != nil {
-		r.eventLogf(ctx, obj, eventv1.EventTypeTrace, sourcev1.SymlinkUpdateFailedReason,
+		r.eventLogf(ctx, obj, events.EventTypeTrace, sourcev1.SymlinkUpdateFailedReason,
 			"failed to update status URL symlink: %s", err)
 	}
 	if indexURL != "" {
@@ -628,16 +523,8 @@ func (r *HelmRepositoryReconciler) reconcileDelete(ctx context.Context, obj *sou
 		return sreconcile.ResultEmpty, err
 	}
-	// Remove our finalizer from the list if we are deleting the object
-	if !obj.DeletionTimestamp.IsZero() {
-		controllerutil.RemoveFinalizer(obj, sourcev1.SourceFinalizer)
-	}
-	// Delete cache metrics.
-	if r.CacheRecorder != nil && r.Metrics.IsDelete(obj) {
-		r.DeleteCacheEvent(cache.CacheEventTypeHit, obj.Name, obj.Namespace)
-		r.DeleteCacheEvent(cache.CacheEventTypeMiss, obj.Name, obj.Namespace)
-	}
+	// Remove our finalizer from the list
+	controllerutil.RemoveFinalizer(obj, sourcev1.SourceFinalizer)
 	// Stop reconciliation as the object is being deleted
 	return sreconcile.ResultEmpty, nil
@@ -645,39 +532,34 @@ func (r *HelmRepositoryReconciler) reconcileDelete(ctx context.Context, obj *sou
 // garbageCollect performs a garbage collection for the given object.
 //
-// It removes all but the current Artifact from the Storage, unless:
-// - the deletion timestamp on the object is set
-// - the obj.Spec.Type has changed and artifacts are not supported by the new type
-// Which will result in the removal of all Artifacts for the objects.
+// It removes all but the current Artifact from the Storage, unless the
+// deletion timestamp on the object is set. Which will result in the
+// removal of all Artifacts for the objects.
 func (r *HelmRepositoryReconciler) garbageCollect(ctx context.Context, obj *sourcev1.HelmRepository) error {
-	if !obj.DeletionTimestamp.IsZero() || (obj.Spec.Type != "" && obj.Spec.Type != sourcev1.HelmRepositoryTypeDefault) {
+	if !obj.DeletionTimestamp.IsZero() {
 		if deleted, err := r.Storage.RemoveAll(r.Storage.NewArtifactFor(obj.Kind, obj.GetObjectMeta(), "", "*")); err != nil {
-			return serror.NewGeneric(
-				fmt.Errorf("garbage collection for deleted resource failed: %w", err),
-				"GarbageCollectionFailed",
-			)
+			return &serror.Event{
+				Err:    fmt.Errorf("garbage collection for deleted resource failed: %w", err),
+				Reason: "GarbageCollectionFailed",
+			}
 		} else if deleted != "" {
-			r.eventLogf(ctx, obj, eventv1.EventTypeTrace, "GarbageCollectionSucceeded",
+			r.eventLogf(ctx, obj, events.EventTypeTrace, "GarbageCollectionSucceeded",
 				"garbage collected artifacts for deleted resource")
 		}
-		// Clean status sub-resource
 		obj.Status.Artifact = nil
-		obj.Status.URL = ""
-		// Remove any stale conditions.
-		obj.Status.Conditions = nil
 		return nil
 	}
 	if obj.GetArtifact() != nil {
 		delFiles, err := r.Storage.GarbageCollect(ctx, *obj.GetArtifact(), time.Second*5)
 		if err != nil {
-			return serror.NewGeneric(
-				fmt.Errorf("garbage collection of artifacts failed: %w", err),
-				"GarbageCollectionFailed",
-			)
+			return &serror.Event{
+				Err:    fmt.Errorf("garbage collection of artifacts failed: %w", err),
+				Reason: "GarbageCollectionFailed",
+			}
 		}
 		if len(delFiles) > 0 {
-			r.eventLogf(ctx, obj, eventv1.EventTypeTrace, "GarbageCollectionSucceeded",
-				"garbage collected %d artifacts", len(delFiles))
+			r.eventLogf(ctx, obj, events.EventTypeTrace, "GarbageCollectionSucceeded",
+				fmt.Sprintf("garbage collected %d artifacts", len(delFiles)))
 			return nil
 		}
 	}
@@ -699,31 +581,3 @@ func (r *HelmRepositoryReconciler) eventLogf(ctx context.Context, obj runtime.Ob
 	}
 	r.Eventf(obj, eventType, reason, msg)
 }
-
-// migrationToStatic is HelmRepository OCI migration to static object.
-func (r *HelmRepositoryReconciler) migrationToStatic(ctx context.Context, sp *patch.SerialPatcher, obj *sourcev1.HelmRepository) (result ctrl.Result, err error) {
-	// Skip migration if suspended and not being deleted.
-	if obj.Spec.Suspend && obj.DeletionTimestamp.IsZero() {
-		return ctrl.Result{}, nil
-	}
-	if !intpredicates.HelmRepositoryOCIRequireMigration(obj) {
-		// Already migrated, nothing to do.
-		return ctrl.Result{}, nil
-	}
-	// Delete any artifact.
-	_, err = r.reconcileDelete(ctx, obj)
-	if err != nil {
-		return ctrl.Result{}, err
-	}
-	// Delete finalizer and reset the status.
-	controllerutil.RemoveFinalizer(obj, sourcev1.SourceFinalizer)
-	obj.Status = sourcev1.HelmRepositoryStatus{}
-	if err := sp.Patch(ctx, obj); err != nil {
-		return ctrl.Result{}, err
-	}
-	return ctrl.Result{}, nil
-}
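
The removed migration boils down to the standard controller-runtime finalizer dance. A hedged sketch of that pattern with a generic client.Object; the finalizer string matches sourcev1.SourceFinalizer ("finalizers.fluxcd.io"), everything else is illustrative rather than the controller's own code:

package main

import (
	"context"

	"sigs.k8s.io/controller-runtime/pkg/client"
	"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
)

// resetToStatic drops the controller's finalizer and patches the object,
// mirroring the shape of the migration above. Illustrative only.
func resetToStatic(ctx context.Context, c client.Client, obj client.Object) error {
	// Snapshot the object before mutating it, so MergeFrom produces a minimal patch.
	base := obj.DeepCopyObject().(client.Object)
	controllerutil.RemoveFinalizer(obj, "finalizers.fluxcd.io")
	return c.Patch(ctx, obj, client.MergeFrom(base))
}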

View File

@@ -0,0 +1,961 @@
/*
Copyright 2020 The Flux authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package controllers
import (
"context"
"errors"
"fmt"
"net/http"
"os"
"path/filepath"
"strings"
"testing"
"time"
"github.com/darkowlzz/controller-check/status"
"github.com/fluxcd/pkg/apis/meta"
"github.com/fluxcd/pkg/helmtestserver"
"github.com/fluxcd/pkg/runtime/conditions"
"github.com/fluxcd/pkg/runtime/patch"
. "github.com/onsi/gomega"
corev1 "k8s.io/api/core/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/client-go/tools/record"
kstatus "sigs.k8s.io/cli-utils/pkg/kstatus/status"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/client/fake"
fakeclient "sigs.k8s.io/controller-runtime/pkg/client/fake"
sourcev1 "github.com/fluxcd/source-controller/api/v1beta2"
"github.com/fluxcd/source-controller/internal/helm/repository"
sreconcile "github.com/fluxcd/source-controller/internal/reconcile"
"github.com/fluxcd/source-controller/internal/reconcile/summarize"
)
func TestHelmRepositoryReconciler_Reconcile(t *testing.T) {
g := NewWithT(t)
testServer, err := helmtestserver.NewTempHelmServer()
g.Expect(err).NotTo(HaveOccurred())
defer os.RemoveAll(testServer.Root())
g.Expect(testServer.PackageChart("testdata/charts/helmchart")).To(Succeed())
g.Expect(testServer.GenerateIndex()).To(Succeed())
testServer.Start()
defer testServer.Stop()
obj := &sourcev1.HelmRepository{
ObjectMeta: metav1.ObjectMeta{
GenerateName: "helmrepository-reconcile-",
Namespace: "default",
},
Spec: sourcev1.HelmRepositorySpec{
Interval: metav1.Duration{Duration: interval},
URL: testServer.URL(),
},
}
g.Expect(testEnv.Create(ctx, obj)).To(Succeed())
key := client.ObjectKey{Name: obj.Name, Namespace: obj.Namespace}
// Wait for finalizer to be set
g.Eventually(func() bool {
if err := testEnv.Get(ctx, key, obj); err != nil {
return false
}
return len(obj.Finalizers) > 0
}, timeout).Should(BeTrue())
// Wait for HelmRepository to be Ready
g.Eventually(func() bool {
if err := testEnv.Get(ctx, key, obj); err != nil {
return false
}
if !conditions.IsReady(obj) && obj.Status.Artifact == nil {
return false
}
readyCondition := conditions.Get(obj, meta.ReadyCondition)
return readyCondition.Status == metav1.ConditionTrue &&
obj.Generation == readyCondition.ObservedGeneration &&
obj.Generation == obj.Status.ObservedGeneration
}, timeout).Should(BeTrue())
// Check if the object status is valid.
condns := &status.Conditions{NegativePolarity: helmRepositoryReadyCondition.NegativePolarity}
checker := status.NewChecker(testEnv.Client, condns)
checker.CheckErr(ctx, obj)
// kstatus client conformance check.
u, err := patch.ToUnstructured(obj)
g.Expect(err).ToNot(HaveOccurred())
res, err := kstatus.Compute(u)
g.Expect(err).ToNot(HaveOccurred())
g.Expect(res.Status).To(Equal(kstatus.CurrentStatus))
// Patch the object with reconcile request annotation.
patchHelper, err := patch.NewHelper(obj, testEnv.Client)
g.Expect(err).ToNot(HaveOccurred())
annotations := map[string]string{
meta.ReconcileRequestAnnotation: "now",
}
obj.SetAnnotations(annotations)
g.Expect(patchHelper.Patch(ctx, obj)).ToNot(HaveOccurred())
g.Eventually(func() bool {
if err := testEnv.Get(ctx, key, obj); err != nil {
return false
}
return obj.Status.LastHandledReconcileAt == "now"
}, timeout).Should(BeTrue())
g.Expect(testEnv.Delete(ctx, obj)).To(Succeed())
// Wait for HelmRepository to be deleted
g.Eventually(func() bool {
if err := testEnv.Get(ctx, key, obj); err != nil {
return apierrors.IsNotFound(err)
}
return false
}, timeout).Should(BeTrue())
}
func TestHelmRepositoryReconciler_reconcileStorage(t *testing.T) {
tests := []struct {
name string
beforeFunc func(obj *sourcev1.HelmRepository, storage *Storage) error
want sreconcile.Result
wantErr bool
assertArtifact *sourcev1.Artifact
assertConditions []metav1.Condition
assertPaths []string
}{
{
name: "garbage collects",
beforeFunc: func(obj *sourcev1.HelmRepository, storage *Storage) error {
revisions := []string{"a", "b", "c", "d"}
for n := range revisions {
v := revisions[n]
obj.Status.Artifact = &sourcev1.Artifact{
Path: fmt.Sprintf("/reconcile-storage/%s.txt", v),
Revision: v,
}
if err := testStorage.MkdirAll(*obj.Status.Artifact); err != nil {
return err
}
if err := testStorage.AtomicWriteFile(obj.Status.Artifact, strings.NewReader(v), 0o640); err != nil {
return err
}
if n != len(revisions)-1 {
time.Sleep(time.Second * 1)
}
}
testStorage.SetArtifactURL(obj.Status.Artifact)
return nil
},
assertArtifact: &sourcev1.Artifact{
Path: "/reconcile-storage/d.txt",
Revision: "d",
Checksum: "18ac3e7343f016890c510e93f935261169d9e3f565436429830faf0934f4f8e4",
URL: testStorage.Hostname + "/reconcile-storage/d.txt",
Size: int64p(int64(len("d"))),
},
assertPaths: []string{
"/reconcile-storage/d.txt",
"/reconcile-storage/c.txt",
"!/reconcile-storage/b.txt",
"!/reconcile-storage/a.txt",
},
want: sreconcile.ResultSuccess,
},
{
name: "notices missing artifact in storage",
beforeFunc: func(obj *sourcev1.HelmRepository, storage *Storage) error {
obj.Status.Artifact = &sourcev1.Artifact{
Path: "/reconcile-storage/invalid.txt",
Revision: "d",
}
testStorage.SetArtifactURL(obj.Status.Artifact)
return nil
},
want: sreconcile.ResultSuccess,
assertPaths: []string{
"!/reconcile-storage/invalid.txt",
},
assertConditions: []metav1.Condition{
*conditions.TrueCondition(meta.ReconcilingCondition, "NoArtifact", "no artifact for resource in storage"),
},
},
{
name: "updates hostname on diff from current",
beforeFunc: func(obj *sourcev1.HelmRepository, storage *Storage) error {
obj.Status.Artifact = &sourcev1.Artifact{
Path: "/reconcile-storage/hostname.txt",
Revision: "f",
Checksum: "3b9c358f36f0a31b6ad3e14f309c7cf198ac9246e8316f9ce543d5b19ac02b80",
URL: "http://outdated.com/reconcile-storage/hostname.txt",
}
if err := testStorage.MkdirAll(*obj.Status.Artifact); err != nil {
return err
}
if err := testStorage.AtomicWriteFile(obj.Status.Artifact, strings.NewReader("file"), 0o640); err != nil {
return err
}
return nil
},
want: sreconcile.ResultSuccess,
assertPaths: []string{
"/reconcile-storage/hostname.txt",
},
assertArtifact: &sourcev1.Artifact{
Path: "/reconcile-storage/hostname.txt",
Revision: "f",
Checksum: "3b9c358f36f0a31b6ad3e14f309c7cf198ac9246e8316f9ce543d5b19ac02b80",
URL: testStorage.Hostname + "/reconcile-storage/hostname.txt",
Size: int64p(int64(len("file"))),
},
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
g := NewWithT(t)
r := &HelmRepositoryReconciler{
EventRecorder: record.NewFakeRecorder(32),
Storage: testStorage,
}
obj := &sourcev1.HelmRepository{
ObjectMeta: metav1.ObjectMeta{
GenerateName: "test-",
},
}
if tt.beforeFunc != nil {
g.Expect(tt.beforeFunc(obj, testStorage)).To(Succeed())
}
var chartRepo repository.ChartRepository
var artifact sourcev1.Artifact
got, err := r.reconcileStorage(context.TODO(), obj, &artifact, &chartRepo)
g.Expect(err != nil).To(Equal(tt.wantErr))
g.Expect(got).To(Equal(tt.want))
g.Expect(obj.Status.Artifact).To(MatchArtifact(tt.assertArtifact))
if tt.assertArtifact != nil && tt.assertArtifact.URL != "" {
g.Expect(obj.Status.Artifact.URL).To(Equal(tt.assertArtifact.URL))
}
g.Expect(obj.Status.Conditions).To(conditions.MatchConditions(tt.assertConditions))
for _, p := range tt.assertPaths {
absoluteP := filepath.Join(testStorage.BasePath, p)
if !strings.HasPrefix(p, "!") {
g.Expect(absoluteP).To(BeAnExistingFile())
continue
}
g.Expect(absoluteP).NotTo(BeAnExistingFile())
}
})
}
}
func TestHelmRepositoryReconciler_reconcileSource(t *testing.T) {
type options struct {
username string
password string
publicKey []byte
privateKey []byte
ca []byte
}
tests := []struct {
name string
protocol string
server options
secret *corev1.Secret
beforeFunc func(t *WithT, obj *sourcev1.HelmRepository)
afterFunc func(t *WithT, obj *sourcev1.HelmRepository)
want sreconcile.Result
wantErr bool
assertConditions []metav1.Condition
}{
{
name: "HTTP without secretRef makes ArtifactOutdated=True",
protocol: "http",
want: sreconcile.ResultSuccess,
assertConditions: []metav1.Condition{
*conditions.TrueCondition(sourcev1.ArtifactOutdatedCondition, "NewRevision", "new index revision"),
*conditions.TrueCondition(meta.ReconcilingCondition, "NewRevision", "new index revision"),
},
},
{
name: "HTTP with Basic Auth secret makes ArtifactOutdated=True",
protocol: "http",
server: options{
username: "git",
password: "1234",
},
secret: &corev1.Secret{
ObjectMeta: metav1.ObjectMeta{
Name: "basic-auth",
},
Data: map[string][]byte{
"username": []byte("git"),
"password": []byte("1234"),
},
},
beforeFunc: func(t *WithT, obj *sourcev1.HelmRepository) {
obj.Spec.SecretRef = &meta.LocalObjectReference{Name: "basic-auth"}
},
want: sreconcile.ResultSuccess,
assertConditions: []metav1.Condition{
*conditions.TrueCondition(sourcev1.ArtifactOutdatedCondition, "NewRevision", "new index revision"),
*conditions.TrueCondition(meta.ReconcilingCondition, "NewRevision", "new index revision"),
},
},
{
name: "HTTPS with CAFile secret makes ArtifactOutdated=True",
protocol: "https",
server: options{
publicKey: tlsPublicKey,
privateKey: tlsPrivateKey,
ca: tlsCA,
},
secret: &corev1.Secret{
ObjectMeta: metav1.ObjectMeta{
Name: "ca-file",
},
Data: map[string][]byte{
"caFile": tlsCA,
},
},
beforeFunc: func(t *WithT, obj *sourcev1.HelmRepository) {
obj.Spec.SecretRef = &meta.LocalObjectReference{Name: "ca-file"}
},
want: sreconcile.ResultSuccess,
assertConditions: []metav1.Condition{
*conditions.TrueCondition(sourcev1.ArtifactOutdatedCondition, "NewRevision", "new index revision"),
*conditions.TrueCondition(meta.ReconcilingCondition, "NewRevision", "new index revision"),
},
},
{
name: "HTTPS with invalid CAFile secret makes FetchFailed=True and returns error",
protocol: "https",
server: options{
publicKey: tlsPublicKey,
privateKey: tlsPrivateKey,
ca: tlsCA,
},
secret: &corev1.Secret{
ObjectMeta: metav1.ObjectMeta{
Name: "invalid-ca",
},
Data: map[string][]byte{
"caFile": []byte("invalid"),
},
},
beforeFunc: func(t *WithT, obj *sourcev1.HelmRepository) {
obj.Spec.SecretRef = &meta.LocalObjectReference{Name: "invalid-ca"}
},
wantErr: true,
assertConditions: []metav1.Condition{
*conditions.TrueCondition(sourcev1.FetchFailedCondition, sourcev1.AuthenticationFailedReason, "failed to create TLS client config with secret data: cannot append certificate into certificate pool: invalid caFile"),
},
},
{
name: "Invalid URL makes FetchFailed=True and returns stalling error",
protocol: "http",
beforeFunc: func(t *WithT, obj *sourcev1.HelmRepository) {
obj.Spec.URL = strings.ReplaceAll(obj.Spec.URL, "http://", "")
},
want: sreconcile.ResultEmpty,
wantErr: true,
assertConditions: []metav1.Condition{
*conditions.TrueCondition(sourcev1.FetchFailedCondition, sourcev1.URLInvalidReason, "first path segment in URL cannot contain colon"),
},
},
{
name: "Unsupported scheme makes FetchFailed=True and returns stalling error",
protocol: "http",
beforeFunc: func(t *WithT, obj *sourcev1.HelmRepository) {
obj.Spec.URL = strings.ReplaceAll(obj.Spec.URL, "http://", "ftp://")
},
want: sreconcile.ResultEmpty,
wantErr: true,
assertConditions: []metav1.Condition{
*conditions.TrueCondition(sourcev1.FetchFailedCondition, meta.FailedReason, "scheme \"ftp\" not supported"),
},
},
{
name: "Missing secret returns FetchFailed=True and returns error",
protocol: "http",
beforeFunc: func(t *WithT, obj *sourcev1.HelmRepository) {
obj.Spec.SecretRef = &meta.LocalObjectReference{Name: "non-existing"}
},
wantErr: true,
assertConditions: []metav1.Condition{
*conditions.TrueCondition(sourcev1.FetchFailedCondition, sourcev1.AuthenticationFailedReason, "secrets \"non-existing\" not found"),
},
},
{
name: "Malformed secret returns FetchFailed=True and returns error",
protocol: "http",
secret: &corev1.Secret{
ObjectMeta: metav1.ObjectMeta{
Name: "malformed-basic-auth",
},
Data: map[string][]byte{
"username": []byte("git"),
},
},
beforeFunc: func(t *WithT, obj *sourcev1.HelmRepository) {
obj.Spec.SecretRef = &meta.LocalObjectReference{Name: "malformed-basic-auth"}
},
wantErr: true,
assertConditions: []metav1.Condition{
*conditions.TrueCondition(sourcev1.FetchFailedCondition, sourcev1.AuthenticationFailedReason, "required fields 'username' and 'password"),
},
},
}
for _, tt := range tests {
obj := &sourcev1.HelmRepository{
ObjectMeta: metav1.ObjectMeta{
GenerateName: "auth-strategy-",
},
Spec: sourcev1.HelmRepositorySpec{
Interval: metav1.Duration{Duration: interval},
Timeout: &metav1.Duration{Duration: interval},
},
}
t.Run(tt.name, func(t *testing.T) {
g := NewWithT(t)
server, err := helmtestserver.NewTempHelmServer()
g.Expect(err).NotTo(HaveOccurred())
defer os.RemoveAll(server.Root())
g.Expect(server.PackageChart("testdata/charts/helmchart")).To(Succeed())
g.Expect(server.GenerateIndex()).To(Succeed())
if len(tt.server.username+tt.server.password) > 0 {
server.WithMiddleware(func(handler http.Handler) http.Handler {
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
u, p, ok := r.BasicAuth()
if !ok || u != tt.server.username || p != tt.server.password {
w.WriteHeader(401)
return
}
handler.ServeHTTP(w, r)
})
})
}
secret := tt.secret.DeepCopy()
switch tt.protocol {
case "http":
server.Start()
defer server.Stop()
obj.Spec.URL = server.URL()
case "https":
g.Expect(server.StartTLS(tt.server.publicKey, tt.server.privateKey, tt.server.ca, "example.com")).To(Succeed())
defer server.Stop()
obj.Spec.URL = server.URL()
default:
t.Fatalf("unsupported protocol %q", tt.protocol)
}
if tt.beforeFunc != nil {
tt.beforeFunc(g, obj)
}
builder := fakeclient.NewClientBuilder().WithScheme(testEnv.GetScheme())
if secret != nil {
builder.WithObjects(secret.DeepCopy())
}
r := &HelmRepositoryReconciler{
EventRecorder: record.NewFakeRecorder(32),
Client: builder.Build(),
Storage: testStorage,
Getters: testGetters,
}
var chartRepo repository.ChartRepository
var artifact sourcev1.Artifact
got, err := r.reconcileSource(context.TODO(), obj, &artifact, &chartRepo)
defer os.Remove(chartRepo.CachePath)
g.Expect(obj.Status.Conditions).To(conditions.MatchConditions(tt.assertConditions))
g.Expect(err != nil).To(Equal(tt.wantErr))
g.Expect(got).To(Equal(tt.want))
if tt.afterFunc != nil {
tt.afterFunc(g, obj)
}
})
}
}
func TestHelmRepositoryReconciler_reconcileArtifact(t *testing.T) {
tests := []struct {
name string
beforeFunc func(t *WithT, obj *sourcev1.HelmRepository, artifact sourcev1.Artifact, index *repository.ChartRepository)
afterFunc func(t *WithT, obj *sourcev1.HelmRepository)
want sreconcile.Result
wantErr bool
assertConditions []metav1.Condition
}{
{
name: "Archiving artifact to storage makes ArtifactInStorage=True",
beforeFunc: func(t *WithT, obj *sourcev1.HelmRepository, artifact sourcev1.Artifact, index *repository.ChartRepository) {
obj.Spec.Interval = metav1.Duration{Duration: interval}
},
want: sreconcile.ResultSuccess,
assertConditions: []metav1.Condition{
*conditions.TrueCondition(sourcev1.ArtifactInStorageCondition, meta.SucceededReason, "stored artifact for revision 'existing'"),
},
},
{
name: "Up-to-date artifact should not update status",
beforeFunc: func(t *WithT, obj *sourcev1.HelmRepository, artifact sourcev1.Artifact, index *repository.ChartRepository) {
obj.Spec.Interval = metav1.Duration{Duration: interval}
obj.Status.Artifact = artifact.DeepCopy()
},
afterFunc: func(t *WithT, obj *sourcev1.HelmRepository) {
t.Expect(obj.Status.URL).To(BeEmpty())
},
want: sreconcile.ResultSuccess,
assertConditions: []metav1.Condition{
*conditions.TrueCondition(sourcev1.ArtifactInStorageCondition, meta.SucceededReason, "stored artifact for revision 'existing'"),
},
},
{
name: "Removes ArtifactOutdatedCondition after creating a new artifact",
beforeFunc: func(t *WithT, obj *sourcev1.HelmRepository, artifact sourcev1.Artifact, index *repository.ChartRepository) {
obj.Spec.Interval = metav1.Duration{Duration: interval}
conditions.MarkTrue(obj, sourcev1.ArtifactOutdatedCondition, "Foo", "")
},
want: sreconcile.ResultSuccess,
assertConditions: []metav1.Condition{
*conditions.TrueCondition(sourcev1.ArtifactInStorageCondition, meta.SucceededReason, "stored artifact for revision 'existing'"),
},
},
{
name: "Creates latest symlink to the created artifact",
beforeFunc: func(t *WithT, obj *sourcev1.HelmRepository, artifact sourcev1.Artifact, index *repository.ChartRepository) {
obj.Spec.Interval = metav1.Duration{Duration: interval}
},
afterFunc: func(t *WithT, obj *sourcev1.HelmRepository) {
localPath := testStorage.LocalPath(*obj.GetArtifact())
symlinkPath := filepath.Join(filepath.Dir(localPath), "index.yaml")
targetFile, err := os.Readlink(symlinkPath)
t.Expect(err).NotTo(HaveOccurred())
t.Expect(localPath).To(Equal(targetFile))
},
want: sreconcile.ResultSuccess,
assertConditions: []metav1.Condition{
*conditions.TrueCondition(sourcev1.ArtifactInStorageCondition, meta.SucceededReason, "stored artifact for revision 'existing'"),
},
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
g := NewWithT(t)
r := &HelmRepositoryReconciler{
EventRecorder: record.NewFakeRecorder(32),
Storage: testStorage,
}
obj := &sourcev1.HelmRepository{
TypeMeta: metav1.TypeMeta{
Kind: sourcev1.HelmRepositoryKind,
},
ObjectMeta: metav1.ObjectMeta{
GenerateName: "test-bucket-",
Generation: 1,
Namespace: "default",
},
Spec: sourcev1.HelmRepositorySpec{
Timeout: &metav1.Duration{Duration: timeout},
URL: "https://example.com/index.yaml",
},
}
tmpDir, err := os.MkdirTemp("", "test-reconcile-artifact-")
g.Expect(err).ToNot(HaveOccurred())
defer os.RemoveAll(tmpDir)
// Create an empty cache file.
cachePath := filepath.Join(tmpDir, "index.yaml")
cacheFile, err := os.Create(cachePath)
g.Expect(err).ToNot(HaveOccurred())
g.Expect(cacheFile.Close()).ToNot(HaveOccurred())
chartRepo, err := repository.NewChartRepository(obj.Spec.URL, "", testGetters, nil, nil)
g.Expect(err).ToNot(HaveOccurred())
chartRepo.CachePath = cachePath
artifact := testStorage.NewArtifactFor(obj.Kind, obj, "existing", "foo.tar.gz")
// Checksum of the index file calculated by the ChartRepository.
artifact.Checksum = "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"
if tt.beforeFunc != nil {
tt.beforeFunc(g, obj, artifact, chartRepo)
}
got, err := r.reconcileArtifact(context.TODO(), obj, &artifact, chartRepo)
g.Expect(err != nil).To(Equal(tt.wantErr))
g.Expect(got).To(Equal(tt.want))
// On error, artifact is empty. Check artifacts only on successful
// reconcile.
if !tt.wantErr {
g.Expect(obj.Status.Artifact).To(MatchArtifact(artifact.DeepCopy()))
}
g.Expect(obj.Status.Conditions).To(conditions.MatchConditions(tt.assertConditions))
if tt.afterFunc != nil {
tt.afterFunc(g, obj)
}
})
}
}
func TestHelmRepositoryReconciler_reconcileSubRecs(t *testing.T) {
// Helper to build simple helmRepositoryReconcileFunc with result and error.
buildReconcileFuncs := func(r sreconcile.Result, e error) helmRepositoryReconcileFunc {
return func(ctx context.Context, obj *sourcev1.HelmRepository, artifact *sourcev1.Artifact, repo *repository.ChartRepository) (sreconcile.Result, error) {
return r, e
}
}
tests := []struct {
name string
generation int64
observedGeneration int64
reconcileFuncs []helmRepositoryReconcileFunc
wantResult sreconcile.Result
wantErr bool
assertConditions []metav1.Condition
}{
{
name: "successful reconciliations",
reconcileFuncs: []helmRepositoryReconcileFunc{
buildReconcileFuncs(sreconcile.ResultSuccess, nil),
},
wantResult: sreconcile.ResultSuccess,
wantErr: false,
},
{
name: "successful reconciliation with generation difference",
generation: 3,
observedGeneration: 2,
reconcileFuncs: []helmRepositoryReconcileFunc{
buildReconcileFuncs(sreconcile.ResultSuccess, nil),
},
wantResult: sreconcile.ResultSuccess,
wantErr: false,
assertConditions: []metav1.Condition{
*conditions.TrueCondition(meta.ReconcilingCondition, "NewGeneration", "reconciling new object generation (3)"),
},
},
{
name: "failed reconciliation",
reconcileFuncs: []helmRepositoryReconcileFunc{
buildReconcileFuncs(sreconcile.ResultEmpty, fmt.Errorf("some error")),
},
wantResult: sreconcile.ResultEmpty,
wantErr: true,
},
{
name: "multiple object status conditions mutations",
reconcileFuncs: []helmRepositoryReconcileFunc{
func(ctx context.Context, obj *sourcev1.HelmRepository, artifact *sourcev1.Artifact, repo *repository.ChartRepository) (sreconcile.Result, error) {
conditions.MarkTrue(obj, sourcev1.ArtifactOutdatedCondition, "NewRevision", "new index revision")
return sreconcile.ResultSuccess, nil
},
func(ctx context.Context, obj *sourcev1.HelmRepository, artifact *sourcev1.Artifact, repo *repository.ChartRepository) (sreconcile.Result, error) {
conditions.MarkTrue(obj, meta.ReconcilingCondition, "Progressing", "creating artifact")
return sreconcile.ResultSuccess, nil
},
},
wantResult: sreconcile.ResultSuccess,
wantErr: false,
assertConditions: []metav1.Condition{
*conditions.TrueCondition(sourcev1.ArtifactOutdatedCondition, "NewRevision", "new index revision"),
*conditions.TrueCondition(meta.ReconcilingCondition, "Progressing", "creating artifact"),
},
},
{
name: "subrecs with one result=Requeue, no error",
reconcileFuncs: []helmRepositoryReconcileFunc{
buildReconcileFuncs(sreconcile.ResultSuccess, nil),
buildReconcileFuncs(sreconcile.ResultRequeue, nil),
buildReconcileFuncs(sreconcile.ResultSuccess, nil),
},
wantResult: sreconcile.ResultRequeue,
wantErr: false,
},
{
name: "subrecs with error before result=Requeue",
reconcileFuncs: []helmRepositoryReconcileFunc{
buildReconcileFuncs(sreconcile.ResultSuccess, nil),
buildReconcileFuncs(sreconcile.ResultEmpty, fmt.Errorf("some error")),
buildReconcileFuncs(sreconcile.ResultRequeue, nil),
},
wantResult: sreconcile.ResultEmpty,
wantErr: true,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
g := NewWithT(t)
r := &HelmRepositoryReconciler{}
obj := &sourcev1.HelmRepository{
ObjectMeta: metav1.ObjectMeta{
GenerateName: "test-",
Generation: tt.generation,
},
Status: sourcev1.HelmRepositoryStatus{
ObservedGeneration: tt.observedGeneration,
},
}
ctx := context.TODO()
gotRes, gotErr := r.reconcile(ctx, obj, tt.reconcileFuncs)
g.Expect(gotErr != nil).To(Equal(tt.wantErr))
g.Expect(gotRes).To(Equal(tt.wantResult))
g.Expect(obj.Status.Conditions).To(conditions.MatchConditions(tt.assertConditions))
})
}
}
func TestHelmRepositoryReconciler_statusConditions(t *testing.T) {
tests := []struct {
name string
beforeFunc func(obj *sourcev1.HelmRepository)
assertConditions []metav1.Condition
}{
{
name: "positive conditions only",
beforeFunc: func(obj *sourcev1.HelmRepository) {
conditions.MarkTrue(obj, sourcev1.ArtifactInStorageCondition, meta.SucceededReason, "stored artifact for revision")
},
assertConditions: []metav1.Condition{
*conditions.TrueCondition(meta.ReadyCondition, meta.SucceededReason, "stored artifact for revision"),
*conditions.TrueCondition(sourcev1.ArtifactInStorageCondition, meta.SucceededReason, "stored artifact for revision"),
},
},
{
name: "multiple failures",
beforeFunc: func(obj *sourcev1.HelmRepository) {
conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, sourcev1.AuthenticationFailedReason, "failed to get secret")
conditions.MarkTrue(obj, sourcev1.StorageOperationFailedCondition, sourcev1.DirCreationFailedReason, "failed to create directory")
conditions.MarkTrue(obj, sourcev1.ArtifactOutdatedCondition, "NewRevision", "some error")
},
assertConditions: []metav1.Condition{
*conditions.FalseCondition(meta.ReadyCondition, sourcev1.DirCreationFailedReason, "failed to create directory"),
*conditions.TrueCondition(sourcev1.FetchFailedCondition, sourcev1.AuthenticationFailedReason, "failed to get secret"),
*conditions.TrueCondition(sourcev1.StorageOperationFailedCondition, sourcev1.DirCreationFailedReason, "failed to create directory"),
*conditions.TrueCondition(sourcev1.ArtifactOutdatedCondition, "NewRevision", "some error"),
},
},
{
name: "mixed positive and negative conditions",
beforeFunc: func(obj *sourcev1.HelmRepository) {
conditions.MarkTrue(obj, sourcev1.ArtifactInStorageCondition, meta.SucceededReason, "stored artifact for revision")
conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, sourcev1.AuthenticationFailedReason, "failed to get secret")
},
assertConditions: []metav1.Condition{
*conditions.FalseCondition(meta.ReadyCondition, sourcev1.AuthenticationFailedReason, "failed to get secret"),
*conditions.TrueCondition(sourcev1.FetchFailedCondition, sourcev1.AuthenticationFailedReason, "failed to get secret"),
*conditions.TrueCondition(sourcev1.ArtifactInStorageCondition, meta.SucceededReason, "stored artifact for revision"),
},
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
g := NewWithT(t)
obj := &sourcev1.HelmRepository{
TypeMeta: metav1.TypeMeta{
Kind: sourcev1.HelmRepositoryKind,
APIVersion: "source.toolkit.fluxcd.io/v1beta2",
},
ObjectMeta: metav1.ObjectMeta{
Name: "helmrepo",
Namespace: "foo",
},
}
clientBuilder := fake.NewClientBuilder()
clientBuilder.WithObjects(obj)
c := clientBuilder.Build()
patchHelper, err := patch.NewHelper(obj, c)
g.Expect(err).ToNot(HaveOccurred())
if tt.beforeFunc != nil {
tt.beforeFunc(obj)
}
ctx := context.TODO()
recResult := sreconcile.ResultSuccess
var retErr error
summarizeHelper := summarize.NewHelper(record.NewFakeRecorder(32), patchHelper)
summarizeOpts := []summarize.Option{
summarize.WithConditions(helmRepositoryReadyCondition),
summarize.WithReconcileResult(recResult),
summarize.WithReconcileError(retErr),
summarize.WithIgnoreNotFound(),
summarize.WithResultBuilder(sreconcile.AlwaysRequeueResultBuilder{RequeueAfter: obj.GetRequeueAfter()}),
summarize.WithPatchFieldOwner("source-controller"),
}
_, retErr = summarizeHelper.SummarizeAndPatch(ctx, obj, summarizeOpts...)
key := client.ObjectKeyFromObject(obj)
g.Expect(c.Get(ctx, key, obj)).ToNot(HaveOccurred())
g.Expect(obj.GetConditions()).To(conditions.MatchConditions(tt.assertConditions))
})
}
}
func TestHelmRepositoryReconciler_notify(t *testing.T) {
var aSize int64 = 30000
tests := []struct {
name string
res sreconcile.Result
resErr error
oldObjBeforeFunc func(obj *sourcev1.HelmRepository)
newObjBeforeFunc func(obj *sourcev1.HelmRepository)
wantEvent string
}{
{
name: "error - no event",
res: sreconcile.ResultEmpty,
resErr: errors.New("some error"),
},
{
name: "new artifact with nil size",
res: sreconcile.ResultSuccess,
resErr: nil,
newObjBeforeFunc: func(obj *sourcev1.HelmRepository) {
obj.Status.Artifact = &sourcev1.Artifact{Revision: "xxx", Checksum: "yyy", Size: nil}
},
wantEvent: "Normal NewArtifact stored fetched index of unknown size",
},
{
name: "new artifact",
res: sreconcile.ResultSuccess,
resErr: nil,
newObjBeforeFunc: func(obj *sourcev1.HelmRepository) {
obj.Status.Artifact = &sourcev1.Artifact{Revision: "xxx", Checksum: "yyy", Size: &aSize}
},
wantEvent: "Normal NewArtifact stored fetched index of size",
},
{
name: "recovery from failure",
res: sreconcile.ResultSuccess,
resErr: nil,
oldObjBeforeFunc: func(obj *sourcev1.HelmRepository) {
obj.Status.Artifact = &sourcev1.Artifact{Revision: "xxx", Checksum: "yyy", Size: &aSize}
conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, sourcev1.GitOperationFailedReason, "fail")
conditions.MarkFalse(obj, meta.ReadyCondition, meta.FailedReason, "foo")
},
newObjBeforeFunc: func(obj *sourcev1.HelmRepository) {
obj.Status.Artifact = &sourcev1.Artifact{Revision: "xxx", Checksum: "yyy", Size: &aSize}
conditions.MarkTrue(obj, meta.ReadyCondition, meta.SucceededReason, "ready")
},
wantEvent: "Normal Succeeded stored fetched index of size",
},
{
name: "recovery and new artifact",
res: sreconcile.ResultSuccess,
resErr: nil,
oldObjBeforeFunc: func(obj *sourcev1.HelmRepository) {
obj.Status.Artifact = &sourcev1.Artifact{Revision: "xxx", Checksum: "yyy", Size: &aSize}
conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, sourcev1.GitOperationFailedReason, "fail")
conditions.MarkFalse(obj, meta.ReadyCondition, meta.FailedReason, "foo")
},
newObjBeforeFunc: func(obj *sourcev1.HelmRepository) {
obj.Status.Artifact = &sourcev1.Artifact{Revision: "aaa", Checksum: "bbb", Size: &aSize}
conditions.MarkTrue(obj, meta.ReadyCondition, meta.SucceededReason, "ready")
},
wantEvent: "Normal NewArtifact stored fetched index of size",
},
{
name: "no updates",
res: sreconcile.ResultSuccess,
resErr: nil,
oldObjBeforeFunc: func(obj *sourcev1.HelmRepository) {
obj.Status.Artifact = &sourcev1.Artifact{Revision: "xxx", Checksum: "yyy", Size: &aSize}
conditions.MarkTrue(obj, meta.ReadyCondition, meta.SucceededReason, "ready")
},
newObjBeforeFunc: func(obj *sourcev1.HelmRepository) {
obj.Status.Artifact = &sourcev1.Artifact{Revision: "xxx", Checksum: "yyy", Size: &aSize}
conditions.MarkTrue(obj, meta.ReadyCondition, meta.SucceededReason, "ready")
},
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
g := NewWithT(t)
recorder := record.NewFakeRecorder(32)
oldObj := &sourcev1.HelmRepository{}
newObj := oldObj.DeepCopy()
if tt.oldObjBeforeFunc != nil {
tt.oldObjBeforeFunc(oldObj)
}
if tt.newObjBeforeFunc != nil {
tt.newObjBeforeFunc(newObj)
}
reconciler := &HelmRepositoryReconciler{
EventRecorder: recorder,
}
chartRepo := repository.ChartRepository{
URL: "some-address",
}
reconciler.notify(oldObj, newObj, chartRepo, tt.res, tt.resErr)
select {
case x, ok := <-recorder.Events:
g.Expect(ok).To(Equal(tt.wantEvent != ""), "unexpected event received")
if tt.wantEvent != "" {
g.Expect(x).To(ContainSubstring(tt.wantEvent))
}
default:
if tt.wantEvent != "" {
t.Errorf("expected some event to be emitted")
}
}
})
}
}

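For readers unfamiliar with how the event assertions in TestHelmRepositoryReconciler_notify work: record.NewFakeRecorder buffers emitted events as formatted strings on a channel, which is what the select above drains. A tiny self-contained illustration; the object and reason are made up:

package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
	"k8s.io/client-go/tools/record"
)

func main() {
	rec := record.NewFakeRecorder(4) // buffer up to 4 events
	pod := &corev1.Pod{}
	rec.Event(pod, corev1.EventTypeNormal, "NewArtifact", "stored fetched index")
	// Events arrive as "<type> <reason> <message>" strings.
	fmt.Println(<-rec.Events) // Normal NewArtifact stored fetched index
}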
View File

@@ -14,13 +14,13 @@ See the License for the specific language governing permissions and
 limitations under the License.
 */
-package controller
+package controllers
 import (
 	"sigs.k8s.io/controller-runtime/pkg/event"
 	"sigs.k8s.io/controller-runtime/pkg/predicate"
-	sourcev1 "github.com/fluxcd/source-controller/api/v1"
+	sourcev1 "github.com/fluxcd/source-controller/api/v1beta2"
 )
 type SourceRevisionChangePredicate struct {

View File

@@ -14,13 +14,15 @@ See the License for the specific language governing permissions and
 limitations under the License.
 */
-package controller
+package controllers
 import (
 	"archive/tar"
 	"compress/gzip"
 	"context"
+	"crypto/sha256"
 	"fmt"
+	"hash"
 	"io"
 	"io/fs"
 	"net/url"
@@ -31,31 +33,19 @@ import (
 	"time"
 	securejoin "github.com/cyphar/filepath-securejoin"
+	"github.com/fluxcd/pkg/lockedfile"
+	"github.com/fluxcd/pkg/untar"
 	"github.com/go-git/go-git/v5/plumbing/format/gitignore"
-	"github.com/opencontainers/go-digest"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	kerrors "k8s.io/apimachinery/pkg/util/errors"
-	"github.com/fluxcd/pkg/lockedfile"
-	"github.com/fluxcd/pkg/sourceignore"
-	pkgtar "github.com/fluxcd/pkg/tar"
-	v1 "github.com/fluxcd/source-controller/api/v1"
-	intdigest "github.com/fluxcd/source-controller/internal/digest"
+	sourcev1 "github.com/fluxcd/source-controller/api/v1beta2"
 	sourcefs "github.com/fluxcd/source-controller/internal/fs"
+	"github.com/fluxcd/source-controller/pkg/sourceignore"
 )
 const GarbageCountLimit = 1000
-
-const (
-	// defaultFileMode is the permission mode applied to files inside an artifact archive.
-	defaultFileMode int64 = 0o600
-	// defaultDirMode is the permission mode applied to all directories inside an artifact archive.
-	defaultDirMode int64 = 0o750
-	// defaultExeFileMode is the permission mode applied to executable files inside an artifact archive.
-	defaultExeFileMode int64 = 0o700
-)
 // Storage manages artifacts
 type Storage struct {
 	// BasePath is the local directory path where the source artifacts are stored.
@@ -64,12 +54,12 @@ type Storage struct {
 	// Hostname is the file server host name used to compose the artifacts URIs.
 	Hostname string `json:"hostname"`
-	// ArtifactRetentionTTL is the duration of time that artifacts will be kept
-	// in storage before being garbage collected.
+	// ArtifactRetentionTTL is the maximum number of artifacts to be kept in storage
+	// after a garbage collection.
 	ArtifactRetentionTTL time.Duration `json:"artifactRetentionTTL"`
-	// ArtifactRetentionRecords is the maximum number of artifacts to be kept in
-	// storage after a garbage collection.
+	// ArtifactRetentionRecords is the duration of time that artifacts will be kept in
+	// storage before being garbage collected.
 	ArtifactRetentionRecords int `json:"artifactRetentionRecords"`
 }
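
Note the right-hand (v0.24.1) comments describe the two fields the wrong way round; the left-hand wording is the corrected one: the TTL is a duration, the records field a count. A hypothetical call site with made-up values, matching the NewStorage signature visible in the next hunk header:

// Keep at most 2 artifacts per source, and let none live longer than a day.
storage, err := NewStorage("/data", "source-controller.flux-system.svc.cluster.local.", 24*time.Hour, 2)
if err != nil {
	panic(err)
}
_ = storage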
@@ -86,10 +76,10 @@ func NewStorage(basePath string, hostname string, artifactRetentionTTL time.Dura
 	}, nil
 }
-// NewArtifactFor returns a new v1.Artifact.
-func (s Storage) NewArtifactFor(kind string, metadata metav1.Object, revision, fileName string) v1.Artifact {
-	path := v1.ArtifactPath(kind, metadata.GetNamespace(), metadata.GetName(), fileName)
-	artifact := v1.Artifact{
+// NewArtifactFor returns a new v1beta1.Artifact.
+func (s *Storage) NewArtifactFor(kind string, metadata metav1.Object, revision, fileName string) sourcev1.Artifact {
+	path := sourcev1.ArtifactPath(kind, metadata.GetNamespace(), metadata.GetName(), fileName)
+	artifact := sourcev1.Artifact{
 		Path:     path,
 		Revision: revision,
 	}
@@ -97,8 +87,8 @@ func (s Storage) NewArtifactFor(kind string, metadata metav1.Object, revision, f
 	return artifact
 }
-// SetArtifactURL sets the URL on the given v1.Artifact.
-func (s Storage) SetArtifactURL(artifact *v1.Artifact) {
+// SetArtifactURL sets the URL on the given v1beta1.Artifact.
+func (s Storage) SetArtifactURL(artifact *sourcev1.Artifact) {
 	if artifact.Path == "" {
 		return
 	}
@@ -119,19 +109,14 @@ func (s Storage) SetHostname(URL string) string {
 	return u.String()
 }
-// MkdirAll calls os.MkdirAll for the given v1.Artifact base dir.
-func (s Storage) MkdirAll(artifact v1.Artifact) error {
+// MkdirAll calls os.MkdirAll for the given v1beta1.Artifact base dir.
+func (s *Storage) MkdirAll(artifact sourcev1.Artifact) error {
 	dir := filepath.Dir(s.LocalPath(artifact))
-	return os.MkdirAll(dir, 0o700)
+	return os.MkdirAll(dir, 0o770)
 }
-// Remove calls os.Remove for the given v1.Artifact path.
-func (s Storage) Remove(artifact v1.Artifact) error {
-	return os.Remove(s.LocalPath(artifact))
-}
-// RemoveAll calls os.RemoveAll for the given v1.Artifact base dir.
-func (s Storage) RemoveAll(artifact v1.Artifact) (string, error) {
+// RemoveAll calls os.RemoveAll for the given v1beta1.Artifact base dir.
+func (s *Storage) RemoveAll(artifact sourcev1.Artifact) (string, error) {
 	var deletedDir string
 	dir := filepath.Dir(s.LocalPath(artifact))
 	// Check if the dir exists.
@@ -142,8 +127,8 @@ func (s Storage) RemoveAll(artifact v1.Artifact) (string, error) {
 	return deletedDir, os.RemoveAll(dir)
 }
-// RemoveAllButCurrent removes all files for the given v1.Artifact base dir, excluding the current one.
-func (s Storage) RemoveAllButCurrent(artifact v1.Artifact) ([]string, error) {
+// RemoveAllButCurrent removes all files for the given v1beta1.Artifact base dir, excluding the current one.
+func (s *Storage) RemoveAllButCurrent(artifact sourcev1.Artifact) ([]string, error) {
 	deletedFiles := []string{}
 	localPath := s.LocalPath(artifact)
 	dir := filepath.Dir(localPath)
@@ -173,17 +158,18 @@ func (s Storage) RemoveAllButCurrent(artifact v1.Artifact) ([]string, error) {
 // getGarbageFiles returns all files that need to be garbage collected for the given artifact.
 // Garbage files are determined based on the below flow:
-// 1. collect all artifact files with an expired ttl
+// 1. collect all files with an expired ttl
 // 2. if we satisfy maxItemsToBeRetained, then return
-// 3. else, collect all artifact files till the latest n files remain, where n=maxItemsToBeRetained
-func (s Storage) getGarbageFiles(artifact v1.Artifact, totalCountLimit, maxItemsToBeRetained int, ttl time.Duration) (garbageFiles []string, _ error) {
+// 3. else, remove all files till the latest n files remain, where n=maxItemsToBeRetained
+func (s *Storage) getGarbageFiles(artifact sourcev1.Artifact, totalCountLimit, maxItemsToBeRetained int, ttl time.Duration) ([]string, error) {
 	localPath := s.LocalPath(artifact)
 	dir := filepath.Dir(localPath)
-	artifactFilesWithCreatedTs := make(map[time.Time]string)
+	garbageFiles := []string{}
+	filesWithCreatedTs := make(map[time.Time]string)
 	// sortedPaths contain all files sorted according to their created ts.
 	sortedPaths := []string{}
 	now := time.Now().UTC()
-	totalArtifactFiles := 0
+	totalFiles := 0
 	var errors []string
 	creationTimestamps := []time.Time{}
 	_ = filepath.WalkDir(dir, func(path string, d fs.DirEntry, err error) error {
@@ -191,8 +177,8 @@ func (s Storage) getGarbageFiles(artifact v1.Artifact, totalCountLimit, maxItems
 			errors = append(errors, err.Error())
 			return nil
 		}
-		if totalArtifactFiles >= totalCountLimit {
-			return fmt.Errorf("reached file walking limit, already walked over: %d", totalArtifactFiles)
+		if totalFiles >= totalCountLimit {
+			return fmt.Errorf("reached file walking limit, already walked over: %d", totalFiles)
 		}
 		info, err := d.Info()
 		if err != nil {
@@ -202,16 +188,14 @@ func (s Storage) getGarbageFiles(artifact v1.Artifact, totalCountLimit, maxItems
 		createdAt := info.ModTime().UTC()
 		diff := now.Sub(createdAt)
 		// Compare the time difference between now and the time at which the file was created
-		// with the provided TTL. Delete if the difference is greater than the TTL. Since the
-		// below logic just deals with determining if an artifact needs to be garbage collected,
-		// we avoid all lock files, adding them at the end to the list of garbage files.
+		// with the provided TTL. Delete if the difference is greater than the TTL.
 		expired := diff > ttl
-		if !info.IsDir() && info.Mode()&os.ModeSymlink != os.ModeSymlink && filepath.Ext(path) != ".lock" {
+		if !info.IsDir() && info.Mode()&os.ModeSymlink != os.ModeSymlink {
 			if path != localPath && expired {
 				garbageFiles = append(garbageFiles, path)
 			}
-			totalArtifactFiles += 1
-			artifactFilesWithCreatedTs[createdAt] = path
+			totalFiles += 1
+			filesWithCreatedTs[createdAt] = path
 			creationTimestamps = append(creationTimestamps, createdAt)
 		}
 		return nil
@@ -223,14 +207,14 @@ func (s Storage) getGarbageFiles(artifact v1.Artifact, totalCountLimit, maxItems
 	// We already collected enough garbage files to satisfy the no. of max
 	// items that are supposed to be retained, so exit early.
-	if totalArtifactFiles-len(garbageFiles) < maxItemsToBeRetained {
+	if totalFiles-len(garbageFiles) < maxItemsToBeRetained {
 		return garbageFiles, nil
 	}
-	// sort all timestamps in ascending order.
+	// sort all timestamps in an ascending order.
 	sort.Slice(creationTimestamps, func(i, j int) bool { return creationTimestamps[i].Before(creationTimestamps[j]) })
 	for _, ts := range creationTimestamps {
-		path, ok := artifactFilesWithCreatedTs[ts]
+		path, ok := filesWithCreatedTs[ts]
 		if !ok {
 			return garbageFiles, fmt.Errorf("failed to fetch file for created ts: %v", ts)
 		}
@@ -240,8 +224,8 @@ func (s Storage) getGarbageFiles(artifact v1.Artifact, totalCountLimit, maxItems
 	var collected int
 	noOfGarbageFiles := len(garbageFiles)
 	for _, path := range sortedPaths {
-		if path != localPath && filepath.Ext(path) != ".lock" && !stringInSlice(path, garbageFiles) {
-			// If we previously collected some garbage files with an expired ttl, then take that into account
+		if path != localPath && !stringInSlice(path, garbageFiles) {
+			// If we previously collected a few garbage files with an expired ttl, then take that into account
 			// when checking whether we need to remove more files to satisfy the max no. of items allowed
 			// in the filesystem, along with the no. of files already removed in this loop.
 			if noOfGarbageFiles > 0 {
@@ -261,9 +245,9 @@ func (s Storage) getGarbageFiles(artifact v1.Artifact, totalCountLimit, maxItems
 	return garbageFiles, nil
 }
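
Stripped of the filesystem walking, the retention rule both versions implement is: expire by TTL first, then trim the oldest survivors down to the retention count. A self-contained sketch under that description; the helper and its inputs are hypothetical:

package main

import (
	"fmt"
	"time"
)

// selectGarbage returns the files to delete: phase one removes anything older
// than ttl, phase two removes the oldest survivors until only maxRetained
// remain. files and created run oldest-first and are index-aligned.
func selectGarbage(files []string, created []time.Time, ttl time.Duration, maxRetained int) []string {
	now := time.Now().UTC()
	var garbage, kept []string
	for i, f := range files {
		if now.Sub(created[i]) > ttl {
			garbage = append(garbage, f) // phase 1: expired by TTL
		} else {
			kept = append(kept, f)
		}
	}
	for len(kept) > maxRetained {
		garbage = append(garbage, kept[0]) // phase 2: oldest first
		kept = kept[1:]
	}
	return garbage
}

func main() {
	now := time.Now().UTC()
	files := []string{"a.tgz", "b.tgz", "c.tgz"}
	created := []time.Time{now.Add(-48 * time.Hour), now.Add(-2 * time.Hour), now.Add(-time.Hour)}
	fmt.Println(selectGarbage(files, created, 24*time.Hour, 1)) // [a.tgz b.tgz]
}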
-// GarbageCollect removes all garbage files in the artifact dir according to the provided
+// GarbageCollect removes all garabge files in the artifact dir according to the provided
 // retention options.
-func (s Storage) GarbageCollect(ctx context.Context, artifact v1.Artifact, timeout time.Duration) ([]string, error) {
+func (s *Storage) GarbageCollect(ctx context.Context, artifact sourcev1.Artifact, timeout time.Duration) ([]string, error) {
 	delFilesChan := make(chan []string)
 	errChan := make(chan error)
 	// Abort if it takes more than the provided timeout duration.
@@ -286,14 +270,6 @@ func (s Storage) GarbageCollect(ctx context.Context, artifact v1.Artifact, timeo
 			} else {
 				deleted = append(deleted, file)
 			}
-			// If a lock file exists for this garbage artifact, remove that too.
-			lockFile := file + ".lock"
-			if _, err = os.Lstat(lockFile); err == nil {
-				err = os.Remove(lockFile)
-				if err != nil {
-					errors = append(errors, err)
-				}
-			}
 		}
 	}
 	if len(errors) > 0 {
@@ -324,8 +300,8 @@ func stringInSlice(a string, list []string) bool {
 	return false
 }
-// ArtifactExist returns a boolean indicating whether the v1.Artifact exists in storage and is a regular file.
-func (s Storage) ArtifactExist(artifact v1.Artifact) bool {
+// ArtifactExist returns a boolean indicating whether the v1beta1.Artifact exists in storage and is a regular file.
+func (s *Storage) ArtifactExist(artifact sourcev1.Artifact) bool {
 	fi, err := os.Lstat(s.LocalPath(artifact))
 	if err != nil {
 		return false
@@ -333,35 +309,6 @@ func (s Storage) ArtifactExist(artifact v1.Artifact) bool {
 	return fi.Mode().IsRegular()
 }
-// VerifyArtifact verifies if the Digest of the v1.Artifact matches the digest
-// of the file in Storage. It returns an error if the digests don't match, or
-// if it can't be verified.
-func (s Storage) VerifyArtifact(artifact v1.Artifact) error {
-	if artifact.Digest == "" {
-		return fmt.Errorf("artifact has no digest")
-	}
-	d, err := digest.Parse(artifact.Digest)
-	if err != nil {
-		return fmt.Errorf("failed to parse artifact digest '%s': %w", artifact.Digest, err)
-	}
-	f, err := os.Open(s.LocalPath(artifact))
-	if err != nil {
-		return err
-	}
-	defer f.Close()
-	verifier := d.Verifier()
-	if _, err = io.Copy(verifier, f); err != nil {
-		return err
-	}
-	if !verifier.Verified() {
-		return fmt.Errorf("computed digest doesn't match '%s'", d.String())
-	}
-	return nil
-}
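
VerifyArtifact is built on go-digest's streaming Verifier, which is worth seeing in isolation. A minimal, standalone example of that pattern; the content string is made up:

package main

import (
	"fmt"
	"io"
	"strings"

	"github.com/opencontainers/go-digest"
)

func main() {
	content := "helm index contents"
	d := digest.SHA256.FromString(content)

	// A Verifier is an io.Writer: stream the file through it, then check.
	v := d.Verifier()
	if _, err := io.Copy(v, strings.NewReader(content)); err != nil {
		panic(err)
	}
	fmt.Println(v.Verified()) // true; false would indicate corruption
}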
 // ArchiveFileFilter must return true if a file should not be included in the archive after inspecting the given path
 // and/or os.FileInfo.
 type ArchiveFileFilter func(p string, fi os.FileInfo) bool
@@ -380,11 +327,11 @@ func SourceIgnoreFilter(ps []gitignore.Pattern, domain []string) ArchiveFileFilt
 	}
 }
-// Archive atomically archives the given directory as a tarball to the given v1.Artifact path, excluding
+// Archive atomically archives the given directory as a tarball to the given v1beta1.Artifact path, excluding
 // directories and any ArchiveFileFilter matches. While archiving, any environment specific data (for example,
 // the user and group name) is stripped from file headers.
-// If successful, it sets the digest and last update time on the artifact.
-func (s Storage) Archive(artifact *v1.Artifact, dir string, filter ArchiveFileFilter) (err error) {
+// If successful, it sets the checksum and last update time on the artifact.
+func (s *Storage) Archive(artifact *sourcev1.Artifact, dir string, filter ArchiveFileFilter) (err error) {
 	if f, err := os.Stat(dir); os.IsNotExist(err) || !f.IsDir() {
 		return fmt.Errorf("invalid dir path: %s", dir)
 	}
@@ -401,9 +348,9 @@ func (s Storage) Archive(artifact *v1.Artifact, dir string, filter ArchiveFileFi
 	}
 	}()
-	d := intdigest.Canonical.Digester()
+	h := newHash()
 	sz := &writeCounter{}
-	mw := io.MultiWriter(d.Hash(), tf, sz)
+	mw := io.MultiWriter(h, tf, sz)
 	gw := gzip.NewWriter(mw)
 	tw := tar.NewWriter(gw)
@ -426,7 +373,6 @@ func (s Storage) Archive(artifact *v1.Artifact, dir string, filter ArchiveFileFi
if err != nil { if err != nil {
return err return err
} }
// The name needs to be modified to maintain directory structure // The name needs to be modified to maintain directory structure
// as tar.FileInfoHeader only has access to the base name of the file. // as tar.FileInfoHeader only has access to the base name of the file.
// Ref: https://golang.org/src/archive/tar/common.go?#L626 // Ref: https://golang.org/src/archive/tar/common.go?#L626
@ -437,7 +383,17 @@ func (s Storage) Archive(artifact *v1.Artifact, dir string, filter ArchiveFileFi
return err return err
} }
} }
sanitizeHeader(relFilePath, header) header.Name = relFilePath
// We want to remove any environment specific data as well, this
// ensures the checksum is purely content based.
header.Gid = 0
header.Uid = 0
header.Uname = ""
header.Gname = ""
header.ModTime = time.Time{}
header.AccessTime = time.Time{}
header.ChangeTime = time.Time{}
if err := tw.WriteHeader(header); err != nil { if err := tw.WriteHeader(header); err != nil {
return err return err
@ -476,7 +432,7 @@ func (s Storage) Archive(artifact *v1.Artifact, dir string, filter ArchiveFileFi
return err return err
} }
if err := os.Chmod(tmpName, 0o600); err != nil { if err := os.Chmod(tmpName, 0o640); err != nil {
return err return err
} }
@ -484,16 +440,16 @@ func (s Storage) Archive(artifact *v1.Artifact, dir string, filter ArchiveFileFi
return err return err
} }
artifact.Digest = d.Digest().String() artifact.Checksum = fmt.Sprintf("%x", h.Sum(nil))
artifact.LastUpdateTime = metav1.Now() artifact.LastUpdateTime = metav1.Now()
artifact.Size = &sz.written artifact.Size = &sz.written
return nil return nil
} }
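A sketch of the main-branch Archive call combined with SourceIgnoreFilter; the extra ignore pattern and function name are illustrative:

package controller

import (
	"github.com/go-git/go-git/v5/plumbing/format/gitignore"

	sourcev1 "github.com/fluxcd/source-controller/api/v1"
)

// archiveExample tars dir into the artifact's storage path, excluding VCS
// metadata and one extra pattern. On success, Archive sets the artifact's
// Digest, Size and LastUpdateTime.
func archiveExample(s *Storage, artifact *sourcev1.Artifact, dir string) error {
	ps := []gitignore.Pattern{
		gitignore.ParsePattern("*.tmp", nil), // hypothetical exclusion
	}
	return s.Archive(artifact, dir, SourceIgnoreFilter(ps, nil))
}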
// AtomicWriteFile atomically writes the io.Reader contents to the v1.Artifact path. // AtomicWriteFile atomically writes the io.Reader contents to the v1beta1.Artifact path.
// If successful, it sets the digest and last update time on the artifact. // If successful, it sets the checksum and last update time on the artifact.
func (s Storage) AtomicWriteFile(artifact *v1.Artifact, reader io.Reader, mode os.FileMode) (err error) { func (s *Storage) AtomicWriteFile(artifact *sourcev1.Artifact, reader io.Reader, mode os.FileMode) (err error) {
localPath := s.LocalPath(*artifact) localPath := s.LocalPath(*artifact)
tf, err := os.CreateTemp(filepath.Split(localPath)) tf, err := os.CreateTemp(filepath.Split(localPath))
if err != nil { if err != nil {
@ -506,9 +462,9 @@ func (s Storage) AtomicWriteFile(artifact *v1.Artifact, reader io.Reader, mode o
} }
}() }()
d := intdigest.Canonical.Digester() h := newHash()
sz := &writeCounter{} sz := &writeCounter{}
mw := io.MultiWriter(tf, d.Hash(), sz) mw := io.MultiWriter(h, tf, sz)
if _, err := io.Copy(mw, reader); err != nil { if _, err := io.Copy(mw, reader); err != nil {
tf.Close() tf.Close()
@ -526,16 +482,16 @@ func (s Storage) AtomicWriteFile(artifact *v1.Artifact, reader io.Reader, mode o
return err return err
} }
artifact.Digest = d.Digest().String() artifact.Checksum = fmt.Sprintf("%x", h.Sum(nil))
artifact.LastUpdateTime = metav1.Now() artifact.LastUpdateTime = metav1.Now()
artifact.Size = &sz.written artifact.Size = &sz.written
return nil return nil
} }
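A small sketch of AtomicWriteFile on the main-branch side; the 0o600 mode mirrors the one used in the tests below:

package controller

import (
	"bytes"

	sourcev1 "github.com/fluxcd/source-controller/api/v1"
)

// atomicWriteExample stages the bytes in a temp file and renames it into
// place, so readers never observe a partially written artifact.
func atomicWriteExample(s *Storage, artifact *sourcev1.Artifact, data []byte) error {
	return s.AtomicWriteFile(artifact, bytes.NewReader(data), 0o600)
}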
// Copy atomically copies the io.Reader contents to the v1.Artifact path. // Copy atomically copies the io.Reader contents to the v1beta1.Artifact path.
// If successful, it sets the digest and last update time on the artifact. // If successful, it sets the checksum and last update time on the artifact.
func (s Storage) Copy(artifact *v1.Artifact, reader io.Reader) (err error) { func (s *Storage) Copy(artifact *sourcev1.Artifact, reader io.Reader) (err error) {
localPath := s.LocalPath(*artifact) localPath := s.LocalPath(*artifact)
tf, err := os.CreateTemp(filepath.Split(localPath)) tf, err := os.CreateTemp(filepath.Split(localPath))
if err != nil { if err != nil {
@ -548,9 +504,9 @@ func (s Storage) Copy(artifact *v1.Artifact, reader io.Reader) (err error) {
} }
}() }()
d := intdigest.Canonical.Digester() h := newHash()
sz := &writeCounter{} sz := &writeCounter{}
mw := io.MultiWriter(tf, d.Hash(), sz) mw := io.MultiWriter(h, tf, sz)
if _, err := io.Copy(mw, reader); err != nil { if _, err := io.Copy(mw, reader); err != nil {
tf.Close() tf.Close()
@ -564,16 +520,16 @@ func (s Storage) Copy(artifact *v1.Artifact, reader io.Reader) (err error) {
return err return err
} }
artifact.Digest = d.Digest().String() artifact.Checksum = fmt.Sprintf("%x", h.Sum(nil))
artifact.LastUpdateTime = metav1.Now() artifact.LastUpdateTime = metav1.Now()
artifact.Size = &sz.written artifact.Size = &sz.written
return nil return nil
} }
// CopyFromPath atomically copies the contents of the given path to the path of the v1.Artifact. // CopyFromPath atomically copies the contents of the given path to the path of the v1beta1.Artifact.
// If successful, the digest and last update time on the artifact is set. // If successful, the checksum and last update time on the artifact is set.
func (s Storage) CopyFromPath(artifact *v1.Artifact, path string) (err error) { func (s *Storage) CopyFromPath(artifact *sourcev1.Artifact, path string) (err error) {
f, err := os.Open(path) f, err := os.Open(path)
if err != nil { if err != nil {
return err return err
@ -588,7 +544,7 @@ func (s Storage) CopyFromPath(artifact *v1.Artifact, path string) (err error) {
} }
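CopyFromPath pairs naturally with VerifyArtifact; a hedged sketch (illustrative name, same-package assumption):

package controller

import (
	sourcev1 "github.com/fluxcd/source-controller/api/v1"
)

// copyFromPathExample copies e.g. a downloaded file into storage, then
// cross-checks the digest CopyFromPath recorded against what landed on disk.
func copyFromPathExample(s *Storage, artifact *sourcev1.Artifact, path string) error {
	if err := s.CopyFromPath(artifact, path); err != nil {
		return err
	}
	return s.VerifyArtifact(*artifact)
}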
// CopyToPath copies the contents in the (sub)path of the given artifact to the given path. // CopyToPath copies the contents in the (sub)path of the given artifact to the given path.
func (s Storage) CopyToPath(artifact *v1.Artifact, subPath, toPath string) error { func (s *Storage) CopyToPath(artifact *sourcev1.Artifact, subPath, toPath string) error {
// create a tmp directory to store artifact // create a tmp directory to store artifact
tmp, err := os.MkdirTemp("", "flux-include-") tmp, err := os.MkdirTemp("", "flux-include-")
if err != nil { if err != nil {
@ -606,7 +562,7 @@ func (s Storage) CopyToPath(artifact *v1.Artifact, subPath, toPath string) error
// untar the artifact // untar the artifact
untarPath := filepath.Join(tmp, "unpack") untarPath := filepath.Join(tmp, "unpack")
if err = pkgtar.Untar(f, untarPath, pkgtar.WithMaxUntarSize(-1)); err != nil { if _, err = untar.Untar(f, untarPath); err != nil {
return err return err
} }
@ -626,8 +582,8 @@ func (s Storage) CopyToPath(artifact *v1.Artifact, subPath, toPath string) error
return nil return nil
} }
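A sketch of extracting a single subdirectory from a stored artifact with CopyToPath; both paths are hypothetical:

package controller

import (
	sourcev1 "github.com/fluxcd/source-controller/api/v1"
)

// copyToPathExample unpacks only the "deploy" subdirectory of the artifact
// tarball into the target path.
func copyToPathExample(s *Storage, artifact *sourcev1.Artifact) error {
	return s.CopyToPath(artifact, "deploy", "/tmp/deploy")
}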
// Symlink creates or updates a symbolic link for the given v1.Artifact and returns the URL for the symlink. // Symlink creates or updates a symbolic link for the given v1beta1.Artifact and returns the URL for the symlink.
func (s Storage) Symlink(artifact v1.Artifact, linkName string) (string, error) { func (s *Storage) Symlink(artifact sourcev1.Artifact, linkName string) (string, error) {
localPath := s.LocalPath(artifact) localPath := s.LocalPath(artifact)
dir := filepath.Dir(localPath) dir := filepath.Dir(localPath)
link := filepath.Join(dir, linkName) link := filepath.Join(dir, linkName)
@ -645,18 +601,26 @@ func (s Storage) Symlink(artifact v1.Artifact, linkName string) (string, error)
return "", err return "", err
} }
return fmt.Sprintf("http://%s/%s", s.Hostname, filepath.Join(filepath.Dir(artifact.Path), linkName)), nil url := fmt.Sprintf("http://%s/%s", s.Hostname, filepath.Join(filepath.Dir(artifact.Path), linkName))
return url, nil
} }
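A sketch of the Symlink helper; "latest.tar.gz" is a conventional link name, but any name works:

package controller

import (
	"fmt"

	sourcev1 "github.com/fluxcd/source-controller/api/v1"
)

// symlinkExample points "latest.tar.gz" at the given artifact and prints
// the URL it is served under, e.g. http://<hostname>/<dir>/latest.tar.gz.
func symlinkExample(s *Storage, artifact sourcev1.Artifact) error {
	url, err := s.Symlink(artifact, "latest.tar.gz")
	if err != nil {
		return err
	}
	fmt.Println("artifact available at", url)
	return nil
}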
// Lock creates a file lock for the given v1.Artifact. // Checksum returns the SHA256 checksum for the data of the given io.Reader as a string.
func (s Storage) Lock(artifact v1.Artifact) (unlock func(), err error) { func (s *Storage) Checksum(reader io.Reader) string {
h := newHash()
_, _ = io.Copy(h, reader)
return fmt.Sprintf("%x", h.Sum(nil))
}
// Lock creates a file lock for the given v1beta1.Artifact.
func (s *Storage) Lock(artifact sourcev1.Artifact) (unlock func(), err error) {
lockFile := s.LocalPath(artifact) + ".lock" lockFile := s.LocalPath(artifact) + ".lock"
mutex := lockedfile.MutexAt(lockFile) mutex := lockedfile.MutexAt(lockFile)
return mutex.Lock() return mutex.Lock()
} }
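Lock returns an unlock function, which composes well with defer; a hedged sketch of guarding a write with the per-artifact lock:

package controller

import (
	"bytes"

	sourcev1 "github.com/fluxcd/source-controller/api/v1"
)

// lockedWriteExample holds the artifact's file lock for the duration of
// the write, so concurrent reconciliations don't race on the same path.
func lockedWriteExample(s *Storage, artifact *sourcev1.Artifact, data []byte) error {
	unlock, err := s.Lock(*artifact)
	if err != nil {
		return err
	}
	defer unlock()
	return s.AtomicWriteFile(artifact, bytes.NewReader(data), 0o600)
}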
// LocalPath returns the secure local path of the given artifact (that is: relative to the Storage.BasePath). // LocalPath returns the secure local path of the given artifact (that is: relative to the Storage.BasePath).
func (s Storage) LocalPath(artifact v1.Artifact) string { func (s *Storage) LocalPath(artifact sourcev1.Artifact) string {
if artifact.Path == "" { if artifact.Path == "" {
return "" return ""
} }
@ -667,7 +631,12 @@ func (s Storage) LocalPath(artifact v1.Artifact) string {
return path return path
} }
// writeCounter is an implementation of io.Writer that only records the number // newHash returns a new SHA256 hash.
func newHash() hash.Hash {
return sha256.New()
}
// writecounter is an implementation of io.Writer that only records the number
// of bytes written. // of bytes written.
type writeCounter struct { type writeCounter struct {
written int64 written int64
@ -678,42 +647,3 @@ func (wc *writeCounter) Write(p []byte) (int, error) {
wc.written += int64(n) wc.written += int64(n)
return n, nil return n, nil
} }
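writeCounter is meant to sit inside an io.MultiWriter, as Archive does above; a minimal sketch:

package controller

import "io"

// countBytesExample copies src to dst while recording how many bytes
// passed through, without buffering them.
func countBytesExample(dst io.Writer, src io.Reader) (int64, error) {
	wc := &writeCounter{}
	if _, err := io.Copy(io.MultiWriter(dst, wc), src); err != nil {
		return 0, err
	}
	return wc.written, nil
}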
// sanitizeHeader modifies the tar.Header to be relative to the root of the
// archive and removes any environment specific data.
func sanitizeHeader(relP string, h *tar.Header) {
// Modify the name to be relative to the root of the archive,
// this ensures we maintain the same structure when extracting.
h.Name = relP
// We want to remove any environment specific data as well, this
// ensures the checksum is purely content based.
h.Gid = 0
h.Uid = 0
h.Uname = ""
h.Gname = ""
h.ModTime = time.Time{}
h.AccessTime = time.Time{}
h.ChangeTime = time.Time{}
// Override the mode to be the default for the type of file.
setDefaultMode(h)
}
// setDefaultMode sets the default mode for the given header.
func setDefaultMode(h *tar.Header) {
if h.FileInfo().IsDir() {
h.Mode = defaultDirMode
return
}
if h.FileInfo().Mode().IsRegular() {
mode := h.FileInfo().Mode()
if mode&os.ModeType == 0 && mode&0o111 != 0 {
h.Mode = defaultExeFileMode
return
}
h.Mode = defaultFileMode
return
}
}
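A sketch of what sanitizeHeader does to a header built from an os.FileInfo (the file name is illustrative; defaultFileMode and friends are the package constants referenced above):

package controller

import (
	"archive/tar"
	"os"
)

// sanitizeExample builds a tar header and sanitizes it: the name becomes
// archive-relative, ownership and timestamps are zeroed, and the mode
// collapses to one of the default modes, so the archive digest depends
// only on content and structure.
func sanitizeExample(fi os.FileInfo) (*tar.Header, error) {
	header, err := tar.FileInfoHeader(fi, "")
	if err != nil {
		return nil, err
	}
	sanitizeHeader("app/main.go", header)
	return header, nil
}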

controllers/storage_test.go
@ -14,17 +14,16 @@ See the License for the specific language governing permissions and
limitations under the License. limitations under the License.
*/ */
package controller package controllers
import ( import (
"archive/tar" "archive/tar"
"bytes"
"compress/gzip" "compress/gzip"
"context" "context"
"errors"
"fmt" "fmt"
"io" "io"
"os" "os"
"path"
"path/filepath" "path/filepath"
"strings" "strings"
"testing" "testing"
@ -33,11 +32,23 @@ import (
"github.com/go-git/go-git/v5/plumbing/format/gitignore" "github.com/go-git/go-git/v5/plumbing/format/gitignore"
. "github.com/onsi/gomega" . "github.com/onsi/gomega"
sourcev1 "github.com/fluxcd/source-controller/api/v1" sourcev1 "github.com/fluxcd/source-controller/api/v1beta2"
) )
func createStoragePath() (string, error) {
return os.MkdirTemp("", "")
}
func cleanupStoragePath(dir string) func() {
return func() { os.RemoveAll(dir) }
}
func TestStorageConstructor(t *testing.T) { func TestStorageConstructor(t *testing.T) {
dir := t.TempDir() dir, err := createStoragePath()
if err != nil {
t.Fatal(err)
}
t.Cleanup(cleanupStoragePath(dir))
if _, err := NewStorage("/nonexistent", "hostname", time.Minute, 2); err == nil { if _, err := NewStorage("/nonexistent", "hostname", time.Minute, 2); err == nil {
t.Fatal("nonexistent path was allowable in storage constructor") t.Fatal("nonexistent path was allowable in storage constructor")
@ -62,16 +73,16 @@ func TestStorageConstructor(t *testing.T) {
// walks a tar.gz and looks for paths with the basename. It does not match // walks a tar.gz and looks for paths with the basename. It does not match
// symlinks properly at this time because that's painful. // symlinks properly at this time because that's painful.
func walkTar(tarFile string, match string, dir bool) (int64, int64, bool, error) { func walkTar(tarFile string, match string, dir bool) (int64, bool, error) {
f, err := os.Open(tarFile) f, err := os.Open(tarFile)
if err != nil { if err != nil {
return 0, 0, false, fmt.Errorf("could not open file: %w", err) return 0, false, fmt.Errorf("could not open file: %w", err)
} }
defer f.Close() defer f.Close()
gzr, err := gzip.NewReader(f) gzr, err := gzip.NewReader(f)
if err != nil { if err != nil {
return 0, 0, false, fmt.Errorf("could not unzip file: %w", err) return 0, false, fmt.Errorf("could not unzip file: %w", err)
} }
defer gzr.Close() defer gzr.Close()
@ -81,42 +92,49 @@ func walkTar(tarFile string, match string, dir bool) (int64, int64, bool, error)
if err == io.EOF { if err == io.EOF {
break break
} else if err != nil { } else if err != nil {
return 0, 0, false, fmt.Errorf("corrupt tarball reading header: %w", err) return 0, false, fmt.Errorf("corrupt tarball reading header: %w", err)
} }
switch header.Typeflag { switch header.Typeflag {
case tar.TypeDir: case tar.TypeDir:
if header.Name == match && dir { if header.Name == match && dir {
return 0, header.Mode, true, nil return 0, true, nil
} }
case tar.TypeReg: case tar.TypeReg:
if header.Name == match { if header.Name == match {
return header.Size, header.Mode, true, nil return header.Size, true, nil
} }
default: default:
// skip // skip
} }
} }
return 0, 0, false, nil return 0, false, nil
} }
func TestStorage_Archive(t *testing.T) { func TestStorage_Archive(t *testing.T) {
dir := t.TempDir() dir, err := createStoragePath()
if err != nil {
t.Fatal(err)
}
t.Cleanup(cleanupStoragePath(dir))
storage, err := NewStorage(dir, "hostname", time.Minute, 2) storage, err := NewStorage(dir, "hostname", time.Minute, 2)
if err != nil { if err != nil {
t.Fatalf("error while bootstrapping storage: %v", err) t.Fatalf("error while bootstrapping storage: %v", err)
} }
type dummyFile struct { createFiles := func(files map[string][]byte) (dir string, err error) {
content []byte defer func() {
mode int64 if err != nil && dir != "" {
} os.RemoveAll(dir)
}
createFiles := func(files map[string]dummyFile) (dir string, err error) { }()
dir = t.TempDir() dir, err = os.MkdirTemp("", "archive-test-files-")
for name, df := range files { if err != nil {
return
}
for name, b := range files {
absPath := filepath.Join(dir, name) absPath := filepath.Join(dir, name)
if err = os.MkdirAll(filepath.Dir(absPath), 0o750); err != nil { if err = os.MkdirAll(filepath.Dir(absPath), 0o750); err != nil {
return return
@ -125,33 +143,27 @@ func TestStorage_Archive(t *testing.T) {
if err != nil { if err != nil {
return "", fmt.Errorf("could not create file %q: %w", absPath, err) return "", fmt.Errorf("could not create file %q: %w", absPath, err)
} }
if n, err := f.Write(df.content); err != nil { if n, err := f.Write(b); err != nil {
f.Close() f.Close()
return "", fmt.Errorf("could not write %d bytes to file %q: %w", n, f.Name(), err) return "", fmt.Errorf("could not write %d bytes to file %q: %w", n, f.Name(), err)
} }
f.Close() f.Close()
if df.mode != 0 {
if err = os.Chmod(absPath, os.FileMode(df.mode)); err != nil {
return "", fmt.Errorf("could not chmod file %q: %w", absPath, err)
}
}
} }
return return
} }
matchFiles := func(t *testing.T, storage *Storage, artifact sourcev1.Artifact, files map[string]dummyFile, dirs []string) { matchFiles := func(t *testing.T, storage *Storage, artifact sourcev1.Artifact, files map[string][]byte, dirs []string) {
t.Helper() t.Helper()
for name, df := range files { for name, b := range files {
mustExist := !(name[0:1] == "!") mustExist := !(name[0:1] == "!")
if !mustExist { if !mustExist {
name = name[1:] name = name[1:]
} }
s, m, exist, err := walkTar(storage.LocalPath(artifact), name, false) s, exist, err := walkTar(storage.LocalPath(artifact), name, false)
if err != nil { if err != nil {
t.Fatalf("failed reading tarball: %v", err) t.Fatalf("failed reading tarball: %v", err)
} }
if bs := int64(len(df.content)); s != bs { if bs := int64(len(b)); s != bs {
t.Fatalf("%q size %v != %v", name, s, bs) t.Fatalf("%q size %v != %v", name, s, bs)
} }
if exist != mustExist { if exist != mustExist {
@ -161,20 +173,13 @@ func TestStorage_Archive(t *testing.T) {
t.Errorf("tarball contained excluded file %q", name) t.Errorf("tarball contained excluded file %q", name)
} }
} }
expectMode := df.mode
if expectMode == 0 {
expectMode = defaultFileMode
}
if exist && m != expectMode {
t.Fatalf("%q mode %v != %v", name, m, expectMode)
}
} }
for _, name := range dirs { for _, name := range dirs {
mustExist := !(name[0:1] == "!") mustExist := !(name[0:1] == "!")
if !mustExist { if !mustExist {
name = name[1:] name = name[1:]
} }
_, m, exist, err := walkTar(storage.LocalPath(artifact), name, true) _, exist, err := walkTar(storage.LocalPath(artifact), name, true)
if err != nil { if err != nil {
t.Fatalf("failed reading tarball: %v", err) t.Fatalf("failed reading tarball: %v", err)
} }
@ -185,71 +190,67 @@ func TestStorage_Archive(t *testing.T) {
t.Errorf("tarball contained excluded file %q", name) t.Errorf("tarball contained excluded file %q", name)
} }
} }
if exist && m != defaultDirMode {
t.Fatalf("%q mode %v != %v", name, m, defaultDirMode)
}
} }
} }
tests := []struct { tests := []struct {
name string name string
files map[string]dummyFile files map[string][]byte
filter ArchiveFileFilter filter ArchiveFileFilter
want map[string]dummyFile want map[string][]byte
wantDirs []string wantDirs []string
wantErr bool wantErr bool
}{ }{
{ {
name: "no filter", name: "no filter",
files: map[string]dummyFile{ files: map[string][]byte{
".git/config": {}, ".git/config": nil,
"file.jpg": {content: []byte(`contents`)}, "file.jpg": []byte(`contents`),
"manifest.yaml": {}, "manifest.yaml": nil,
}, },
filter: nil, filter: nil,
want: map[string]dummyFile{ want: map[string][]byte{
".git/config": {}, ".git/config": nil,
"file.jpg": {content: []byte(`contents`)}, "file.jpg": []byte(`contents`),
"manifest.yaml": {}, "manifest.yaml": nil,
}, },
}, },
{ {
name: "exclude VCS", name: "exclude VCS",
files: map[string]dummyFile{ files: map[string][]byte{
".git/config": {}, ".git/config": nil,
"manifest.yaml": {}, "manifest.yaml": nil,
}, },
wantDirs: []string{ wantDirs: []string{
"!.git", "!.git",
}, },
filter: SourceIgnoreFilter(nil, nil), filter: SourceIgnoreFilter(nil, nil),
want: map[string]dummyFile{ want: map[string][]byte{
"!.git/config": {}, "!.git/config": nil,
"manifest.yaml": {}, "manifest.yaml": nil,
}, },
}, },
{ {
name: "custom", name: "custom",
files: map[string]dummyFile{ files: map[string][]byte{
".git/config": {}, ".git/config": nil,
"custom": {}, "custom": nil,
"horse.jpg": {}, "horse.jpg": nil,
}, },
filter: SourceIgnoreFilter([]gitignore.Pattern{ filter: SourceIgnoreFilter([]gitignore.Pattern{
gitignore.ParsePattern("custom", nil), gitignore.ParsePattern("custom", nil),
}, nil), }, nil),
want: map[string]dummyFile{ want: map[string][]byte{
"!git/config": {}, "!git/config": nil,
"!custom": {}, "!custom": nil,
"horse.jpg": {}, "horse.jpg": nil,
}, },
wantErr: false, wantErr: false,
}, },
{ {
name: "including directories", name: "including directories",
files: map[string]dummyFile{ files: map[string][]byte{
"test/.gitkeep": {}, "test/.gitkeep": nil,
}, },
filter: SourceIgnoreFilter([]gitignore.Pattern{ filter: SourceIgnoreFilter([]gitignore.Pattern{
gitignore.ParsePattern("custom", nil), gitignore.ParsePattern("custom", nil),
@ -259,26 +260,6 @@ func TestStorage_Archive(t *testing.T) {
}, },
wantErr: false, wantErr: false,
}, },
{
name: "sets default file modes",
files: map[string]dummyFile{
"test/file": {
mode: 0o666,
},
"test/executable": {
mode: 0o777,
},
},
want: map[string]dummyFile{
"test/file": {
mode: defaultFileMode,
},
"test/executable": {
mode: defaultExeFileMode,
},
},
wantErr: false,
},
} }
for _, tt := range tests { for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) { t.Run(tt.name, func(t *testing.T) {
@ -302,78 +283,46 @@ func TestStorage_Archive(t *testing.T) {
} }
} }
func TestStorage_Remove(t *testing.T) {
t.Run("removes file", func(t *testing.T) {
g := NewWithT(t)
dir := t.TempDir()
s, err := NewStorage(dir, "", 0, 0)
g.Expect(err).ToNot(HaveOccurred())
artifact := sourcev1.Artifact{
Path: filepath.Join(dir, "test.txt"),
}
g.Expect(s.MkdirAll(artifact)).To(Succeed())
g.Expect(s.AtomicWriteFile(&artifact, bytes.NewReader([]byte("test")), 0o600)).To(Succeed())
g.Expect(s.ArtifactExist(artifact)).To(BeTrue())
g.Expect(s.Remove(artifact)).To(Succeed())
g.Expect(s.ArtifactExist(artifact)).To(BeFalse())
})
t.Run("error if file does not exist", func(t *testing.T) {
g := NewWithT(t)
dir := t.TempDir()
s, err := NewStorage(dir, "", 0, 0)
g.Expect(err).ToNot(HaveOccurred())
artifact := sourcev1.Artifact{
Path: filepath.Join(dir, "test.txt"),
}
err = s.Remove(artifact)
g.Expect(err).To(HaveOccurred())
g.Expect(errors.Is(err, os.ErrNotExist)).To(BeTrue())
})
}
func TestStorageRemoveAllButCurrent(t *testing.T) { func TestStorageRemoveAllButCurrent(t *testing.T) {
t.Run("bad directory in archive", func(t *testing.T) { t.Run("bad directory in archive", func(t *testing.T) {
dir := t.TempDir() dir, err := os.MkdirTemp("", "")
if err != nil {
t.Fatal(err)
}
t.Cleanup(func() { os.RemoveAll(dir) })
s, err := NewStorage(dir, "hostname", time.Minute, 2) s, err := NewStorage(dir, "hostname", time.Minute, 2)
if err != nil { if err != nil {
t.Fatalf("Valid path did not successfully return: %v", err) t.Fatalf("Valid path did not successfully return: %v", err)
} }
if _, err := s.RemoveAllButCurrent(sourcev1.Artifact{Path: filepath.Join(dir, "really", "nonexistent")}); err == nil { if _, err := s.RemoveAllButCurrent(sourcev1.Artifact{Path: path.Join(dir, "really", "nonexistent")}); err == nil {
t.Fatal("Did not error while pruning non-existent path") t.Fatal("Did not error while pruning non-existent path")
} }
}) })
t.Run("collect names of deleted items", func(t *testing.T) { t.Run("collect names of deleted items", func(t *testing.T) {
g := NewWithT(t) g := NewWithT(t)
dir := t.TempDir() dir, err := os.MkdirTemp("", "")
g.Expect(err).ToNot(HaveOccurred())
t.Cleanup(func() { os.RemoveAll(dir) })
s, err := NewStorage(dir, "hostname", time.Minute, 2) s, err := NewStorage(dir, "hostname", time.Minute, 2)
g.Expect(err).ToNot(HaveOccurred(), "failed to create new storage") g.Expect(err).ToNot(HaveOccurred(), "failed to create new storage")
artifact := sourcev1.Artifact{ artifact := sourcev1.Artifact{
Path: filepath.Join("foo", "bar", "artifact1.tar.gz"), Path: path.Join("foo", "bar", "artifact1.tar.gz"),
} }
// Create artifact dir and artifacts. // Create artifact dir and artifacts.
artifactDir := filepath.Join(dir, "foo", "bar") artifactDir := path.Join(dir, "foo", "bar")
g.Expect(os.MkdirAll(artifactDir, 0o750)).NotTo(HaveOccurred()) g.Expect(os.MkdirAll(artifactDir, 0o750)).NotTo(HaveOccurred())
current := []string{ current := []string{
filepath.Join(artifactDir, "artifact1.tar.gz"), path.Join(artifactDir, "artifact1.tar.gz"),
} }
wantDeleted := []string{ wantDeleted := []string{
filepath.Join(artifactDir, "file1.txt"), path.Join(artifactDir, "file1.txt"),
filepath.Join(artifactDir, "file2.txt"), path.Join(artifactDir, "file2.txt"),
} }
createFile := func(files []string) { createFile := func(files []string) {
for _, c := range files { for _, c := range files {
@ -402,22 +351,24 @@ func TestStorageRemoveAll(t *testing.T) {
}{ }{
{ {
name: "delete non-existent path", name: "delete non-existent path",
artifactPath: filepath.Join("foo", "bar", "artifact1.tar.gz"), artifactPath: path.Join("foo", "bar", "artifact1.tar.gz"),
createArtifactPath: false, createArtifactPath: false,
wantDeleted: "", wantDeleted: "",
}, },
{ {
name: "delete existing path", name: "delete existing path",
artifactPath: filepath.Join("foo", "bar", "artifact1.tar.gz"), artifactPath: path.Join("foo", "bar", "artifact1.tar.gz"),
createArtifactPath: true, createArtifactPath: true,
wantDeleted: filepath.Join("foo", "bar"), wantDeleted: path.Join("foo", "bar"),
}, },
} }
for _, tt := range tests { for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) { t.Run(tt.name, func(t *testing.T) {
g := NewWithT(t) g := NewWithT(t)
dir := t.TempDir() dir, err := os.MkdirTemp("", "")
g.Expect(err).ToNot(HaveOccurred())
t.Cleanup(func() { os.RemoveAll(dir) })
s, err := NewStorage(dir, "hostname", time.Minute, 2) s, err := NewStorage(dir, "hostname", time.Minute, 2)
g.Expect(err).ToNot(HaveOccurred(), "failed to create new storage") g.Expect(err).ToNot(HaveOccurred(), "failed to create new storage")
@ -427,7 +378,7 @@ func TestStorageRemoveAll(t *testing.T) {
} }
if tt.createArtifactPath { if tt.createArtifactPath {
g.Expect(os.MkdirAll(filepath.Join(dir, tt.artifactPath), 0o750)).ToNot(HaveOccurred()) g.Expect(os.MkdirAll(path.Join(dir, tt.artifactPath), 0o750)).ToNot(HaveOccurred())
} }
deleted, err := s.RemoveAll(artifact) deleted, err := s.RemoveAll(artifact)
@ -443,7 +394,11 @@ func TestStorageCopyFromPath(t *testing.T) {
Content []byte Content []byte
} }
dir := t.TempDir() dir, err := createStoragePath()
if err != nil {
t.Fatal(err)
}
t.Cleanup(cleanupStoragePath(dir))
storage, err := NewStorage(dir, "hostname", time.Minute, 2) storage, err := NewStorage(dir, "hostname", time.Minute, 2)
if err != nil { if err != nil {
@ -451,7 +406,11 @@ func TestStorageCopyFromPath(t *testing.T) {
} }
createFile := func(file *File) (absPath string, err error) { createFile := func(file *File) (absPath string, err error) {
dir = t.TempDir() dir, err = os.MkdirTemp("", "test-files-")
if err != nil {
return
}
t.Cleanup(cleanupStoragePath(dir))
absPath = filepath.Join(dir, file.Name) absPath = filepath.Join(dir, file.Name)
if err = os.MkdirAll(filepath.Dir(absPath), 0o750); err != nil { if err = os.MkdirAll(filepath.Dir(absPath), 0o750); err != nil {
return return
@ -515,6 +474,7 @@ func TestStorageCopyFromPath(t *testing.T) {
t.Error(err) t.Error(err)
return return
} }
defer os.RemoveAll(absPath)
artifact := sourcev1.Artifact{ artifact := sourcev1.Artifact{
Path: filepath.Join(randStringRunes(10), randStringRunes(10), randStringRunes(10)), Path: filepath.Join(randStringRunes(10), randStringRunes(10), randStringRunes(10)),
} }
@ -530,7 +490,7 @@ func TestStorageCopyFromPath(t *testing.T) {
} }
func TestStorage_getGarbageFiles(t *testing.T) { func TestStorage_getGarbageFiles(t *testing.T) {
artifactFolder := filepath.Join("foo", "bar") artifactFolder := path.Join("foo", "bar")
tests := []struct { tests := []struct {
name string name string
artifactPaths []string artifactPaths []string
@ -543,119 +503,77 @@ func TestStorage_getGarbageFiles(t *testing.T) {
{ {
name: "delete files based on maxItemsToBeRetained", name: "delete files based on maxItemsToBeRetained",
artifactPaths: []string{ artifactPaths: []string{
filepath.Join(artifactFolder, "artifact1.tar.gz"), path.Join(artifactFolder, "artifact1.tar.gz"),
filepath.Join(artifactFolder, "artifact2.tar.gz"), path.Join(artifactFolder, "artifact2.tar.gz"),
filepath.Join(artifactFolder, "artifact3.tar.gz"), path.Join(artifactFolder, "artifact3.tar.gz"),
filepath.Join(artifactFolder, "artifact4.tar.gz"), path.Join(artifactFolder, "artifact4.tar.gz"),
filepath.Join(artifactFolder, "artifact5.tar.gz"), path.Join(artifactFolder, "artifact5.tar.gz"),
}, },
createPause: time.Millisecond * 10, createPause: time.Millisecond * 10,
ttl: time.Minute * 2, ttl: time.Minute * 2,
totalCountLimit: 10, totalCountLimit: 10,
maxItemsToBeRetained: 2, maxItemsToBeRetained: 2,
wantDeleted: []string{ wantDeleted: []string{
filepath.Join(artifactFolder, "artifact1.tar.gz"), path.Join(artifactFolder, "artifact1.tar.gz"),
filepath.Join(artifactFolder, "artifact2.tar.gz"), path.Join(artifactFolder, "artifact2.tar.gz"),
filepath.Join(artifactFolder, "artifact3.tar.gz"), path.Join(artifactFolder, "artifact3.tar.gz"),
},
},
{
name: "delete files based on maxItemsToBeRetained, ignore lock files",
artifactPaths: []string{
filepath.Join(artifactFolder, "artifact1.tar.gz"),
filepath.Join(artifactFolder, "artifact1.tar.gz.lock"),
filepath.Join(artifactFolder, "artifact2.tar.gz"),
filepath.Join(artifactFolder, "artifact2.tar.gz.lock"),
filepath.Join(artifactFolder, "artifact3.tar.gz"),
filepath.Join(artifactFolder, "artifact3.tar.gz.lock"),
filepath.Join(artifactFolder, "artifact4.tar.gz"),
filepath.Join(artifactFolder, "artifact5.tar.gz"),
},
createPause: time.Millisecond * 10,
ttl: time.Minute * 2,
totalCountLimit: 10,
maxItemsToBeRetained: 2,
wantDeleted: []string{
filepath.Join(artifactFolder, "artifact1.tar.gz"),
filepath.Join(artifactFolder, "artifact2.tar.gz"),
filepath.Join(artifactFolder, "artifact3.tar.gz"),
}, },
}, },
{ {
name: "delete files based on ttl", name: "delete files based on ttl",
artifactPaths: []string{ artifactPaths: []string{
filepath.Join(artifactFolder, "artifact1.tar.gz"), path.Join(artifactFolder, "artifact1.tar.gz"),
filepath.Join(artifactFolder, "artifact2.tar.gz"), path.Join(artifactFolder, "artifact2.tar.gz"),
filepath.Join(artifactFolder, "artifact3.tar.gz"), path.Join(artifactFolder, "artifact3.tar.gz"),
filepath.Join(artifactFolder, "artifact4.tar.gz"), path.Join(artifactFolder, "artifact4.tar.gz"),
filepath.Join(artifactFolder, "artifact5.tar.gz"), path.Join(artifactFolder, "artifact5.tar.gz"),
}, },
createPause: time.Second * 1, createPause: time.Second * 1,
ttl: time.Second*3 + time.Millisecond*500, ttl: time.Second*3 + time.Millisecond*500,
totalCountLimit: 10, totalCountLimit: 10,
maxItemsToBeRetained: 4, maxItemsToBeRetained: 4,
wantDeleted: []string{ wantDeleted: []string{
filepath.Join(artifactFolder, "artifact1.tar.gz"), path.Join(artifactFolder, "artifact1.tar.gz"),
filepath.Join(artifactFolder, "artifact2.tar.gz"), path.Join(artifactFolder, "artifact2.tar.gz"),
},
},
{
name: "delete files based on ttl, ignore lock files",
artifactPaths: []string{
filepath.Join(artifactFolder, "artifact1.tar.gz"),
filepath.Join(artifactFolder, "artifact1.tar.gz.lock"),
filepath.Join(artifactFolder, "artifact2.tar.gz"),
filepath.Join(artifactFolder, "artifact2.tar.gz.lock"),
filepath.Join(artifactFolder, "artifact3.tar.gz"),
filepath.Join(artifactFolder, "artifact4.tar.gz"),
filepath.Join(artifactFolder, "artifact5.tar.gz"),
},
createPause: time.Second * 1,
ttl: time.Second*3 + time.Millisecond*500,
totalCountLimit: 10,
maxItemsToBeRetained: 4,
wantDeleted: []string{
filepath.Join(artifactFolder, "artifact1.tar.gz"),
filepath.Join(artifactFolder, "artifact2.tar.gz"),
}, },
}, },
{ {
name: "delete files based on ttl and maxItemsToBeRetained", name: "delete files based on ttl and maxItemsToBeRetained",
artifactPaths: []string{ artifactPaths: []string{
filepath.Join(artifactFolder, "artifact1.tar.gz"), path.Join(artifactFolder, "artifact1.tar.gz"),
filepath.Join(artifactFolder, "artifact2.tar.gz"), path.Join(artifactFolder, "artifact2.tar.gz"),
filepath.Join(artifactFolder, "artifact3.tar.gz"), path.Join(artifactFolder, "artifact3.tar.gz"),
filepath.Join(artifactFolder, "artifact4.tar.gz"), path.Join(artifactFolder, "artifact4.tar.gz"),
filepath.Join(artifactFolder, "artifact5.tar.gz"), path.Join(artifactFolder, "artifact5.tar.gz"),
filepath.Join(artifactFolder, "artifact6.tar.gz"), path.Join(artifactFolder, "artifact6.tar.gz"),
}, },
createPause: time.Second * 1, createPause: time.Second * 1,
ttl: time.Second*5 + time.Millisecond*500, ttl: time.Second*5 + time.Millisecond*500,
totalCountLimit: 10, totalCountLimit: 10,
maxItemsToBeRetained: 4, maxItemsToBeRetained: 4,
wantDeleted: []string{ wantDeleted: []string{
filepath.Join(artifactFolder, "artifact1.tar.gz"), path.Join(artifactFolder, "artifact1.tar.gz"),
filepath.Join(artifactFolder, "artifact2.tar.gz"), path.Join(artifactFolder, "artifact2.tar.gz"),
}, },
}, },
{ {
name: "delete files based on ttl and maxItemsToBeRetained and totalCountLimit", name: "delete files based on ttl and maxItemsToBeRetained and totalCountLimit",
artifactPaths: []string{ artifactPaths: []string{
filepath.Join(artifactFolder, "artifact1.tar.gz"), path.Join(artifactFolder, "artifact1.tar.gz"),
filepath.Join(artifactFolder, "artifact2.tar.gz"), path.Join(artifactFolder, "artifact2.tar.gz"),
filepath.Join(artifactFolder, "artifact3.tar.gz"), path.Join(artifactFolder, "artifact3.tar.gz"),
filepath.Join(artifactFolder, "artifact4.tar.gz"), path.Join(artifactFolder, "artifact4.tar.gz"),
filepath.Join(artifactFolder, "artifact5.tar.gz"), path.Join(artifactFolder, "artifact5.tar.gz"),
filepath.Join(artifactFolder, "artifact6.tar.gz"), path.Join(artifactFolder, "artifact6.tar.gz"),
}, },
createPause: time.Millisecond * 500, createPause: time.Millisecond * 500,
ttl: time.Millisecond * 500, ttl: time.Millisecond * 500,
totalCountLimit: 3, totalCountLimit: 3,
maxItemsToBeRetained: 2, maxItemsToBeRetained: 2,
wantDeleted: []string{ wantDeleted: []string{
filepath.Join(artifactFolder, "artifact1.tar.gz"), path.Join(artifactFolder, "artifact1.tar.gz"),
filepath.Join(artifactFolder, "artifact2.tar.gz"), path.Join(artifactFolder, "artifact2.tar.gz"),
filepath.Join(artifactFolder, "artifact3.tar.gz"), path.Join(artifactFolder, "artifact3.tar.gz"),
}, },
}, },
} }
@ -663,7 +581,9 @@ func TestStorage_getGarbageFiles(t *testing.T) {
for _, tt := range tests { for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) { t.Run(tt.name, func(t *testing.T) {
g := NewWithT(t) g := NewWithT(t)
dir := t.TempDir() dir, err := os.MkdirTemp("", "")
g.Expect(err).ToNot(HaveOccurred())
t.Cleanup(func() { os.RemoveAll(dir) })
s, err := NewStorage(dir, "hostname", tt.ttl, tt.maxItemsToBeRetained) s, err := NewStorage(dir, "hostname", tt.ttl, tt.maxItemsToBeRetained)
g.Expect(err).ToNot(HaveOccurred(), "failed to create new storage") g.Expect(err).ToNot(HaveOccurred(), "failed to create new storage")
@ -671,9 +591,9 @@ func TestStorage_getGarbageFiles(t *testing.T) {
artifact := sourcev1.Artifact{ artifact := sourcev1.Artifact{
Path: tt.artifactPaths[len(tt.artifactPaths)-1], Path: tt.artifactPaths[len(tt.artifactPaths)-1],
} }
g.Expect(os.MkdirAll(filepath.Join(dir, artifactFolder), 0o750)).ToNot(HaveOccurred()) g.Expect(os.MkdirAll(path.Join(dir, artifactFolder), 0o750)).ToNot(HaveOccurred())
for _, artifactPath := range tt.artifactPaths { for _, artifactPath := range tt.artifactPaths {
f, err := os.Create(filepath.Join(dir, artifactPath)) f, err := os.Create(path.Join(dir, artifactPath))
g.Expect(err).ToNot(HaveOccurred()) g.Expect(err).ToNot(HaveOccurred())
g.Expect(f.Close()).ToNot(HaveOccurred()) g.Expect(f.Close()).ToNot(HaveOccurred())
time.Sleep(tt.createPause) time.Sleep(tt.createPause)
@ -699,11 +619,10 @@ func TestStorage_getGarbageFiles(t *testing.T) {
} }
func TestStorage_GarbageCollect(t *testing.T) { func TestStorage_GarbageCollect(t *testing.T) {
artifactFolder := filepath.Join("foo", "bar") artifactFolder := path.Join("foo", "bar")
tests := []struct { tests := []struct {
name string name string
artifactPaths []string artifactPaths []string
wantCollected []string
wantDeleted []string wantDeleted []string
wantErr string wantErr string
ctxTimeout time.Duration ctxTimeout time.Duration
@ -711,32 +630,24 @@ func TestStorage_GarbageCollect(t *testing.T) {
{ {
name: "garbage collects", name: "garbage collects",
artifactPaths: []string{ artifactPaths: []string{
filepath.Join(artifactFolder, "artifact1.tar.gz"), path.Join(artifactFolder, "artifact1.tar.gz"),
filepath.Join(artifactFolder, "artifact1.tar.gz.lock"), path.Join(artifactFolder, "artifact2.tar.gz"),
filepath.Join(artifactFolder, "artifact2.tar.gz"), path.Join(artifactFolder, "artifact3.tar.gz"),
filepath.Join(artifactFolder, "artifact2.tar.gz.lock"), path.Join(artifactFolder, "artifact4.tar.gz"),
filepath.Join(artifactFolder, "artifact3.tar.gz"),
filepath.Join(artifactFolder, "artifact4.tar.gz"),
},
wantCollected: []string{
filepath.Join(artifactFolder, "artifact1.tar.gz"),
filepath.Join(artifactFolder, "artifact2.tar.gz"),
}, },
wantDeleted: []string{ wantDeleted: []string{
filepath.Join(artifactFolder, "artifact1.tar.gz"), path.Join(artifactFolder, "artifact1.tar.gz"),
filepath.Join(artifactFolder, "artifact1.tar.gz.lock"), path.Join(artifactFolder, "artifact2.tar.gz"),
filepath.Join(artifactFolder, "artifact2.tar.gz"),
filepath.Join(artifactFolder, "artifact2.tar.gz.lock"),
}, },
ctxTimeout: time.Second * 1, ctxTimeout: time.Second * 1,
}, },
{ {
name: "garbage collection fails with context timeout", name: "garbage collection fails with context timeout",
artifactPaths: []string{ artifactPaths: []string{
filepath.Join(artifactFolder, "artifact1.tar.gz"), path.Join(artifactFolder, "artifact1.tar.gz"),
filepath.Join(artifactFolder, "artifact2.tar.gz"), path.Join(artifactFolder, "artifact2.tar.gz"),
filepath.Join(artifactFolder, "artifact3.tar.gz"), path.Join(artifactFolder, "artifact3.tar.gz"),
filepath.Join(artifactFolder, "artifact4.tar.gz"), path.Join(artifactFolder, "artifact4.tar.gz"),
}, },
wantErr: "context deadline exceeded", wantErr: "context deadline exceeded",
ctxTimeout: time.Nanosecond * 1, ctxTimeout: time.Nanosecond * 1,
@ -746,7 +657,9 @@ func TestStorage_GarbageCollect(t *testing.T) {
for _, tt := range tests { for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) { t.Run(tt.name, func(t *testing.T) {
g := NewWithT(t) g := NewWithT(t)
dir := t.TempDir() dir, err := os.MkdirTemp("", "")
g.Expect(err).ToNot(HaveOccurred())
t.Cleanup(func() { os.RemoveAll(dir) })
s, err := NewStorage(dir, "hostname", time.Second*2, 2) s, err := NewStorage(dir, "hostname", time.Second*2, 2)
g.Expect(err).ToNot(HaveOccurred(), "failed to create new storage") g.Expect(err).ToNot(HaveOccurred(), "failed to create new storage")
@ -754,9 +667,9 @@ func TestStorage_GarbageCollect(t *testing.T) {
artifact := sourcev1.Artifact{ artifact := sourcev1.Artifact{
Path: tt.artifactPaths[len(tt.artifactPaths)-1], Path: tt.artifactPaths[len(tt.artifactPaths)-1],
} }
g.Expect(os.MkdirAll(filepath.Join(dir, artifactFolder), 0o750)).ToNot(HaveOccurred()) g.Expect(os.MkdirAll(path.Join(dir, artifactFolder), 0o750)).ToNot(HaveOccurred())
for i, artifactPath := range tt.artifactPaths { for i, artifactPath := range tt.artifactPaths {
f, err := os.Create(filepath.Join(dir, artifactPath)) f, err := os.Create(path.Join(dir, artifactPath))
g.Expect(err).ToNot(HaveOccurred()) g.Expect(err).ToNot(HaveOccurred())
g.Expect(f.Close()).ToNot(HaveOccurred()) g.Expect(f.Close()).ToNot(HaveOccurred())
if i != len(tt.artifactPaths)-1 { if i != len(tt.artifactPaths)-1 {
@ -764,90 +677,29 @@ func TestStorage_GarbageCollect(t *testing.T) {
} }
} }
collectedPaths, err := s.GarbageCollect(context.TODO(), artifact, tt.ctxTimeout) deletedPaths, err := s.GarbageCollect(context.TODO(), artifact, tt.ctxTimeout)
if tt.wantErr == "" { if tt.wantErr == "" {
g.Expect(err).ToNot(HaveOccurred(), "failed to collect garbage files") g.Expect(err).ToNot(HaveOccurred(), "failed to collect garbage files")
} else { } else {
g.Expect(err).To(HaveOccurred()) g.Expect(err).To(HaveOccurred())
g.Expect(err.Error()).To(ContainSubstring(tt.wantErr)) g.Expect(err.Error()).To(ContainSubstring(tt.wantErr))
} }
if len(tt.wantCollected) > 0 { if len(tt.wantDeleted) > 0 {
g.Expect(len(tt.wantCollected)).To(Equal(len(collectedPaths))) g.Expect(len(tt.wantDeleted)).To(Equal(len(deletedPaths)))
for _, wantCollectedPath := range tt.wantCollected { for _, wantDeletedPath := range tt.wantDeleted {
present := false present := false
for _, collectedPath := range collectedPaths { for _, deletedPath := range deletedPaths {
if strings.Contains(collectedPath, wantCollectedPath) { if strings.Contains(deletedPath, wantDeletedPath) {
g.Expect(collectedPath).ToNot(BeAnExistingFile()) g.Expect(deletedPath).ToNot(BeAnExistingFile())
present = true present = true
break break
} }
} }
if present == false { if present == false {
g.Fail(fmt.Sprintf("expected file to be garbage collected, still exists: %s", wantCollectedPath)) g.Fail(fmt.Sprintf("expected file to be deleted, still exists: %s", wantDeletedPath))
} }
} }
} }
for _, delFile := range tt.wantDeleted {
g.Expect(filepath.Join(dir, delFile)).ToNot(BeAnExistingFile())
}
}) })
} }
} }
func TestStorage_VerifyArtifact(t *testing.T) {
g := NewWithT(t)
dir := t.TempDir()
s, err := NewStorage(dir, "", 0, 0)
g.Expect(err).ToNot(HaveOccurred(), "failed to create new storage")
g.Expect(os.WriteFile(filepath.Join(dir, "artifact"), []byte("test"), 0o600)).To(Succeed())
t.Run("artifact without digest", func(t *testing.T) {
g := NewWithT(t)
err := s.VerifyArtifact(sourcev1.Artifact{})
g.Expect(err).To(HaveOccurred())
g.Expect(err).To(MatchError("artifact has no digest"))
})
t.Run("artifact with invalid digest", func(t *testing.T) {
g := NewWithT(t)
err := s.VerifyArtifact(sourcev1.Artifact{Digest: "invalid"})
g.Expect(err).To(HaveOccurred())
g.Expect(err).To(MatchError("failed to parse artifact digest 'invalid': invalid checksum digest format"))
})
t.Run("artifact with invalid path", func(t *testing.T) {
g := NewWithT(t)
err := s.VerifyArtifact(sourcev1.Artifact{
Digest: "sha256:9ba7a35ce8acd3557fe30680ef193ca7a36bb5dc62788f30de7122a0a5beab69",
Path: "invalid",
})
g.Expect(err).To(HaveOccurred())
g.Expect(errors.Is(err, os.ErrNotExist)).To(BeTrue())
})
t.Run("artifact with digest mismatch", func(t *testing.T) {
g := NewWithT(t)
err := s.VerifyArtifact(sourcev1.Artifact{
Digest: "sha256:9ba7a35ce8acd3557fe30680ef193ca7a36bb5dc62788f30de7122a0a5beab69",
Path: "artifact",
})
g.Expect(err).To(HaveOccurred())
g.Expect(err).To(MatchError("computed digest doesn't match 'sha256:9ba7a35ce8acd3557fe30680ef193ca7a36bb5dc62788f30de7122a0a5beab69'"))
})
t.Run("artifact with digest match", func(t *testing.T) {
g := NewWithT(t)
err := s.VerifyArtifact(sourcev1.Artifact{
Digest: "sha256:9f86d081884c7d659a2feaa0c55ad015a3bf4f1b2b0b822cd15d6c15b0f00a08",
Path: "artifact",
})
g.Expect(err).ToNot(HaveOccurred())
})
}

controllers/suite_test.go (new file, 207 lines)

@ -0,0 +1,207 @@
/*
Copyright 2020 The Flux authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package controllers
import (
"fmt"
"math/rand"
"os"
"path/filepath"
"testing"
"time"
"helm.sh/helm/v3/pkg/getter"
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
"k8s.io/client-go/kubernetes/scheme"
"k8s.io/client-go/tools/record"
ctrl "sigs.k8s.io/controller-runtime"
"github.com/fluxcd/pkg/runtime/controller"
"github.com/fluxcd/pkg/runtime/testenv"
"github.com/fluxcd/pkg/testserver"
sourcev1 "github.com/fluxcd/source-controller/api/v1beta2"
"github.com/fluxcd/source-controller/internal/cache"
// +kubebuilder:scaffold:imports
)
// These tests make use of plain Go using Gomega for assertions.
// At the beginning of every (sub)test Gomega can be initialized
// using gomega.NewWithT.
// Refer to http://onsi.github.io/gomega/ to learn more about
// Gomega.
const (
timeout = 10 * time.Second
interval = 1 * time.Second
retentionTTL = 2 * time.Second
retentionRecords = 2
)
var (
testEnv *testenv.Environment
testStorage *Storage
testServer *testserver.ArtifactServer
testMetricsH controller.Metrics
ctx = ctrl.SetupSignalHandler()
)
var (
testGetters = getter.Providers{
getter.Provider{
Schemes: []string{"http", "https"},
New: getter.NewHTTPGetter,
},
}
)
var (
tlsPublicKey []byte
tlsPrivateKey []byte
tlsCA []byte
)
func init() {
rand.Seed(time.Now().UnixNano())
}
func TestMain(m *testing.M) {
initTestTLS()
utilruntime.Must(sourcev1.AddToScheme(scheme.Scheme))
testEnv = testenv.New(testenv.WithCRDPath(filepath.Join("..", "config", "crd", "bases")))
var err error
testServer, err = testserver.NewTempArtifactServer()
if err != nil {
panic(fmt.Sprintf("Failed to create a temporary storage server: %v", err))
}
fmt.Println("Starting the test storage server")
testServer.Start()
testStorage, err = newTestStorage(testServer.HTTPServer)
if err != nil {
panic(fmt.Sprintf("Failed to create a test storage: %v", err))
}
testMetricsH = controller.MustMakeMetrics(testEnv)
if err := (&GitRepositoryReconciler{
Client: testEnv,
EventRecorder: record.NewFakeRecorder(32),
Metrics: testMetricsH,
Storage: testStorage,
}).SetupWithManager(testEnv); err != nil {
panic(fmt.Sprintf("Failed to start GitRepositoryReconciler: %v", err))
}
if err := (&BucketReconciler{
Client: testEnv,
EventRecorder: record.NewFakeRecorder(32),
Metrics: testMetricsH,
Storage: testStorage,
}).SetupWithManager(testEnv); err != nil {
panic(fmt.Sprintf("Failed to start BucketReconciler: %v", err))
}
if err := (&HelmRepositoryReconciler{
Client: testEnv,
EventRecorder: record.NewFakeRecorder(32),
Metrics: testMetricsH,
Getters: testGetters,
Storage: testStorage,
}).SetupWithManager(testEnv); err != nil {
panic(fmt.Sprintf("Failed to start HelmRepositoryReconciler: %v", err))
}
c := cache.New(5, 1*time.Second)
cacheRecorder := cache.MustMakeMetrics()
if err := (&HelmChartReconciler{
Client: testEnv,
EventRecorder: record.NewFakeRecorder(32),
Metrics: testMetricsH,
Getters: testGetters,
Storage: testStorage,
Cache: c,
TTL: 1 * time.Second,
CacheRecorder: cacheRecorder,
}).SetupWithManager(testEnv); err != nil {
panic(fmt.Sprintf("Failed to start HelmRepositoryReconciler: %v", err))
}
go func() {
fmt.Println("Starting the test environment")
if err := testEnv.Start(ctx); err != nil {
panic(fmt.Sprintf("Failed to start the test environment manager: %v", err))
}
}()
<-testEnv.Manager.Elected()
code := m.Run()
fmt.Println("Stopping the test environment")
if err := testEnv.Stop(); err != nil {
panic(fmt.Sprintf("Failed to stop the test environment: %v", err))
}
fmt.Println("Stopping the storage server")
testServer.Stop()
if err := os.RemoveAll(testServer.Root()); err != nil {
panic(fmt.Sprintf("Failed to remove storage server dir: %v", err))
}
os.Exit(code)
}
func initTestTLS() {
var err error
tlsPublicKey, err = os.ReadFile("testdata/certs/server.pem")
if err != nil {
panic(err)
}
tlsPrivateKey, err = os.ReadFile("testdata/certs/server-key.pem")
if err != nil {
panic(err)
}
tlsCA, err = os.ReadFile("testdata/certs/ca.pem")
if err != nil {
panic(err)
}
}
func newTestStorage(s *testserver.HTTPServer) (*Storage, error) {
storage, err := NewStorage(s.Root(), s.URL(), retentionTTL, retentionRecords)
if err != nil {
return nil, err
}
return storage, nil
}
var letterRunes = []rune("abcdefghijklmnopqrstuvwxyz1234567890")
func randStringRunes(n int) string {
b := make([]rune, n)
for i := range b {
b[i] = letterRunes[rand.Intn(len(letterRunes))]
}
return string(b)
}
func int64p(i int64) *int64 {
return &i
}

controllers/testdata/certs/Makefile

@ -12,7 +12,7 @@
# See the License for the specific language governing permissions and # See the License for the specific language governing permissions and
# limitations under the License. # limitations under the License.
all: server-key.pem client-key.pem all: server-key.pem
ca-key.pem: ca-csr.json ca-key.pem: ca-csr.json
cfssl gencert -initca ca-csr.json | cfssljson -bare ca cfssl gencert -initca ca-csr.json | cfssljson -bare ca
@ -28,13 +28,3 @@ server-key.pem: server-csr.json ca-config.json ca-key.pem
server-csr.json | cfssljson -bare server server-csr.json | cfssljson -bare server
sever.pem: server-key.pem sever.pem: server-key.pem
server.csr: server-key.pem server.csr: server-key.pem
client-key.pem: client-csr.json ca-config.json ca-key.pem
cfssl gencert \
-ca=ca.pem \
-ca-key=ca-key.pem \
-config=ca-config.json \
-profile=web-servers \
client-csr.json | cfssljson -bare client
client.pem: client-key.pem
client.csr: client-key.pem

controllers/testdata/certs/ca-key.pem (new vendored file, 5 lines)

@ -0,0 +1,5 @@
-----BEGIN EC PRIVATE KEY-----
MHcCAQEEIOH/u9dMcpVcZ0+X9Fc78dCTj8SHuXawhLjhu/ej64WToAoGCCqGSM49
AwEHoUQDQgAEruH/kPxtX3cyYR2G7TYmxLq6AHyzo/NGXc9XjGzdJutE2SQzn37H
dvSJbH+Lvqo9ik0uiJVRVdCYD1j7gNszGA==
-----END EC PRIVATE KEY-----

controllers/testdata/certs/ca.csr (new vendored file, 9 lines)

@ -0,0 +1,9 @@
-----BEGIN CERTIFICATE REQUEST-----
MIIBIDCBxgIBADAZMRcwFQYDVQQDEw5leGFtcGxlLmNvbSBDQTBZMBMGByqGSM49
AgEGCCqGSM49AwEHA0IABK7h/5D8bV93MmEdhu02JsS6ugB8s6PzRl3PV4xs3Sbr
RNkkM59+x3b0iWx/i76qPYpNLoiVUVXQmA9Y+4DbMxigSzBJBgkqhkiG9w0BCQ4x
PDA6MDgGA1UdEQQxMC+CCWxvY2FsaG9zdIILZXhhbXBsZS5jb22CD3d3dy5leGFt
cGxlLmNvbYcEfwAAATAKBggqhkjOPQQDAgNJADBGAiEAkw85nyLhJssyCYsaFvRU
EErhu66xHPJug/nG50uV5OoCIQCUorrflOSxfChPeCe4xfwcPv7FpcCYbKVYtGzz
b34Wow==
-----END CERTIFICATE REQUEST-----

controllers/testdata/certs/ca.pem (new vendored file, 11 lines)

@ -0,0 +1,11 @@
-----BEGIN CERTIFICATE-----
MIIBhzCCAS2gAwIBAgIUdsAtiX3gN0uk7ddxASWYE/tdv0wwCgYIKoZIzj0EAwIw
GTEXMBUGA1UEAxMOZXhhbXBsZS5jb20gQ0EwHhcNMjAwNDE3MDgxODAwWhcNMjUw
NDE2MDgxODAwWjAZMRcwFQYDVQQDEw5leGFtcGxlLmNvbSBDQTBZMBMGByqGSM49
AgEGCCqGSM49AwEHA0IABK7h/5D8bV93MmEdhu02JsS6ugB8s6PzRl3PV4xs3Sbr
RNkkM59+x3b0iWx/i76qPYpNLoiVUVXQmA9Y+4DbMxijUzBRMA4GA1UdDwEB/wQE
AwIBBjAPBgNVHRMBAf8EBTADAQH/MB0GA1UdDgQWBBQGyUiU1QEZiMAqjsnIYTwZ
4yp5wzAPBgNVHREECDAGhwR/AAABMAoGCCqGSM49BAMCA0gAMEUCIQDzdtvKdE8O
1+WRTZ9MuSiFYcrEz7Zne7VXouDEKqKEigIgM4WlbDeuNCKbqhqj+xZV0pa3rweb
OD8EjjCMY69RMO0=
-----END CERTIFICATE-----

(testdata certificate key file; file header not captured in this diff view)

@ -0,0 +1,5 @@
-----BEGIN EC PRIVATE KEY-----
MHcCAQEEIKQbEXV6nljOHMmPrWVWQ+JrAE5wsbE9iMhfY7wlJgXOoAoGCCqGSM49
AwEHoUQDQgAE+53oBGlrvVUTelSGYji8GNHVhVg8jOs1PeeLuXCIZjQmctHLFEq3
fE+mGxCL93MtpYzlwIWBf0m7pEGQre6bzg==
-----END EC PRIVATE KEY-----

Some files were not shown because too many files have changed in this diff.