Compare commits


No commits in common. "main" and "v0.21.2" have entirely different histories.

360 changed files with 18937 additions and 73277 deletions


@@ -1 +1 @@
build/
hack/libgit2/

.github/actions/run-tests/Dockerfile vendored Normal file

@@ -0,0 +1,35 @@
ARG BASE_VARIANT=bullseye
ARG GO_VERSION=1.17.5
ARG XX_VERSION=1.1.0
ARG LIBGIT2_IMG=ghcr.io/fluxcd/golang-with-libgit2
ARG LIBGIT2_TAG=libgit2-1.1.1-3
FROM tonistiigi/xx:${XX_VERSION} AS xx
FROM ${LIBGIT2_IMG}:${LIBGIT2_TAG} as libgit2
FROM golang:${GO_VERSION}-${BASE_VARIANT} as gostable
# Copy the build utilities
COPY --from=xx / /
COPY --from=libgit2 /Makefile /libgit2/
# Install the libgit2 build dependencies
RUN make -C /libgit2 cmake
RUN make -C /libgit2 dependencies
# Compile and install libgit2
RUN FLAGS=$(xx-clang --print-cmake-defines) make -C /libgit2 libgit2
# Use the GitHub Actions uid:gid combination for proper fs permissions
RUN groupadd -g 116 test && \
useradd -u 1001 --gid test --shell /bin/sh --create-home test
# Set path to envtest binaries.
ENV PATH="/github/workspace/envtest:${PATH}"
# Run as test user
USER test
ENTRYPOINT [ "/bin/sh", "-c" ]

.github/actions/run-tests/action.yml vendored Normal file

@@ -0,0 +1,12 @@
name: 'Run tests'
description: 'Run tests in docker container'
inputs:
command:
description: 'Command to run inside the container'
required: true
default: 'make test'
runs:
using: 'docker'
image: 'Dockerfile'
args:
- ${{ inputs.command }}
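For illustration only, a workflow job could invoke this composite action roughly as sketched below; the step name is made up here, and the `command` input falls back to the `make test` default declared above when omitted.

```yaml
# Hypothetical workflow step calling the run-tests composite action defined above.
steps:
  - name: Run tests in container   # step name is illustrative
    uses: ./.github/actions/run-tests
    with:
      command: make test           # matches the input's declared default
```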


@@ -1,40 +0,0 @@
version: 2
updates:
- package-ecosystem: "gomod"
directory: "/"
labels: ["dependencies"]
schedule:
interval: "monthly"
groups:
go-deps:
patterns:
- "*"
allow:
- dependency-type: "direct"
ignore:
# Cloud SDK are updated manually
- dependency-name: "cloud.google.com/*"
- dependency-name: "github.com/Azure/azure-sdk-for-go/*"
# Kubernetes deps are updated by fluxcd/pkg/runtime
- dependency-name: "k8s.io/*"
- dependency-name: "sigs.k8s.io/*"
- dependency-name: "github.com/go-logr/*"
# OCI deps are updated by fluxcd/pkg/oci
- dependency-name: "github.com/docker/*"
- dependency-name: "github.com/distribution/*"
- dependency-name: "github.com/google/go-containerregistry*"
- dependency-name: "github.com/opencontainers/*"
# Helm deps are updated by fluxcd/pkg/helmtestserver
- dependency-name: "helm.sh/helm/*"
# Flux APIs are updated at release time
- dependency-name: "github.com/fluxcd/source-controller/api"
- package-ecosystem: "github-actions"
directory: "/"
labels: ["area/ci", "dependencies"]
groups:
ci:
patterns:
- "*"
schedule:
interval: "monthly"

.github/labels.yaml vendored

@@ -1,39 +0,0 @@
# Configuration file to declaratively configure labels
# Ref: https://github.com/EndBug/label-sync#Config-files
- name: area/bucket
description: Bucket related issues and pull requests
color: '#00b140'
- name: area/git
description: Git related issues and pull requests
color: '#863faf'
- name: area/helm
description: Helm related issues and pull requests
color: '#1673b6'
- name: area/oci
description: OCI related issues and pull requests
color: '#c739ff'
- name: area/storage
description: Storage related issues and pull requests
color: '#4b0082'
- name: backport:release/v1.0.x
description: To be backported to release/v1.0.x
color: '#ffd700'
- name: backport:release/v1.1.x
description: To be backported to release/v1.1.x
color: '#ffd700'
- name: backport:release/v1.2.x
description: To be backported to release/v1.2.x
color: '#ffd700'
- name: backport:release/v1.3.x
description: To be backported to release/v1.3.x
color: '#ffd700'
- name: backport:release/v1.4.x
description: To be backported to release/v1.4.x
color: '#ffd700'
- name: backport:release/v1.5.x
description: To be backported to release/v1.5.x
color: '#ffd700'
- name: backport:release/v1.6.x
description: To be backported to release/v1.6.x
color: '#ffd700'


@@ -1,34 +0,0 @@
name: backport
on:
pull_request_target:
types: [closed, labeled]
permissions:
contents: read
jobs:
pull-request:
runs-on: ubuntu-latest
permissions:
contents: write
pull-requests: write
if: github.event.pull_request.state == 'closed' && github.event.pull_request.merged && (github.event_name != 'labeled' || startsWith('backport:', github.event.label.name))
steps:
- name: Checkout
uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
with:
ref: ${{ github.event.pull_request.head.sha }}
- name: Create backport PRs
uses: korthout/backport-action@436145e922f9561fc5ea157ff406f21af2d6b363 # v3.2.0
# xref: https://github.com/korthout/backport-action#inputs
with:
# Use token to allow workflows to be triggered for the created PR
github_token: ${{ secrets.BOT_GITHUB_TOKEN }}
# Match labels with a pattern `backport:<target-branch>`
label_pattern: '^backport:([^ ]+)$'
# A bit shorter pull-request title than the default
pull_title: '[${target_branch}] ${pull_title}'
# Simpler PR description than default
pull_description: |-
Automated backport to `${target_branch}`, triggered by a label in #${pull_number}.


@@ -1,31 +0,0 @@
name: fuzz
on:
pull_request:
branches:
- 'main'
- 'release/**'
paths-ignore:
- 'CHANGELOG.md'
- 'README.md'
- 'MAINTAINERS'
permissions:
contents: read
jobs:
smoketest:
runs-on: ubuntu-latest
steps:
- name: Checkout
uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
- name: Setup Go
uses: actions/setup-go@d35c59abb061a4a6fb18e82ac0862c26744d6ab5 # v5.5.0
with:
go-version: 1.24.x
cache-dependency-path: |
**/go.sum
**/go.mod
- name: Smoke test Fuzzers
run: make fuzz-smoketest
env:
SKIP_COSIGN_VERIFICATION: true


@@ -1,15 +1,10 @@
name: e2e
on:
workflow_dispatch:
pull_request:
branches:
- 'main'
- 'release/**'
push:
branches:
- 'main'
- 'release/**'
- main
permissions:
contents: read # for actions/checkout to fetch code
@@ -20,34 +15,68 @@ jobs:
runs-on: ubuntu-latest
steps:
- name: Checkout
uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
uses: actions/checkout@v2
- name: Setup Go
uses: actions/setup-go@d35c59abb061a4a6fb18e82ac0862c26744d6ab5 # v5.5.0
uses: actions/setup-go@v2
with:
go-version: 1.24.x
cache-dependency-path: |
**/go.sum
**/go.mod
- name: Enable integration tests
# Only run integration tests for main and release branches
if: github.ref == 'refs/heads/main' || startsWith(github.ref, 'refs/heads/release/')
run: |
echo 'GO_TAGS=integration' >> $GITHUB_ENV
go-version: 1.17.x
- name: Restore Go cache
uses: actions/cache@v1
with:
path: /home/runner/work/_temp/_github_home/go/pkg/mod
key: ${{ runner.os }}-go-${{ hashFiles('**/go.sum') }}
restore-keys: |
${{ runner.os }}-go-
- name: Setup Kubernetes
uses: helm/kind-action@a1b0e391336a6ee6713a0583f8c6240d70863de3 # v1.12.0
uses: engineerd/setup-kind@v0.5.0
with:
cluster_name: kind
version: v0.11.1
image: kindest/node:v1.21.1@sha256:69860bda5563ac81e3c0057d654b5253219618a22ec3a346306239bba8cfa1a6
- name: Setup Kustomize
uses: fluxcd/pkg/actions/kustomize@main
- name: Setup Helm
uses: fluxcd/pkg/actions/helm@main
- name: Run tests
uses: ./.github/actions/run-tests
env:
GOROOT:
GOPATH: /github/home/go
- name: Verify
run: make verify
- name: Run E2E tests
env:
SKIP_COSIGN_VERIFICATION: true
CREATE_CLUSTER: false
run: make e2e
- name: Print controller logs
if: always()
continue-on-error: true
kind-linux-arm64:
# Hosted on Equinix
# Docs: https://github.com/fluxcd/flux2/tree/main/.github/runners
runs-on: [self-hosted, Linux, ARM64, equinix]
steps:
- name: Checkout
uses: actions/checkout@v2
- name: Setup Go
uses: actions/setup-go@v2
with:
go-version: 1.17.x
- name: Prepare
id: prep
run: |
kubectl -n source-system logs -l app=source-controller
echo ::set-output name=CLUSTER::arm64-${GITHUB_SHA:0:7}-$(date +%s)
echo ::set-output name=CONTEXT::kind-arm64-${GITHUB_SHA:0:7}-$(date +%s)
- name: Setup Kubernetes Kind
run: |
kind create cluster --name ${{ steps.prep.outputs.CLUSTER }} --kubeconfig=/tmp/${{ steps.prep.outputs.CLUSTER }}
- name: Run e2e tests
env:
KIND_CLUSTER_NAME: ${{ steps.prep.outputs.CLUSTER }}
KUBECONFIG: /tmp/${{ steps.prep.outputs.CLUSTER }}
CREATE_CLUSTER: false
BUILD_PLATFORM: linux/arm64
MINIO_TAG: RELEASE.2020-09-17T04-49-20Z-arm64
run: make e2e
- name: Cleanup
if: always()
run: |
kind delete cluster --name ${{ steps.prep.outputs.CLUSTER }}
rm /tmp/${{ steps.prep.outputs.CLUSTER }}


@@ -14,17 +14,18 @@ jobs:
build:
runs-on: ubuntu-latest
steps:
- name: Checkout
uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
- uses: actions/checkout@v2
- name: Setup QEMU
uses: docker/setup-qemu-action@29109295f81e9208d7d86ff1c6c12d2833863392 # v3.6.0
uses: docker/setup-qemu-action@v1
with:
platforms: all
- name: Setup Docker Buildx
id: buildx
uses: docker/setup-buildx-action@b5ca514318bd6ebac0fb2aedd5d36ec1b5c232a2 # v3.10.0
uses: docker/setup-buildx-action@v1
with:
buildkitd-flags: "--debug"
- name: Build multi-arch container image
uses: docker/build-push-action@1dc73863535b631f98b2378be8619f83b136f4a0 # v6.17.0
uses: docker/build-push-action@v2
with:
push: false
builder: ${{ steps.buildx.outputs.name }}


@@ -7,29 +7,22 @@ on:
inputs:
tag:
description: 'image tag prefix'
default: 'preview'
default: 'rc'
required: true
permissions:
contents: read
contents: write # needed to write releases
id-token: write # needed for keyless signing
packages: write # needed for ghcr access
env:
CONTROLLER: ${{ github.event.repository.name }}
jobs:
release:
outputs:
hashes: ${{ steps.slsa.outputs.hashes }}
image_url: ${{ steps.slsa.outputs.image_url }}
image_digest: ${{ steps.slsa.outputs.image_digest }}
build-push:
runs-on: ubuntu-latest
permissions:
contents: write # for creating the GitHub release.
id-token: write # for creating OIDC tokens for signing.
packages: write # for pushing and signing container images.
steps:
- name: Checkout
uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
- uses: actions/checkout@v2
- name: Setup Kustomize
uses: fluxcd/pkg/actions/kustomize@main
- name: Prepare
@@ -39,27 +32,27 @@ jobs:
if [[ $GITHUB_REF == refs/tags/* ]]; then
VERSION=${GITHUB_REF/refs\/tags\//}
fi
echo "BUILD_DATE=$(date -u +'%Y-%m-%dT%H:%M:%SZ')" >> $GITHUB_OUTPUT
echo "VERSION=${VERSION}" >> $GITHUB_OUTPUT
echo ::set-output name=BUILD_DATE::$(date -u +'%Y-%m-%dT%H:%M:%SZ')
echo ::set-output name=VERSION::${VERSION}
- name: Setup QEMU
uses: docker/setup-qemu-action@29109295f81e9208d7d86ff1c6c12d2833863392 # v3.6.0
uses: docker/setup-qemu-action@v1
- name: Setup Docker Buildx
id: buildx
uses: docker/setup-buildx-action@b5ca514318bd6ebac0fb2aedd5d36ec1b5c232a2 # v3.10.0
uses: docker/setup-buildx-action@v1
- name: Login to GitHub Container Registry
uses: docker/login-action@74a5d142397b4f367a81961eba4e8cd7edddf772 # v3.4.0
uses: docker/login-action@v1
with:
registry: ghcr.io
username: fluxcdbot
password: ${{ secrets.GHCR_TOKEN }}
- name: Login to Docker Hub
uses: docker/login-action@74a5d142397b4f367a81961eba4e8cd7edddf772 # v3.4.0
uses: docker/login-action@v1
with:
username: fluxcdbot
password: ${{ secrets.DOCKER_FLUXCD_PASSWORD }}
- name: Generate images meta
id: meta
uses: docker/metadata-action@902fa8ec7d6ecbf8d84d538b9b233a880e428804 # v5.7.0
uses: docker/metadata-action@v3
with:
images: |
fluxcd/${{ env.CONTROLLER }}
@@ -67,11 +60,8 @@ jobs:
tags: |
type=raw,value=${{ steps.prep.outputs.VERSION }}
- name: Publish images
id: build-push
uses: docker/build-push-action@1dc73863535b631f98b2378be8619f83b136f4a0 # v6.17.0
uses: docker/build-push-action@v2
with:
sbom: true
provenance: true
push: true
builder: ${{ steps.buildx.outputs.name }}
context: .
@@ -79,82 +69,32 @@ jobs:
platforms: linux/amd64,linux/arm/v7,linux/arm64
tags: ${{ steps.meta.outputs.tags }}
labels: ${{ steps.meta.outputs.labels }}
- uses: sigstore/cosign-installer@3454372f43399081ed03b604cb2d021dabca52bb # v3.8.2
- name: Check images
run: |
docker buildx imagetools inspect docker.io/fluxcd/${{ env.CONTROLLER }}:${{ steps.prep.outputs.VERSION }}
docker buildx imagetools inspect ghcr.io/fluxcd/${{ env.CONTROLLER }}:${{ steps.prep.outputs.VERSION }}
docker pull docker.io/fluxcd/${{ env.CONTROLLER }}:${{ steps.prep.outputs.VERSION }}
docker pull ghcr.io/fluxcd/${{ env.CONTROLLER }}:${{ steps.prep.outputs.VERSION }}
- uses: sigstore/cosign-installer@main
- name: Sign images
env:
COSIGN_EXPERIMENTAL: 1
run: |
cosign sign --yes fluxcd/${{ env.CONTROLLER }}@${{ steps.build-push.outputs.digest }}
cosign sign --yes ghcr.io/fluxcd/${{ env.CONTROLLER }}@${{ steps.build-push.outputs.digest }}
cosign sign fluxcd/${{ env.CONTROLLER }}:${{ steps.prep.outputs.VERSION }}
cosign sign ghcr.io/fluxcd/${{ env.CONTROLLER }}:${{ steps.prep.outputs.VERSION }}
- name: Generate release artifacts
if: startsWith(github.ref, 'refs/tags/v')
run: |
mkdir -p config/release
kustomize build ./config/crd > ./config/release/${{ env.CONTROLLER }}.crds.yaml
kustomize build ./config/manager > ./config/release/${{ env.CONTROLLER }}.deployment.yaml
- uses: anchore/sbom-action/download-syft@e11c554f704a0b820cbf8c51673f6945e0731532 # v0.20.0
echo '[CHANGELOG](https://github.com/fluxcd/${{ env.CONTROLLER }}/blob/main/CHANGELOG.md)' > ./config/release/notes.md
- uses: anchore/sbom-action/download-syft@v0
- name: Create release and SBOM
id: run-goreleaser
if: startsWith(github.ref, 'refs/tags/v')
uses: goreleaser/goreleaser-action@9c156ee8a17a598857849441385a2041ef570552 # v6.3.0
uses: goreleaser/goreleaser-action@v2
with:
version: latest
args: release --clean --skip=validate
args: release --release-notes=config/release/notes.md --rm-dist --skip-validate
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
- name: Generate SLSA metadata
id: slsa
env:
ARTIFACTS: "${{ steps.run-goreleaser.outputs.artifacts }}"
run: |
hashes=$(echo -E $ARTIFACTS | jq --raw-output '.[] | {name, "digest": (.extra.Digest // .extra.Checksum)} | select(.digest) | {digest} + {name} | join(" ") | sub("^sha256:";"")' | base64 -w0)
echo "hashes=$hashes" >> $GITHUB_OUTPUT
image_url=fluxcd/${{ env.CONTROLLER }}:${{ steps.prep.outputs.version }}
echo "image_url=$image_url" >> $GITHUB_OUTPUT
image_digest=${{ steps.build-push.outputs.digest }}
echo "image_digest=$image_digest" >> $GITHUB_OUTPUT
release-provenance:
needs: [release]
permissions:
actions: read # for detecting the Github Actions environment.
id-token: write # for creating OIDC tokens for signing.
contents: write # for uploading attestations to GitHub releases.
if: startsWith(github.ref, 'refs/tags/v')
uses: slsa-framework/slsa-github-generator/.github/workflows/generator_generic_slsa3.yml@v2.1.0
with:
provenance-name: "provenance.intoto.jsonl"
base64-subjects: "${{ needs.release.outputs.hashes }}"
upload-assets: true
dockerhub-provenance:
needs: [release]
permissions:
actions: read # for detecting the Github Actions environment.
id-token: write # for creating OIDC tokens for signing.
packages: write # for uploading attestations.
if: startsWith(github.ref, 'refs/tags/v')
uses: slsa-framework/slsa-github-generator/.github/workflows/generator_container_slsa3.yml@v2.1.0
with:
image: ${{ needs.release.outputs.image_url }}
digest: ${{ needs.release.outputs.image_digest }}
registry-username: fluxcdbot
secrets:
registry-password: ${{ secrets.DOCKER_FLUXCD_PASSWORD }}
ghcr-provenance:
needs: [release]
permissions:
actions: read # for detecting the Github Actions environment.
id-token: write # for creating OIDC tokens for signing.
packages: write # for uploading attestations.
if: startsWith(github.ref, 'refs/tags/v')
uses: slsa-framework/slsa-github-generator/.github/workflows/generator_container_slsa3.yml@v2.1.0
with:
image: ghcr.io/${{ needs.release.outputs.image_url }}
digest: ${{ needs.release.outputs.image_digest }}
registry-username: fluxcdbot
secrets:
registry-password: ${{ secrets.GHCR_TOKEN }}


@@ -1,10 +1,10 @@
name: scan
name: Scan
on:
push:
branches: [ 'main', 'release/**' ]
branches: [ main ]
pull_request:
branches: [ 'main', 'release/**' ]
branches: [ main ]
schedule:
- cron: '18 10 * * 3'
@@ -17,10 +17,9 @@ jobs:
name: FOSSA
runs-on: ubuntu-latest
steps:
- name: Checkout
uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
- uses: actions/checkout@v2
- name: Run FOSSA scan and upload build data
uses: fossa-contrib/fossa-action@3d2ef181b1820d6dcd1972f86a767d18167fa19b # v3.0.1
uses: fossa-contrib/fossa-action@v1
with:
# FOSSA Push-Only API Token
fossa-api-key: 5ee8bf422db1471e0bcf2bcb289185de
@@ -30,23 +29,13 @@ jobs:
name: CodeQL
runs-on: ubuntu-latest
steps:
- name: Checkout
uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
- name: Setup Go
uses: actions/setup-go@d35c59abb061a4a6fb18e82ac0862c26744d6ab5 # v5.5.0
with:
go-version: 1.24.x
cache-dependency-path: |
**/go.sum
**/go.mod
- name: Checkout repository
uses: actions/checkout@v2
- name: Initialize CodeQL
uses: github/codeql-action/init@ff0a06e83cb2de871e5a09832bc6a81e7276941f # v3.28.18
uses: github/codeql-action/init@v1
with:
languages: go
# xref: https://docs.github.com/en/code-security/code-scanning/automatically-scanning-your-code-for-vulnerabilities-and-errors/configuring-code-scanning#using-queries-in-ql-packs
# xref: https://codeql.github.com/codeql-query-help/go/
queries: security-and-quality
- name: Autobuild
uses: github/codeql-action/autobuild@ff0a06e83cb2de871e5a09832bc6a81e7276941f # v3.28.18
uses: github/codeql-action/autobuild@v1
- name: Perform CodeQL Analysis
uses: github/codeql-action/analyze@ff0a06e83cb2de871e5a09832bc6a81e7276941f # v3.28.18
uses: github/codeql-action/analyze@v1


@@ -1,28 +0,0 @@
name: sync-labels
on:
workflow_dispatch:
push:
branches:
- main
paths:
- .github/labels.yaml
permissions:
contents: read
jobs:
labels:
name: Run sync
runs-on: ubuntu-latest
permissions:
issues: write
steps:
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
- uses: EndBug/label-sync@52074158190acb45f3077f9099fea818aa43f97a # v2.3.3
with:
# Configuration file
config-file: |
https://raw.githubusercontent.com/fluxcd/community/main/.github/standard-labels.yaml
.github/labels.yaml
# Strictly declarative
delete-other-labels: true


@@ -1,57 +0,0 @@
name: tests
on:
workflow_dispatch:
pull_request:
branches:
- 'main'
- 'release/**'
push:
branches:
- 'main'
- 'release/**'
permissions:
contents: read # for actions/checkout to fetch code
jobs:
test-linux-amd64:
runs-on: ubuntu-latest
steps:
- name: Checkout
uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
- name: Setup Go
uses: actions/setup-go@d35c59abb061a4a6fb18e82ac0862c26744d6ab5 # v5.5.0
with:
go-version: 1.24.x
cache-dependency-path: |
**/go.sum
**/go.mod
- name: Run tests
env:
SKIP_COSIGN_VERIFICATION: true
TEST_AZURE_ACCOUNT_NAME: ${{ secrets.TEST_AZURE_ACCOUNT_NAME }}
TEST_AZURE_ACCOUNT_KEY: ${{ secrets.TEST_AZURE_ACCOUNT_KEY }}
run: make test
test-linux-arm64:
runs-on:
group: "ARM64"
if: github.actor != 'dependabot[bot]'
steps:
- name: Checkout
uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
- name: Setup Go
uses: actions/setup-go@d35c59abb061a4a6fb18e82ac0862c26744d6ab5 # v5.5.0
with:
go-version: 1.24.x
cache-dependency-path: |
**/go.sum
**/go.mod
- name: Run tests
env:
SKIP_COSIGN_VERIFICATION: true
TEST_AZURE_ACCOUNT_NAME: ${{ secrets.TEST_AZURE_ACCOUNT_NAME }}
TEST_AZURE_ACCOUNT_KEY: ${{ secrets.TEST_AZURE_ACCOUNT_KEY }}
run: make test


@@ -1,31 +0,0 @@
name: verify
on:
pull_request:
branches:
- 'main'
- 'release/**'
push:
branches:
- 'main'
- 'release/**'
permissions:
contents: read # for actions/checkout to fetch code
jobs:
verify-linux-amd64:
runs-on: ubuntu-latest
steps:
- name: Checkout
uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
- name: Setup Go
uses: actions/setup-go@d35c59abb061a4a6fb18e82ac0862c26744d6ab5 # v5.5.0
with:
go-version: 1.24.x
cache-dependency-path: |
**/go.sum
**/go.mod
- name: Verify
run: make verify

.gitignore vendored

@@ -17,5 +17,8 @@ bin/
testbin/
config/release/
# Exclude all libgit2 related files
hack/libgit2/
# Exclude temporary build files
build/


@@ -4,26 +4,9 @@ builds:
- skip: true
release:
prerelease: "true"
extra_files:
- glob: config/release/*.yaml
prerelease: "auto"
header: |
## Changelog
[{{.Tag}} changelog](https://github.com/fluxcd/{{.ProjectName}}/blob/{{.Tag}}/CHANGELOG.md)
footer: |
## Container images
- `docker.io/fluxcd/{{.ProjectName}}:{{.Tag}}`
- `ghcr.io/fluxcd/{{.ProjectName}}:{{.Tag}}`
Supported architectures: `linux/amd64`, `linux/arm64` and `linux/arm/v7`.
The container images are built on GitHub hosted runners and are signed with cosign and GitHub OIDC.
To verify the images and their provenance (SLSA level 3), please see the [security documentation](https://fluxcd.io/flux/security/).
changelog:
disable: true
checksum:
extra_files:
@@ -49,7 +32,6 @@ signs:
certificate: "${artifact}.pem"
args:
- sign-blob
- "--yes"
- "--output-certificate=${certificate}"
- "--output-signature=${signature}"
- "${artifact}"

ATTRIBUTIONS.md Normal file

File diff suppressed because it is too large

File diff suppressed because it is too large


@@ -1,6 +1,6 @@
# Development
> **Note:** Please take a look at <https://fluxcd.io/contributing/flux/>
> **Note:** Please take a look at <https://fluxcd.io/docs/contributing/flux/>
> to find out about how to contribute to Flux and how to interact with the
> Flux Development team.
@@ -9,13 +9,52 @@
There are a number of dependencies required to be able to run the controller and its test suite locally:
- [Install Go](https://golang.org/doc/install)
- [Install Kustomize](https://kubectl.docs.kubernetes.io/installation/kustomize/)
- [Install Kustomize](https://kubernetes-sigs.github.io/kustomize/installation/)
- [Install Docker](https://docs.docker.com/engine/install/)
- (Optional) [Install Kubebuilder](https://book.kubebuilder.io/quick-start.html#installation)
The dependency [libgit2](https://libgit2.org/) also needs to be installed to be able
to run `source-controller` or its test-suite locally (not in a container).
In case this dependency is not present on your system (at the expected
version), the first invocation of a `make` target that requires the
dependency will attempt to compile it locally to `hack/libgit2`. For this build
to succeed ensure the following dependencies are present on your system:
- [CMake](https://cmake.org/download/)
- [OpenSSL 1.1](https://www.openssl.org/source/)
- [LibSSH2](https://www.libssh2.org/)
- [pkg-config](https://freedesktop.org/wiki/Software/pkg-config/)
Triggering a manual build of the dependency is possible as well by running
`make libgit2`. To enforce the build, for example if your system dependencies
match but are not linked in a compatible way, append `LIBGIT2_FORCE=1` to the
`make` command.
Follow the instructions below to install these dependencies to your system.
### macOS
```console
$ # Ensure libgit2 dependencies are available
$ brew install cmake openssl@1.1 libssh2 pkg-config
$ LIBGIT2_FORCE=1 make libgit2
```
### Linux
```console
$ # Ensure libgit2 dependencies are available
$ pacman -S cmake openssl libssh2
$ LIBGIT2_FORCE=1 make libgit2
```
**Note:** Example shown is for Arch Linux, but likewise procedure can be
followed using any other package manager. Some distributions may have slight
variation of package names (e.g. `apt install -y cmake openssl libssh2-1-dev`).
In addition to the above, the following dependencies are also used by some of the `make` targets:
- `controller-gen` (v0.12.0)
- `controller-gen` (v0.7.0)
- `gen-crd-api-reference-docs` (v0.3.0)
- `setup-envtest` (latest)
@@ -24,7 +63,7 @@ If any of the above dependencies are not present on your system, the first invoc
## How to run the test suite
Prerequisites:
* Go >= 1.24
* Go >= 1.17
You can run the test suite by simply doing
@@ -32,14 +71,6 @@ You can run the test suite by simply doing
make test
```
### Additional test configuration
By setting the `GO_TEST_ARGS` environment variable you can pass additional flags to [`go test`](https://pkg.go.dev/cmd/go#hdr-Test_packages):
```sh
make test GO_TEST_ARGS="-v -run=TestReadIgnoreFile/with_domain"
```
## How to run the controller locally
Install the controller's CRDs on your test cluster:
@@ -115,25 +146,3 @@ Deploy `source-controller` into the cluster that is configured in the local kube
```sh
make deploy
```
### Debugging controller with VSCode
Create a `.vscode/launch.json` file:
```json
{
"version": "0.2.0",
"configurations": [
{
"name": "Launch Package",
"type": "go",
"request": "launch",
"mode": "auto",
"program": "${workspaceFolder}/main.go",
"args": ["--storage-adv-addr=:0", "--storage-path=${workspaceFolder}/bin/data"]
}
]
}
```
Start debugging by either clicking `Run` > `Start Debugging` or using
the relevant shortcut.


@@ -1,15 +1,15 @@
ARG GO_VERSION=1.24
ARG XX_VERSION=1.6.1
ARG BASE_VARIANT=alpine
ARG GO_VERSION=1.17
ARG XX_VERSION=1.1.0
FROM --platform=$BUILDPLATFORM tonistiigi/xx:${XX_VERSION} AS xx
ARG LIBGIT2_IMG=ghcr.io/fluxcd/golang-with-libgit2
ARG LIBGIT2_TAG=libgit2-1.1.1-4
# Docker buildkit multi-arch build requires golang alpine
FROM --platform=$BUILDPLATFORM golang:${GO_VERSION}-alpine AS builder
FROM --platform=linux/amd64 ${LIBGIT2_IMG}:${LIBGIT2_TAG} as build-amd64
FROM --platform=linux/arm64 ${LIBGIT2_IMG}:${LIBGIT2_TAG} as build-arm64
FROM --platform=linux/arm/v7 ${LIBGIT2_IMG}:${LIBGIT2_TAG} as build-armv7
# Copy the build utilities.
COPY --from=xx / /
ARG TARGETPLATFORM
FROM --platform=$BUILDPLATFORM build-$TARGETARCH$TARGETVARIANT AS build
# Configure workspace
WORKDIR /workspace
@@ -24,25 +24,47 @@ COPY go.sum go.sum
# Cache modules
RUN go mod download
# Copy source code
COPY main.go main.go
COPY pkg/ pkg/
COPY internal/ internal/
RUN apk add clang lld pkgconfig ca-certificates
# Build the binary
ENV CGO_ENABLED=1
ARG TARGETPLATFORM
ARG TARGETARCH
# build without specifying the arch
ENV CGO_ENABLED=0
RUN xx-go build -trimpath -a -o source-controller main.go
RUN xx-apk add --no-cache \
musl-dev gcc lld binutils-gold
FROM alpine:3.21
# Performance related changes:
# - Use read-only bind instead of copying go source files.
# - Cache go packages.
RUN --mount=target=. \
--mount=type=cache,target=/root/.cache/go-build \
--mount=type=cache,target=/go/pkg \
export LIBRARY_PATH="/usr/local/$(xx-info triple)/lib:/usr/local/$(xx-info triple)/lib64:${LIBRARY_PATH}" && \
export PKG_CONFIG_PATH="/usr/local/$(xx-info triple)/lib/pkgconfig:/usr/local/$(xx-info triple)/lib64/pkgconfig" && \
export FLAGS="$(pkg-config --static --libs --cflags libssh2 openssl libgit2)" && \
CGO_LDFLAGS="${FLAGS} -static" \
xx-go build \
-ldflags "-s -w" \
-tags 'netgo,osusergo,static_build' \
-o /source-controller -trimpath main.go;
# Ensure that the binary was cross-compiled correctly to the target platform.
RUN xx-verify --static /source-controller
FROM alpine:3.15
ARG TARGETPLATFORM
RUN apk --no-cache add ca-certificates \
&& update-ca-certificates
COPY --from=builder /workspace/source-controller /usr/local/bin/
# Create minimal nsswitch.conf file to prioritize the usage of /etc/hosts over DNS queries.
# https://github.com/gliderlabs/docker-alpine/issues/367#issuecomment-354316460
RUN [ ! -e /etc/nsswitch.conf ] && echo 'hosts: files dns' > /etc/nsswitch.conf
# Copy over binary from build
COPY --from=build /source-controller /usr/local/bin/
COPY ATTRIBUTIONS.md /
USER 65534:65534
ENTRYPOINT [ "source-controller" ]


@@ -2,9 +2,7 @@ The maintainers are generally available in Slack at
https://cloud-native.slack.com in #flux (https://cloud-native.slack.com/messages/CLAJ40HV3)
(obtain an invitation at https://slack.cncf.io/).
In addition to those listed below, this project shares maintainers
from the main Flux v2 git repository, as listed in
This project shares maintainers from the main Flux v2 git repository,
as listed in
https://github.com/fluxcd/flux2/blob/main/MAINTAINERS
Dipti Pai, Microsoft <diptipai@microsoft.com> (github: @dipti-pai, slack: Dipti Pai)

Makefile

@@ -2,14 +2,9 @@
IMG ?= fluxcd/source-controller
TAG ?= latest
# Allows for defining additional Go test args, e.g. '-tags integration'.
GO_TEST_ARGS ?= -race
# Allows for filtering tests based on the specified prefix
GO_TEST_PREFIX ?=
# Defines whether cosign verification should be skipped.
SKIP_COSIGN_VERIFICATION ?= false
# Base image used to build the Go binary
LIBGIT2_IMG ?= ghcr.io/fluxcd/golang-with-libgit2
LIBGIT2_TAG ?= libgit2-1.1.1-4
# Allows for defining additional Docker buildx arguments,
# e.g. '--push'.
@@ -17,82 +12,103 @@ BUILD_ARGS ?=
# Architectures to build images for
BUILD_PLATFORMS ?= linux/amd64,linux/arm64,linux/arm/v7
# Go additional tag arguments, e.g. 'integration',
# this is appended to the tag arguments required for static builds
GO_TAGS ?=
# Produce CRDs that work back to Kubernetes 1.16
CRD_OPTIONS ?= crd:crdVersions=v1
# Repository root based on Git metadata
REPOSITORY_ROOT := $(shell git rev-parse --show-toplevel)
BUILD_DIR := $(REPOSITORY_ROOT)/build
# Libgit2 version
LIBGIT2_VERSION ?= 1.1.1
# Other dependency versions
ENVTEST_BIN_VERSION ?= 1.24.0
ENVTEST_BIN_VERSION ?= 1.19.2
# FUZZ_TIME defines the max amount of time, in Go Duration,
# each fuzzer should run for.
FUZZ_TIME ?= 1m
# libgit2 related magical paths
# These are used to determine if the target libgit2 version is already available on
# the system, or where they should be installed to
SYSTEM_LIBGIT2_VERSION := $(shell pkg-config --modversion libgit2 2>/dev/null)
LIBGIT2_PATH := $(REPOSITORY_ROOT)/hack/libgit2
LIBGIT2_LIB_PATH := $(LIBGIT2_PATH)/lib
LIBGIT2 := $(LIBGIT2_LIB_PATH)/libgit2.so.$(LIBGIT2_VERSION)
GO_STATIC_FLAGS=-ldflags "-s -w" -tags 'netgo,osusergo,static_build$(addprefix ,,$(GO_TAGS))'
# API (doc) generation utilities
CONTROLLER_GEN_VERSION ?= v0.16.1
GEN_API_REF_DOCS_VERSION ?= e327d0730470cbd61b06300f81c5fcf91c23c113
# If gobin not set, create one on ./build and add to path.
ifeq (,$(shell go env GOBIN))
export GOBIN=$(BUILD_DIR)/gobin
else
export GOBIN=$(shell go env GOBIN)
endif
export PATH:=${GOBIN}:${PATH}
# Architecture to use envtest with
ifeq ($(shell uname -m),x86_64)
ENVTEST_ARCH ?= amd64
else
ENVTEST_ARCH ?= arm64
ifneq ($(LIBGIT2_VERSION),$(SYSTEM_LIBGIT2_VERSION))
LIBGIT2_FORCE ?= 1
endif
ifeq ($(shell uname -s),Darwin)
# Envtest only supports darwin-amd64
ENVTEST_ARCH=amd64
LIBGIT2 := $(LIBGIT2_LIB_PATH)/libgit2.$(LIBGIT2_VERSION).dylib
HAS_BREW := $(shell brew --version 2>/dev/null)
ifdef HAS_BREW
HAS_OPENSSL := $(shell brew --prefix openssl@1.1)
endif
endif
all: manager
# Build manager binary
manager: generate fmt vet
go build $(GO_STATIC_FLAGS) -o $(BUILD_DIR)/bin/manager main.go
# API (doc) generation utilities
CONTROLLER_GEN_VERSION ?= v0.7.0
GEN_API_REF_DOCS_VERSION ?= v0.3.0
# Get the currently used golang install path (in GOPATH/bin, unless GOBIN is set)
ifeq (,$(shell go env GOBIN))
GOBIN=$(shell go env GOPATH)/bin
else
GOBIN=$(shell go env GOBIN)
endif
ifeq ($(strip ${PKG_CONFIG_PATH}),)
MAKE_PKG_CONFIG_PATH = $(LIBGIT2_LIB_PATH)/pkgconfig
else
MAKE_PKG_CONFIG_PATH = ${PKG_CONFIG_PATH}:$(LIBGIT2_LIB_PATH)/pkgconfig
endif
ifdef HAS_OPENSSL
MAKE_PKG_CONFIG_PATH := $(MAKE_PKG_CONFIG_PATH):$(HAS_OPENSSL)/lib/pkgconfig
endif
# Architecture to use envtest with
ENVTEST_ARCH ?= amd64
all: build
build: $(LIBGIT2) ## Build manager binary
ifeq ($(shell uname -s),Darwin)
PKG_CONFIG_PATH=$(MAKE_PKG_CONFIG_PATH) \
CGO_LDFLAGS="-Wl,-rpath,$(LIBGIT2_LIB_PATH)" \
go build -o bin/manager main.go
else
PKG_CONFIG_PATH=$(MAKE_PKG_CONFIG_PATH) \
go build -o bin/manager main.go
endif
KUBEBUILDER_ASSETS?="$(shell $(ENVTEST) --arch=$(ENVTEST_ARCH) use -i $(ENVTEST_KUBERNETES_VERSION) --bin-dir=$(ENVTEST_ASSETS_DIR) -p path)"
test: install-envtest test-api ## Run all tests
HTTPS_PROXY="" HTTP_PROXY="" \
test: $(LIBGIT2) install-envtest test-api ## Run tests
ifeq ($(shell uname -s),Darwin)
LD_LIBRARY_PATH=$(LIBGIT2_LIB_PATH) \
PKG_CONFIG_PATH=$(MAKE_PKG_CONFIG_PATH) \
CGO_LDFLAGS="-Wl,-rpath,$(LIBGIT2_LIB_PATH)" \
KUBEBUILDER_ASSETS=$(KUBEBUILDER_ASSETS) \
GIT_CONFIG_GLOBAL=/dev/null \
GIT_CONFIG_NOSYSTEM=true \
go test $(GO_STATIC_FLAGS) \
./... \
$(GO_TEST_ARGS) \
-coverprofile cover.out
test-ctrl: install-envtest test-api ## Run controller tests
HTTPS_PROXY="" HTTP_PROXY="" \
go test ./... -coverprofile cover.out
else
LD_LIBRARY_PATH=$(LIBGIT2_LIB_PATH) \
PKG_CONFIG_PATH=$(MAKE_PKG_CONFIG_PATH) \
KUBEBUILDER_ASSETS=$(KUBEBUILDER_ASSETS) \
GIT_CONFIG_GLOBAL=/dev/null \
go test $(GO_STATIC_FLAGS) \
-run "^$(GO_TEST_PREFIX).*" \
-v ./internal/controller \
-coverprofile cover.out
go test ./... -coverprofile cover.out
endif
test-api: ## Run api tests
cd api; go test $(GO_TEST_ARGS) ./... -coverprofile cover.out
cd api; go test ./... -coverprofile cover.out
run: $(LIBGIT2) generate fmt vet manifests ## Run against the configured Kubernetes cluster in ~/.kube/config
ifeq ($(shell uname -s),Darwin)
LD_LIBRARY_PATH=$(LIBGIT2_LIB_PATH) \
CGO_LDFLAGS="-Wl,-rpath,$(LIBGIT2_LIB_PATH)" \
go run ./main.go
else
LD_LIBRARY_PATH=$(LIBGIT2_LIB_PATH) \
go run ./main.go
endif
run: generate fmt vet manifests ## Run against the configured Kubernetes cluster in ~/.kube/config
@mkdir -p $(PWD)/bin/data
go run $(GO_STATIC_FLAGS) ./main.go --storage-adv-addr=:0 --storage-path=$(PWD)/bin/data
install: manifests ## Install CRDs into a cluster
kustomize build config/crd | kubectl apply -f -
@@ -115,25 +131,35 @@ manifests: controller-gen ## Generate manifests, e.g. CRD, RBAC, etc.
cd api; $(CONTROLLER_GEN) $(CRD_OPTIONS) rbac:roleName=manager-role paths="./..." output:crd:artifacts:config="../config/crd/bases"
api-docs: gen-crd-api-reference-docs ## Generate API reference documentation
$(GEN_CRD_API_REFERENCE_DOCS) -api-dir=./api/v1 -config=./hack/api-docs/config.json -template-dir=./hack/api-docs/template -out-file=./docs/api/v1/source.md
$(GEN_CRD_API_REFERENCE_DOCS) -api-dir=./api/v1beta1 -config=./hack/api-docs/config.json -template-dir=./hack/api-docs/template -out-file=./docs/api/source.md
tidy: ## Run go mod tidy
cd api; rm -f go.sum; go mod tidy -compat=1.24
rm -f go.sum; go mod tidy -compat=1.24
go mod tidy
cd api; go mod tidy
fmt: ## Run go fmt against code
go fmt ./...
cd api; go fmt ./...
vet: ## Run go vet against code
vet: $(LIBGIT2) ## Run go vet against code
ifeq ($(shell uname -s),Darwin)
PKG_CONFIG_PATH=$(MAKE_PKG_CONFIG_PATH) \
CGO_LDFLAGS="-Wl,-rpath,$(LIBGIT2_LIB_PATH)" \
go vet ./...
cd api; go vet ./...
else
PKG_CONFIG_PATH=$(MAKE_PKG_CONFIG_PATH) \
go vet ./...
cd api; go vet ./...
endif
generate: controller-gen ## Generate API code
cd api; $(CONTROLLER_GEN) object:headerFile="../hack/boilerplate.go.txt" paths="./..."
docker-build: ## Build the Docker image
docker buildx build \
--build-arg LIBGIT2_IMG=$(LIBGIT2_IMG) \
--build-arg LIBGIT2_TAG=$(LIBGIT2_TAG) \
--platform=$(BUILD_PLATFORMS) \
-t $(IMG):$(TAG) \
$(BUILD_ARGS) .
@@ -142,45 +168,61 @@ docker-push: ## Push Docker image
docker push $(IMG):$(TAG)
# Find or download controller-gen
CONTROLLER_GEN = $(GOBIN)/controller-gen
CONTROLLER_GEN = $(shell pwd)/bin/controller-gen
.PHONY: controller-gen
controller-gen: ## Download controller-gen locally if necessary.
$(call go-install-tool,$(CONTROLLER_GEN),sigs.k8s.io/controller-tools/cmd/controller-gen@$(CONTROLLER_GEN_VERSION))
$(call go-install-tool,$(CONTROLLER_GEN),sigs.k8s.io/controller-tools/cmd/controller-gen@v0.7.0)
# Find or download gen-crd-api-reference-docs
GEN_CRD_API_REFERENCE_DOCS = $(GOBIN)/gen-crd-api-reference-docs
GEN_CRD_API_REFERENCE_DOCS = $(shell pwd)/bin/gen-crd-api-reference-docs
.PHONY: gen-crd-api-reference-docs
gen-crd-api-reference-docs: ## Download gen-crd-api-reference-docs locally if necessary
$(call go-install-tool,$(GEN_CRD_API_REFERENCE_DOCS),github.com/ahmetb/gen-crd-api-reference-docs@$(GEN_API_REF_DOCS_VERSION))
$(call go-install-tool,$(GEN_CRD_API_REFERENCE_DOCS),github.com/ahmetb/gen-crd-api-reference-docs@v0.3.0)
ENVTEST = $(GOBIN)/setup-envtest
ENVTEST = $(shell pwd)/bin/setup-envtest
.PHONY: envtest
setup-envtest: ## Download setup-envtest locally if necessary.
$(call go-install-tool,$(ENVTEST),sigs.k8s.io/controller-runtime/tools/setup-envtest@latest)
ENVTEST_ASSETS_DIR=$(BUILD_DIR)/testbin
ENVTEST_ASSETS_DIR=$(shell pwd)/testbin
ENVTEST_KUBERNETES_VERSION?=latest
install-envtest: setup-envtest ## Download envtest binaries locally.
mkdir -p ${ENVTEST_ASSETS_DIR}
$(ENVTEST) use $(ENVTEST_KUBERNETES_VERSION) --arch=$(ENVTEST_ARCH) --bin-dir=$(ENVTEST_ASSETS_DIR)
# setup-envtest sets anything below k8s to 0555
chmod -R u+w $(BUILD_DIR)/testbin
libgit2: $(LIBGIT2) ## Detect or download libgit2 library
$(LIBGIT2):
ifeq (1, $(LIBGIT2_FORCE))
@{ \
set -e; \
mkdir -p $(LIBGIT2_PATH); \
curl -sL https://raw.githubusercontent.com/fluxcd/golang-with-libgit2/$(LIBGIT2_TAG)/hack/Makefile -o $(LIBGIT2_PATH)/Makefile; \
INSTALL_PREFIX=$(LIBGIT2_PATH) make -C $(LIBGIT2_PATH) libgit2; \
}
endif
.PHONY: help
help: ## Display this help menu
@awk 'BEGIN {FS = ":.*##"; printf "\nUsage:\n make \033[36m<target>\033[0m\n"} /^[a-zA-Z_0-9-]+:.*?##/ { printf " \033[36m%-20s\033[0m %s\n", $$1, $$2 } /^##@/ { printf "\n\033[1m%s\033[0m\n", substr($$0, 5) } ' $(MAKEFILE_LIST)
update-attributions:
./hack/update-attributions.sh
e2e:
./hack/ci/e2e.sh
verify: fmt vet manifests api-docs tidy
@if [ ! "$$(git status --porcelain --untracked-files=no)" = "" ]; then \
verify: update-attributions fmt vet manifests api-docs
ifneq (, $(shell git status --porcelain --untracked-files=no))
@{ \
echo "working directory is dirty:"; \
git --no-pager diff; \
exit 1; \
fi
}
endif
# go-install-tool will 'go install' any package $2 and install it to $1.
PROJECT_DIR := $(shell dirname $(abspath $(lastword $(MAKEFILE_LIST))))
define go-install-tool
@[ -f $(1) ] || { \
set -e ;\
@@ -188,33 +230,7 @@ TMP_DIR=$$(mktemp -d) ;\
cd $$TMP_DIR ;\
go mod init tmp ;\
echo "Downloading $(2)" ;\
env -i bash -c "GOBIN=$(GOBIN) PATH=\"$(PATH)\" GOPATH=$(shell go env GOPATH) GOCACHE=$(shell go env GOCACHE) go install $(2)" ;\
GOBIN=$(PROJECT_DIR)/bin go install $(2) ;\
rm -rf $$TMP_DIR ;\
}
endef
# Build fuzzers used by oss-fuzz.
fuzz-build:
rm -rf $(shell pwd)/build/fuzz/
mkdir -p $(shell pwd)/build/fuzz/out/
docker build . --tag local-fuzzing:latest -f tests/fuzz/Dockerfile.builder
docker run --rm \
-e FUZZING_LANGUAGE=go -e SANITIZER=address \
-e CIFUZZ_DEBUG='True' -e OSS_FUZZ_PROJECT_NAME=fluxcd \
-v "$(shell pwd)/build/fuzz/out":/out \
local-fuzzing:latest
# Run each fuzzer once to ensure they will work when executed by oss-fuzz.
fuzz-smoketest: fuzz-build
docker run --rm \
-v "$(shell pwd)/build/fuzz/out":/out \
-v "$(shell pwd)/tests/fuzz/oss_fuzz_run.sh":/runner.sh \
local-fuzzing:latest \
bash -c "/runner.sh"
# Run fuzz tests for the duration set in FUZZ_TIME.
fuzz-native:
KUBEBUILDER_ASSETS=$(KUBEBUILDER_ASSETS) \
FUZZ_TIME=$(FUZZ_TIME) \
./tests/fuzz/native_go_run.sh

PROJECT

@@ -1,27 +1,6 @@
domain: toolkit.fluxcd.io
repo: github.com/fluxcd/source-controller
resources:
- group: source
kind: GitRepository
version: v1
- group: source
kind: GitRepository
version: v1beta2
- group: source
kind: HelmRepository
version: v1
- group: source
kind: HelmRepository
version: v1beta2
- group: source
kind: HelmChart
version: v1
- group: source
kind: HelmChart
version: v1beta2
- group: source
kind: Bucket
version: v1beta2
- group: source
kind: GitRepository
version: v1beta1
@@ -34,13 +13,4 @@ resources:
- group: source
kind: Bucket
version: v1beta1
- group: source
kind: OCIRepository
version: v1beta2
- group: source
kind: Bucket
version: v1
- group: source
kind: OCIRepository
version: v1
version: "2"


@@ -7,47 +7,21 @@
[![release](https://img.shields.io/github/release/fluxcd/source-controller/all.svg)](https://github.com/fluxcd/source-controller/releases)
The source-controller is a Kubernetes operator, specialised in artifacts acquisition
from external sources such as Git, OCI, Helm repositories and S3-compatible buckets.
from external sources such as Git, Helm repositories and S3 buckets.
The source-controller implements the
[source.toolkit.fluxcd.io](docs/spec/README.md) API
and is a core component of the [GitOps toolkit](https://fluxcd.io/flux/components/).
[source.toolkit.fluxcd.io](https://github.com/fluxcd/source-controller/tree/master/docs/spec/v1beta1) API
and is a core component of the [GitOps toolkit](https://toolkit.fluxcd.io).
![overview](docs/diagrams/source-controller-overview.png)
## APIs
Features:
| Kind | API Version |
|----------------------------------------------------|-------------------------------|
| [GitRepository](docs/spec/v1/gitrepositories.md) | `source.toolkit.fluxcd.io/v1` |
| [OCIRepository](docs/spec/v1/ocirepositories.md) | `source.toolkit.fluxcd.io/v1` |
| [HelmRepository](docs/spec/v1/helmrepositories.md) | `source.toolkit.fluxcd.io/v1` |
| [HelmChart](docs/spec/v1/helmcharts.md) | `source.toolkit.fluxcd.io/v1` |
| [Bucket](docs/spec/v1/buckets.md) | `source.toolkit.fluxcd.io/v1` |
## Features
* authenticates to sources (SSH, user/password, API token, Workload Identity)
* validates source authenticity (PGP, Cosign, Notation)
* authenticates to sources (SSH, user/password, API token)
* validates source authenticity (PGP)
* detects source changes based on update policies (semver)
* fetches resources on-demand and on-a-schedule
* packages the fetched resources into a well-known format (tar.gz, yaml)
* makes the artifacts addressable by their source identifier (sha, version, ts)
* makes the artifacts available in-cluster to interested 3rd parties
* notifies interested 3rd parties of source changes and availability (status conditions, events, hooks)
* reacts to Git, Helm and OCI artifacts push events (via [notification-controller](https://github.com/fluxcd/notification-controller))
## Guides
* [Get started with Flux](https://fluxcd.io/flux/get-started/)
* [Setup Webhook Receivers](https://fluxcd.io/flux/guides/webhook-receivers/)
* [Setup Notifications](https://fluxcd.io/flux/guides/notifications/)
* [How to build, publish and consume OCI Artifacts with Flux](https://fluxcd.io/flux/cheatsheets/oci-artifacts/)
## Roadmap
The roadmap for the Flux family of projects can be found at <https://fluxcd.io/roadmap/>.
## Contributing
This project is Apache 2.0 licensed and accepts contributions via GitHub pull requests.
To start contributing please see the [development guide](DEVELOPMENT.md).
* reacts to Git push and Helm chart upload events (via [notification-controller](https://github.com/fluxcd/notification-controller))
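To make the APIs table above concrete, a minimal `GitRepository` object in the `source.toolkit.fluxcd.io/v1` API could look like the sketch below; the name, namespace, URL and interval are placeholders rather than values taken from this diff.

```yaml
# Hypothetical minimal GitRepository manifest for the v1 API listed above.
apiVersion: source.toolkit.fluxcd.io/v1
kind: GitRepository
metadata:
  name: example-repo        # placeholder
  namespace: flux-system    # placeholder
spec:
  interval: 5m              # how often the source is checked for changes
  url: https://github.com/example/example   # placeholder Git URL
  ref:
    branch: main
```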


@@ -1,35 +1,28 @@
module github.com/fluxcd/source-controller/api
go 1.24.0
go 1.17
require (
github.com/fluxcd/pkg/apis/acl v0.7.0
github.com/fluxcd/pkg/apis/meta v1.12.0
k8s.io/apimachinery v0.33.0
sigs.k8s.io/controller-runtime v0.21.0
github.com/fluxcd/pkg/apis/acl v0.0.3
github.com/fluxcd/pkg/apis/meta v0.10.2
k8s.io/apimachinery v0.23.1
sigs.k8s.io/controller-runtime v0.11.0
)
// Fix CVE-2022-28948
replace gopkg.in/yaml.v3 => gopkg.in/yaml.v3 v3.0.1
require (
github.com/fxamacker/cbor/v2 v2.8.0 // indirect
github.com/go-logr/logr v1.4.2 // indirect
github.com/go-logr/logr v1.2.2 // indirect
github.com/gogo/protobuf v1.3.2 // indirect
github.com/google/go-cmp v0.5.6 // indirect
github.com/google/gofuzz v1.2.0 // indirect
github.com/json-iterator/go v1.1.12 // indirect
github.com/kr/pretty v0.3.1 // indirect
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
github.com/modern-go/reflect2 v1.0.2 // indirect
github.com/spf13/pflag v1.0.6 // indirect
github.com/x448/float16 v0.8.4 // indirect
golang.org/x/net v0.40.0 // indirect
golang.org/x/text v0.25.0 // indirect
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c // indirect
golang.org/x/net v0.0.0-20211215060638-4ddde0e984e9 // indirect
golang.org/x/text v0.3.7 // indirect
gopkg.in/inf.v0 v0.9.1 // indirect
k8s.io/klog/v2 v2.130.1 // indirect
k8s.io/utils v0.0.0-20250321185631-1f6e0b77f77e // indirect
sigs.k8s.io/json v0.0.0-20241014173422-cfa47c3a1cc8 // indirect
sigs.k8s.io/randfill v1.0.0 // indirect
sigs.k8s.io/structured-merge-diff/v4 v4.7.0 // indirect
sigs.k8s.io/yaml v1.4.0 // indirect
gopkg.in/yaml.v2 v2.4.0 // indirect
k8s.io/klog/v2 v2.30.0 // indirect
k8s.io/utils v0.0.0-20211208161948-7d6a63dca704 // indirect
sigs.k8s.io/json v0.0.0-20211208200746-9f7c6b3444d2 // indirect
sigs.k8s.io/structured-merge-diff/v4 v4.2.0 // indirect
)


@@ -1,117 +1,931 @@
cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU=
cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU=
cloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY=
cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc=
cloud.google.com/go v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0=
cloud.google.com/go v0.50.0/go.mod h1:r9sluTvynVuxRIOHXQEHMFffphuXHOMZMycpNR5e6To=
cloud.google.com/go v0.52.0/go.mod h1:pXajvRH/6o3+F9jDHZWQ5PbGhn+o8w9qiu/CffaVdO4=
cloud.google.com/go v0.53.0/go.mod h1:fp/UouUEsRkN6ryDKNW/Upv/JBKnv6WDthjR6+vze6M=
cloud.google.com/go v0.54.0/go.mod h1:1rq2OEkV3YMf6n/9ZvGWI3GWw0VoqH/1x2nd8Is/bPc=
cloud.google.com/go v0.56.0/go.mod h1:jr7tqZxxKOVYizybht9+26Z/gUq7tiRzu+ACVAMbKVk=
cloud.google.com/go v0.57.0/go.mod h1:oXiQ6Rzq3RAkkY7N6t3TcE6jE+CIBBbA36lwQ1JyzZs=
cloud.google.com/go v0.62.0/go.mod h1:jmCYTdRCQuc1PHIIJ/maLInMho30T/Y0M4hTdTShOYc=
cloud.google.com/go v0.65.0/go.mod h1:O5N8zS7uWy9vkA9vayVHs65eM1ubvY4h553ofrNHObY=
cloud.google.com/go v0.72.0/go.mod h1:M+5Vjvlc2wnp6tjzE102Dw08nGShTscUx2nZMufOKPI=
cloud.google.com/go v0.74.0/go.mod h1:VV1xSbzvo+9QJOxLDaJfTjx5e+MePCpCWwvftOeQmWk=
cloud.google.com/go v0.78.0/go.mod h1:QjdrLG0uq+YwhjoVOLsS1t7TW8fs36kLs4XO5R5ECHg=
cloud.google.com/go v0.79.0/go.mod h1:3bzgcEeQlzbuEAYu4mrWhKqWjmpprinYgKJLgKHnbb8=
cloud.google.com/go v0.81.0/go.mod h1:mk/AM35KwGk/Nm2YSeZbxXdrNK3KZOYHmLkOqC2V6E0=
cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o=
cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE=
cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc=
cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg=
cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc=
cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ=
cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE=
cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk=
cloud.google.com/go/firestore v1.1.0/go.mod h1:ulACoGHTpvq5r8rxGJ4ddJZBZqakUQqClKRT5SZwBmk=
cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I=
cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw=
cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA=
cloud.google.com/go/pubsub v1.3.1/go.mod h1:i+ucay31+CNRpDW4Lu78I4xXG+O1r/MAHgjpRVR+TSU=
cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw=
cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos=
cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk=
cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs=
cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0=
dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU=
github.com/Azure/go-ansiterm v0.0.0-20210608223527-2377c96fe795/go.mod h1:LmzpDX56iTiv29bbRTIsUNlaFfuhWRQBWjQdVyAevI8=
github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E=
github.com/Azure/go-autorest v14.2.0+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24=
github.com/Azure/go-autorest/autorest v0.11.18/go.mod h1:dSiJPy22c3u0OtOKDNttNgqpNFY/GeWa7GH/Pz56QRA=
github.com/Azure/go-autorest/autorest/adal v0.9.13/go.mod h1:W/MM4U6nLxnIskrw4UwWzlHfGjwUS50aOsc/I3yuU8M=
github.com/Azure/go-autorest/autorest/date v0.3.0/go.mod h1:BI0uouVdmngYNUzGWeSYnokU+TrmwEsOqdt8Y6sso74=
github.com/Azure/go-autorest/autorest/mocks v0.4.1/go.mod h1:LTp+uSrOhSkaKrUy935gNZuuIPPVsHlr9DSOxSayd+k=
github.com/Azure/go-autorest/logger v0.2.1/go.mod h1:T9E3cAhj2VqvPOtCYAvby9aBXkZmbF5NWuPV8+WeEW8=
github.com/Azure/go-autorest/tracing v0.6.0/go.mod h1:+vhtPC754Xsa23ID7GlGsrdKBpUA79WCAKPPZVC2DeU=
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo=
github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46/go.mod h1:3wb06e3pkSAbeQ52E9H9iFoQsEEwGN64994WTCIhntQ=
github.com/NYTimes/gziphandler v1.1.1/go.mod h1:n/CVRwUEOgIxrgPvAQhUUr9oeUtvrhMomdKFjzJNB0c=
github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU=
github.com/PuerkitoBio/purell v1.1.1/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0=
github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE=
github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk541a8SKzHPHnH3zbiI+7dagKZ0cgpgrD7Fyho=
github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY=
github.com/antlr/antlr4/runtime/Go/antlr v0.0.0-20210826220005-b48c857c3a0e/go.mod h1:F7bn7fEU90QkQ3tnmaTx3LTKLEDqnwWODIYppRQ5hnY=
github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o=
github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY=
github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8=
github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY=
github.com/benbjohnson/clock v1.0.3/go.mod h1:bGMdMPoPVvcYyt1gHDf4J2KE153Yf9BuiUKYMaxlTDM=
github.com/benbjohnson/clock v1.1.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA=
github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8=
github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs=
github.com/bketelsen/crypt v0.0.3-0.20200106085610-5cbc8cc4026c/go.mod h1:MKsuJmJgSg28kpZDP6UIiPt0e0Oz0kqKNGyRaWEPv84=
github.com/bketelsen/crypt v0.0.4/go.mod h1:aI6NrJ0pMGgvZKL1iVgXLnfIFJtfV+bKCoqOes/6LfM=
github.com/blang/semver v3.5.1+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk=
github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
github.com/certifi/gocertifi v0.0.0-20191021191039-0944d244cd40/go.mod h1:sGbDF6GwGcLpkNXPUTkMRoywsNa/ol15pxFe6ERfguA=
github.com/certifi/gocertifi v0.0.0-20200922220541-2c3bb06c6054/go.mod h1:sGbDF6GwGcLpkNXPUTkMRoywsNa/ol15pxFe6ERfguA=
github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc=
github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI=
github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI=
github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU=
github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc=
github.com/cncf/udpa/go v0.0.0-20200629203442-efcf912fb354/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk=
github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk=
github.com/cncf/xds/go v0.0.0-20210312221358-fbca930ec8ed/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
github.com/cockroachdb/datadriven v0.0.0-20200714090401-bf6692d28da5/go.mod h1:h6jFvWxBdQXxjopDMZyH2UVceIRfR84bdzbkoKrsWNo=
github.com/cockroachdb/errors v1.2.4/go.mod h1:rQD95gz6FARkaKkQXUksEje/d9a6wBJoCr5oaCLELYA=
github.com/cockroachdb/logtags v0.0.0-20190617123548-eb05cc24525f/go.mod h1:i/u985jwjWRlyHXQbwatDASoW0RMlZ/3i9yJHE2xLkI=
github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk=
github.com/coreos/etcd v3.3.13+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE=
github.com/coreos/go-oidc v2.1.0+incompatible/go.mod h1:CgnwVTmzoESiwO9qyAFEMiHoZ1nMCKZlZ9V6mm3/LKc=
github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk=
github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
github.com/coreos/go-systemd/v22 v22.3.2/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc=
github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA=
github.com/cpuguy83/go-md2man/v2 v2.0.0/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU=
github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
github.com/creack/pty v1.1.11/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/fluxcd/pkg/apis/acl v0.7.0 h1:dMhZJH+g6ZRPjs4zVOAN9vHBd1DcavFgcIFkg5ooOE0=
github.com/fluxcd/pkg/apis/acl v0.7.0/go.mod h1:uv7pXXR/gydiX4MUwlQa7vS8JONEDztynnjTvY3JxKQ=
github.com/fluxcd/pkg/apis/meta v1.12.0 h1:XW15TKZieC2b7MN8VS85stqZJOx+/b8jATQ/xTUhVYg=
github.com/fluxcd/pkg/apis/meta v1.12.0/go.mod h1:+son1Va60x2eiDcTwd7lcctbI6C+K3gM7R+ULmEq1SI=
github.com/fxamacker/cbor/v2 v2.8.0 h1:fFtUGXUzXPHTIUdne5+zzMPTfffl3RD5qYnkY40vtxU=
github.com/fxamacker/cbor/v2 v2.8.0/go.mod h1:vM4b+DJCtHn+zz7h3FFp/hDAI9WNWCsZj23V5ytsSxQ=
github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY=
github.com/go-logr/logr v1.4.2/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
github.com/go-task/slim-sprig/v3 v3.0.0 h1:sUs3vkvUymDpBKi3qH1YSqBQk9+9D/8M2mN1vB6EwHI=
github.com/go-task/slim-sprig/v3 v3.0.0/go.mod h1:W848ghGpv3Qj3dhTPRyJypKRiqCdHZiAzKg9hl15HA8=
github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ=
github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no=
github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815/go.mod h1:WwZ+bS3ebgob9U8Nd0kOddGdZWjyMGR8Wziv+TBNwSE=
github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk=
github.com/elazarl/goproxy v0.0.0-20180725130230-947c36da3153/go.mod h1:/Zj4wYkgs4iZTTu3o/KG3Itv/qCCa8VVMlb3i9OVuzc=
github.com/emicklei/go-restful v0.0.0-20170410110728-ff4f55a20633/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs=
github.com/emicklei/go-restful v2.9.5+incompatible/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs=
github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98=
github.com/envoyproxy/go-control-plane v0.9.7/go.mod h1:cwu0lG7PUMfa9snN8LXBig5ynNVH9qI8YYLbd1fK2po=
github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk=
github.com/envoyproxy/go-control-plane v0.9.9-0.20210217033140-668b12f5399d/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk=
github.com/envoyproxy/go-control-plane v0.9.9-0.20210512163311-63b5d3c536b0/go.mod h1:hliV/p42l8fGbc6Y9bQ70uLwIvmJyVE5k4iMKlh8wCQ=
github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
github.com/evanphx/json-patch v0.5.2/go.mod h1:ZWS5hhDbVDyob71nXKNL0+PWn6ToqBHMikGIFbs31qQ=
github.com/evanphx/json-patch v4.12.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk=
github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4=
github.com/felixge/httpsnoop v1.0.1/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U=
github.com/fluxcd/pkg/apis/acl v0.0.3 h1:Lw0ZHdpnO4G7Zy9KjrzwwBmDZQuy4qEjaU/RvA6k1lc=
github.com/fluxcd/pkg/apis/acl v0.0.3/go.mod h1:XPts6lRJ9C9fIF9xVWofmQwftvhY25n1ps7W9xw0XLU=
github.com/fluxcd/pkg/apis/meta v0.10.2 h1:pnDBBEvfs4HaKiVAYgz+e/AQ8dLvcgmVfSeBroZ/KKI=
github.com/fluxcd/pkg/apis/meta v0.10.2/go.mod h1:KQ2er9xa6koy7uoPMZjIjNudB5p4tXs+w0GO6fRcy7I=
github.com/form3tech-oss/jwt-go v3.2.2+incompatible/go.mod h1:pbq4aXjuKjdthFRnoDwaVPLA+WlJuPGy+QneDUgJi2k=
github.com/form3tech-oss/jwt-go v3.2.3+incompatible/go.mod h1:pbq4aXjuKjdthFRnoDwaVPLA+WlJuPGy+QneDUgJi2k=
github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ=
github.com/fsnotify/fsnotify v1.5.1 h1:mZcQUHVQUQWoPXXtuf9yuEXKudkV2sx1E06UadKWpgI=
github.com/fsnotify/fsnotify v1.5.1/go.mod h1:T3375wBYaZdLLcVNkcVbzGHY7f1l/uK5T5Ai1i3InKU=
github.com/getkin/kin-openapi v0.76.0/go.mod h1:660oXbgy5JFMKreazJaQTw7o+X00qeSyhcnluiMv+Xg=
github.com/getsentry/raven-go v0.2.0/go.mod h1:KungGk8q33+aIAZUIVWZDr2OfAEBsO49PX4NzFV5kcQ=
github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU=
github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8=
github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8=
github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
github.com/go-kit/log v0.1.0/go.mod h1:zbhenjAZHb184qTLMA9ZjW7ThYL0H2mk7Q6pNt4vbaY=
github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE=
github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk=
github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A=
github.com/go-logr/logr v0.1.0/go.mod h1:ixOQHD9gLJUVQQ2ZOR7zLEifBX6tGkNJF4QyIY7sIas=
github.com/go-logr/logr v0.2.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU=
github.com/go-logr/logr v1.2.0/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
github.com/go-logr/logr v1.2.2 h1:ahHml/yUpnlb96Rp8HCvtYVPY8ZYpxq3g7UYchIYwbs=
github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
github.com/go-logr/zapr v1.2.0/go.mod h1:Qa4Bsj2Vb+FAVeAKsLD8RLQ+YRJB8YDmOAKxaBQf7Ro=
github.com/go-openapi/jsonpointer v0.19.3/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg=
github.com/go-openapi/jsonpointer v0.19.5/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg=
github.com/go-openapi/jsonreference v0.19.3/go.mod h1:rjx6GuL8TTa9VaixXglHmQmIL98+wF9xc8zWvFonSJ8=
github.com/go-openapi/jsonreference v0.19.5/go.mod h1:RdybgQwPxbL4UEjuAruzK1x3nE69AqPYEJeo/TWfEeg=
github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk=
github.com/go-openapi/swag v0.19.14/go.mod h1:QYRuS/SOXUCsnplDa677K7+DxSOj6IPNl/eQntq43wQ=
github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0/go.mod h1:fyg7847qk6SyHyPtNmDHnmrv/HOrqktSC+C9fM+CJOE=
github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4=
github.com/gogo/protobuf v1.3.1/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o=
github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q=
github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q=
github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8=
github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU=
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
github.com/golang/glog v1.0.0/go.mod h1:EWib/APOK0SL3dFbYqvxE3UYd8E6s1ouQ7iEp/0LWV4=
github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y=
github.com/golang/mock v1.4.0/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw=
github.com/golang/mock v1.4.1/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw=
github.com/golang/mock v1.4.3/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw=
github.com/golang/mock v1.4.4/go.mod h1:l3mdAwkq5BuhzHwde/uurv3sEJeZMXNpwsxVWU71h+4=
github.com/golang/mock v1.5.0/go.mod h1:CWnOUgYIOo4TcNZ0wHX3YZCqsaM1I1Jvs6v3mP3KVu8=
github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw=
github.com/golang/protobuf v1.3.4/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw=
github.com/golang/protobuf v1.3.5/go.mod h1:6O5/vntMXwX2lRkT1hjjk0nAC1IDOTvTlVgjlRvqsdk=
github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8=
github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA=
github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs=
github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w=
github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0=
github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8=
github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk=
github.com/golang/protobuf v1.5.1/go.mod h1:DopwsBzvsk0Fs44TXzsVbJyPhcCPeIwnvohx4u74HPM=
github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=
github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
github.com/google/btree v1.0.1/go.mod h1:xXMiIv4Fb/0kKde4SpL7qlzvu5cMJDRkFDxJfI9uaxA=
github.com/google/cel-go v0.9.0/go.mod h1:U7ayypeSkw23szu4GaQTPJGx66c20mx8JklMSxrmI1w=
github.com/google/cel-spec v0.6.0/go.mod h1:Nwjgxy5CbjlPrtCWjeDjUyKMl8w41YBYGjsyDdqk0xA=
github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.4.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.6 h1:BKbKCqvP6I+rmFHt06ZmyQtvB8xAkWdhFyr0ZUNZcxQ=
github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
github.com/google/pprof v0.0.0-20241029153458-d1b30febd7db h1:097atOisP2aRj7vFgYQBbFN4U4JNXUNYpxael3UzMyo=
github.com/google/pprof v0.0.0-20241029153458-d1b30febd7db/go.mod h1:vavhavw2zAxS5dIdcRluK6cSGGPlZynqzFM8NdvU144=
github.com/google/gofuzz v1.1.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0=
github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs=
github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0=
github.com/google/martian/v3 v3.1.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0=
github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
github.com/google/pprof v0.0.0-20200212024743-f11f1df84d12/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
github.com/google/pprof v0.0.0-20200430221834-fc25d7d30c6d/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
github.com/google/pprof v0.0.0-20201023163331-3e6fc7fc9c4c/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
github.com/google/pprof v0.0.0-20201203190320-1bf35d6f28c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
github.com/google/pprof v0.0.0-20210122040257-d980be63207e/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
github.com/google/pprof v0.0.0-20210226084205-cbba55b83ad5/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI=
github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg=
github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk=
github.com/googleapis/gnostic v0.5.1/go.mod h1:6U4PtQXGIEt/Z3h5MAT7FNofLnw9vXk2cUuW7uA/OeU=
github.com/googleapis/gnostic v0.5.5/go.mod h1:7+EbHbldMins07ALC74bsA81Ovc97DwqyJO1AENw9kA=
github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY=
github.com/gorilla/mux v1.8.0/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So=
github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA=
github.com/grpc-ecosystem/go-grpc-middleware v1.0.0/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs=
github.com/grpc-ecosystem/go-grpc-middleware v1.3.0/go.mod h1:z0ButlSOZa5vEBq9m2m2hlwIgKw+rp3sdCBRoJY+30Y=
github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk=
github.com/grpc-ecosystem/grpc-gateway v1.9.0/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY=
github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw=
github.com/hashicorp/consul/api v1.1.0/go.mod h1:VmuI/Lkw1nC05EYQWNKwWGbkg+FbDBtguAZLlVdkD9Q=
github.com/hashicorp/consul/sdk v0.1.1/go.mod h1:VKf9jXwCTEY1QZP2MOLRhb5i/I/ssyNV1vwHyQBF0x8=
github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80=
github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60=
github.com/hashicorp/go-msgpack v0.5.3/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM=
github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk=
github.com/hashicorp/go-rootcerts v1.0.0/go.mod h1:K6zTfqpRlCUIjkwsN4Z+hiSfzSTQa6eBIzfwKfwNnHU=
github.com/hashicorp/go-sockaddr v1.0.0/go.mod h1:7Xibr9yA9JjQq1JpNB2Vw7kxv8xerXegt+ozgdvDeDU=
github.com/hashicorp/go-syslog v1.0.0/go.mod h1:qPfqrKkXGihmCqbJM2mZgkZGvKG1dFdvsLplgctolz4=
github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro=
github.com/hashicorp/go-uuid v1.0.1/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro=
github.com/hashicorp/go.net v0.0.1/go.mod h1:hjKkEWcCURg++eb33jQU7oqQcI9XDCnUzHA0oac0k90=
github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ=
github.com/hashicorp/logutils v1.0.0/go.mod h1:QIAnNjmIWmVIIkWDTG1z5v++HQmx9WQRO+LraFDTW64=
github.com/hashicorp/mdns v1.0.0/go.mod h1:tL+uN++7HEJ6SQLQ2/p+z2pH24WQKWjBPkE0mNTz8vQ=
github.com/hashicorp/memberlist v0.1.3/go.mod h1:ajVTdAv/9Im8oMAAj5G31PhhMCZJV2pPBoIllUwCN7I=
github.com/hashicorp/serf v0.8.2/go.mod h1:6hOLApaqBFA1NXqRQAsxw9QxuDEvNxSQRwA/JwenrHc=
github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
github.com/imdario/mergo v0.3.5/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA=
github.com/imdario/mergo v0.3.12/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA=
github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8=
github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI=
github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo=
github.com/jonboulle/clockwork v0.2.2/go.mod h1:Pkfl5aHPm1nk2H9h0bjmnJD/BcgbGXUBGnn1kMkgxc8=
github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y=
github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4=
github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU=
github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
github.com/json-iterator/go v1.1.11/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM=
github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo=
github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU=
github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk=
github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU=
github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w=
github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM=
github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q=
github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00=
github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8=
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE=
github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk=
github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
github.com/kr/fs v0.1.0/go.mod h1:FFnZGqtBN9Gxj7eW1uZ42v5BccTP0vu6NEaFoC2HwRg=
github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc=
github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
github.com/magiconair/properties v1.8.1/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ=
github.com/magiconair/properties v1.8.5/go.mod h1:y3VJvCyxH9uVvJTWEGAELF3aiYNyPKd5NZ3oSwXrF60=
github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
github.com/mailru/easyjson v0.7.6/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc=
github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU=
github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4=
github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4=
github.com/miekg/dns v1.0.14/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg=
github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc=
github.com/mitchellh/go-homedir v1.0.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
github.com/mitchellh/go-testing-interface v1.0.0/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI=
github.com/mitchellh/gox v0.4.0/go.mod h1:Sd9lOJ0+aimLBi73mGofS1ycjY8lL3uZM3JPS42BGNg=
github.com/mitchellh/iochan v1.0.0/go.mod h1:JwYml1nuB7xOzsp52dPpHFffvOCDupsG0QubkSMEySY=
github.com/mitchellh/mapstructure v0.0.0-20160808181253-ca63d7c062ee/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
github.com/mitchellh/mapstructure v1.4.1/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
github.com/moby/spdystream v0.2.0/go.mod h1:f7i0iNDQJ059oMTcWxx8MA/zKFIuD/lY+0GqbN2Wy8c=
github.com/moby/term v0.0.0-20210610120745-9d4ed1856297/go.mod h1:vgPCkQMyxTZ7IDy8SXRufE172gr8+K/JE/7hHFxHW3A=
github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg=
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M=
github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk=
github.com/onsi/ginkgo/v2 v2.22.0 h1:Yed107/8DjTr0lKCNt7Dn8yQ6ybuDRQoMGrNFKzMfHg=
github.com/onsi/ginkgo/v2 v2.22.0/go.mod h1:7Du3c42kxCUegi0IImZ1wUQzMBVecgIHjR1C+NkhLQo=
github.com/onsi/gomega v1.36.1 h1:bJDPBO7ibjxcbHMgSCoo4Yj18UWbKDlLwX1x9sybDcw=
github.com/onsi/gomega v1.36.1/go.mod h1:PvZbdDc8J6XJEpDK4HCuRBm8a6Fzp9/DmhC9C7yFlog=
github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsKMEsNbx1VGcRFpLqf3715MtcvvzbA=
github.com/munnerz/goautoneg v0.0.0-20120707110453-a547fc61f48d/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ=
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ=
github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw=
github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e h1:fD57ERR4JtEqsWbfPhv4DMiApHyliiK5xCTNVSPiaAs=
github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno=
github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A=
github.com/nxadm/tail v1.4.8 h1:nPr65rt6Y5JFSKQO7qToXr7pePgD6Gwiw05lkbyAQTE=
github.com/nxadm/tail v1.4.8/go.mod h1:+ncqLTQzXmGhMZNUePPaPqPvBxHAIsmXswZKocGu+AU=
github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U=
github.com/onsi/ginkgo v0.0.0-20170829012221-11459a886d9c/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk=
github.com/onsi/ginkgo v1.14.0/go.mod h1:iSB4RoI2tjJc9BBv4NKIKWKya62Rps+oPG/Lv9klQyY=
github.com/onsi/ginkgo v1.16.4/go.mod h1:dX+/inL/fNMqNlz0e9LfyB9TswhZpCVdJM/Z6Vvnwo0=
github.com/onsi/ginkgo v1.16.5 h1:8xi0RTUf59SOSfEtZMvwTvXYMzG4gV23XVHOZiXNtnE=
github.com/onsi/ginkgo v1.16.5/go.mod h1:+E8gABHa3K6zRBolWtd+ROzc/U5bkGt0FwiG042wbpU=
github.com/onsi/gomega v0.0.0-20170829124025-dcabb60a477c/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA=
github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY=
github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo=
github.com/onsi/gomega v1.17.0 h1:9Luw4uT5HTjHTN8+aNcSThgH1vdXnmdJ8xIfZ4wyTRE=
github.com/onsi/gomega v1.17.0/go.mod h1:HnhC7FXeEQY45zxNK3PPoIUhzk/80Xly9PcubAlGdZY=
github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o=
github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc=
github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic=
github.com/pelletier/go-toml v1.9.3/go.mod h1:u1nR/EPcESfeI/szUZKdtJ0xRNbUoANCkoOuaOx1Y+c=
github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU=
github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pkg/sftp v1.10.1/go.mod h1:lYOWFsE0bwd1+KfKJaKeuokY15vzFx25BLbzYYoAxZI=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/rogpeppe/go-internal v1.9.0/go.mod h1:WtVeX8xhTBvf0smdhujwtBcq4Qrzq/fJaraNFVN+nFs=
github.com/rogpeppe/go-internal v1.13.1 h1:KvO1DLK/DRN07sQ1LQKScxyZJuNnedQ5/wKSR38lUII=
github.com/rogpeppe/go-internal v1.13.1/go.mod h1:uMEvuHeurkdAXX61udpOXGD/AzZDWNMNyH2VO9fmH0o=
github.com/spf13/pflag v1.0.6 h1:jFzHGLGAlb3ruxLB8MhbI6A8+AQX/2eW4qeyNZXNp2o=
github.com/spf13/pflag v1.0.6/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI=
github.com/pquerna/cachecontrol v0.0.0-20171018203845-0dec1b30a021/go.mod h1:prYjPmNq4d1NPVmpShWobRqXY3q7Vp+80DqgxxUrUIA=
github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
github.com/prometheus/client_golang v0.9.3/go.mod h1:/TN21ttK/J9q6uSwhBd54HahCDft0ttaMvbicHlPoso=
github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo=
github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M=
github.com/prometheus/client_golang v1.11.0/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0=
github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
github.com/prometheus/common v0.0.0-20181113130724-41aa239b4cce/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro=
github.com/prometheus/common v0.4.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo=
github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc=
github.com/prometheus/common v0.28.0/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls=
github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
github.com/prometheus/procfs v0.0.0-20190507164030-5867b95ac084/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU=
github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA=
github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU=
github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg=
github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ=
github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts=
github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc=
github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc=
github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE=
github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88=
github.com/sirupsen/logrus v1.7.0/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0=
github.com/sirupsen/logrus v1.8.1/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0=
github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc=
github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA=
github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM=
github.com/soheilhy/cmux v0.1.5/go.mod h1:T7TcVDs9LWfQgPlPsdngu6I6QIoyIFZDDC6sNE1GqG0=
github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA=
github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ=
github.com/spf13/afero v1.2.2/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk=
github.com/spf13/afero v1.6.0/go.mod h1:Ai8FlHk4v/PARR026UzYexafAt9roJ7LcLMAmO6Z93I=
github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE=
github.com/spf13/cast v1.3.1/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE=
github.com/spf13/cobra v1.1.3/go.mod h1:pGADOWyqRD/YMrPZigI/zbliZ2wVD/23d+is3pSWzOo=
github.com/spf13/cobra v1.2.1/go.mod h1:ExllRjgxM/piMAM+3tAZvg8fsklGAf3tPfi+i8t68Nk=
github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo=
github.com/spf13/jwalterweatherman v1.1.0/go.mod h1:aNWZUN0dPAAO/Ljvb5BEdw96iTZ0EXowPYD95IqWIGo=
github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA=
github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
github.com/spf13/viper v1.7.0/go.mod h1:8WkrPz2fc9jxqZNCJI/76HCieCp4Q8HaLFoCha5qpdg=
github.com/spf13/viper v1.8.1/go.mod h1:o0Pch8wJ9BVSWGQMbra6iw0oQ5oktSIBaujf1rJH9Ns=
github.com/stoewer/go-strcase v1.2.0/go.mod h1:IBiWB2sKIp3wVVQ3Y035++gc+knqhUQag1KpM8ahLw8=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA=
github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
github.com/x448/float16 v0.8.4 h1:qLwI1I70+NjRFUR3zs1JPUCgaCXSh3SW62uAKT1mSBM=
github.com/x448/float16 v0.8.4/go.mod h1:14CWIYCyZA/cWjXOioeEpHeN/83MdbZDRQHoFcYsOfg=
github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA=
github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.7.0 h1:nwc3DEeHmmLAfoZucVR881uASk0Mfjw8xYJ99tb5CcY=
github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/subosito/gotenv v1.2.0/go.mod h1:N0PQaV/YGNqwC0u51sEeR/aUtSLEXKX9iv69rRypqCw=
github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U=
github.com/tmc/grpc-websocket-proxy v0.0.0-20201229170055-e5319fda7802/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U=
github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU=
github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k=
github.com/yuin/goldmark v1.4.0/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k=
go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU=
go.etcd.io/bbolt v1.3.6/go.mod h1:qXsaaIqmgQH0T+OPdb99Bf+PKfBBQVAdyD6TY9G8XM4=
go.etcd.io/etcd/api/v3 v3.5.0/go.mod h1:cbVKeC6lCfl7j/8jBhAK6aIYO9XOjdptoxU/nLQcPvs=
go.etcd.io/etcd/client/pkg/v3 v3.5.0/go.mod h1:IJHfcCEKxYu1Os13ZdwCwIUTUVGYTSAM3YSwc9/Ac1g=
go.etcd.io/etcd/client/v2 v2.305.0/go.mod h1:h9puh54ZTgAKtEbut2oe9P4L/oqKCVB6xsXlzd7alYQ=
go.etcd.io/etcd/client/v3 v3.5.0/go.mod h1:AIKXXVX/DQXtfTEqBryiLTUXwON+GuvO6Z7lLS/oTh0=
go.etcd.io/etcd/pkg/v3 v3.5.0/go.mod h1:UzJGatBQ1lXChBkQF0AuAtkRQMYnHubxAEYIrC3MSsE=
go.etcd.io/etcd/raft/v3 v3.5.0/go.mod h1:UFOHSIvO/nKwd4lhkwabrTD3cqW5yVyYYf/KlD00Szc=
go.etcd.io/etcd/server/v3 v3.5.0/go.mod h1:3Ah5ruV+M+7RZr0+Y/5mNLwC+eQlni+mQmOVdCRJoS4=
go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU=
go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8=
go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk=
go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E=
go.opentelemetry.io/contrib v0.20.0/go.mod h1:G/EtFaa6qaN7+LxqfIAT3GiZa7Wv5DTBUzl5H4LY0Kc=
go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.20.0/go.mod h1:oVGt1LRbBOBq1A5BQLlUg9UaU/54aiHw8cgjV3aWZ/E=
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.20.0/go.mod h1:2AboqHi0CiIZU0qwhtUfCYD1GeUzvvIXWNkhDt7ZMG4=
go.opentelemetry.io/otel v0.20.0/go.mod h1:Y3ugLH2oa81t5QO+Lty+zXf8zC9L26ax4Nzoxm/dooo=
go.opentelemetry.io/otel/exporters/otlp v0.20.0/go.mod h1:YIieizyaN77rtLJra0buKiNBOm9XQfkPEKBeuhoMwAM=
go.opentelemetry.io/otel/metric v0.20.0/go.mod h1:598I5tYlH1vzBjn+BTuhzTCSb/9debfNp6R3s7Pr1eU=
go.opentelemetry.io/otel/oteltest v0.20.0/go.mod h1:L7bgKf9ZB7qCwT9Up7i9/pn0PWIa9FqQ2IQ8LoxiGnw=
go.opentelemetry.io/otel/sdk v0.20.0/go.mod h1:g/IcepuwNsoiX5Byy2nNV0ySUF1em498m7hBWC279Yc=
go.opentelemetry.io/otel/sdk/export/metric v0.20.0/go.mod h1:h7RBNMsDJ5pmI1zExLi+bJK+Dr8NQCh0qGhm1KDnNlE=
go.opentelemetry.io/otel/sdk/metric v0.20.0/go.mod h1:knxiS8Xd4E/N+ZqKmUPf3gTTZ4/0TjTXukfxjzSTpHE=
go.opentelemetry.io/otel/trace v0.20.0/go.mod h1:6GjCW8zgDjwGHGa6GkyeB8+/5vjT16gUEi0Nf1iBdgw=
go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI=
go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc=
go.uber.org/goleak v1.1.10/go.mod h1:8a7PlsEVH3e/a/GLqe5IIrQx6GzcnRmZEufDUTk4A7A=
go.uber.org/goleak v1.1.11-0.20210813005559-691160354723/go.mod h1:cwTWslyiVhfpKIDGSZEM2HlOvcqm+tG4zioyIeLoqMQ=
go.uber.org/goleak v1.1.12/go.mod h1:cwTWslyiVhfpKIDGSZEM2HlOvcqm+tG4zioyIeLoqMQ=
go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0=
go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU=
go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q=
go.uber.org/zap v1.17.0/go.mod h1:MXVU+bhUf/A7Xi2HNOnopQOrmycQ5Ih87HtOu4q5SSo=
go.uber.org/zap v1.19.0/go.mod h1:xg/QME4nWcxGxrpdeYfq7UvYrLh66cuVKdrbD1XF/NI=
go.uber.org/zap v1.19.1/go.mod h1:j3DNczoxDZroyBnOT1L/Q79cfUMGZxlv/9dzN7SM1rI=
golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20181029021203-45a5f77698d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20190820162420-60c769a6c586/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20201002170205-7f63de1d35b0/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20210817164053-32db794688a5/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8=
golang.org/x/exp v0.0.0-20190829153037-c13cbed26979/go.mod h1:86+5VVa7VpoJ4kLfm080zCjGlMRFzhUhsZKEZO7MGek=
golang.org/x/exp v0.0.0-20191030013958-a1ab85dbe136/go.mod h1:JXzH8nQsPlswgeRAPE3MuO9GYsAcnJvJ4vnMwN/5qkY=
golang.org/x/exp v0.0.0-20191129062945-2f5052295587/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=
golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=
golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=
golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM=
golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU=
golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js=
golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0=
golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU=
golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
golang.org/x/lint v0.0.0-20190409202823-959b441ac422/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
golang.org/x/lint v0.0.0-20190909230951-414d861bb4ac/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRuDixDT3tpyyb+LUpUlRWLxfhWrs=
golang.org/x/lint v0.0.0-20200130185559-910be7a94367/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
golang.org/x/lint v0.0.0-20201208152925-83fdc39ff7b5/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
golang.org/x/lint v0.0.0-20210508222113-6edffad5e616/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE=
golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o=
golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc=
golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY=
golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg=
golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg=
golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.4.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20181023162649-9b4f9f5ad519/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20181201002055-351d144fa1fc/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20181220203305-927f97764cc3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks=
golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20190827160401-ba9fcec4b297/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200222125558-5a598a2470a0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200301022130-244492dfa37a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
golang.org/x/net v0.0.0-20200501053045-e0ff5e5a1de5/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
golang.org/x/net v0.0.0-20200506145744-7e3656a0809f/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
golang.org/x/net v0.0.0-20200513185701-a91f0712d120/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
golang.org/x/net v0.0.0-20200520182314-0ba52f642ac2/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
golang.org/x/net v0.40.0 h1:79Xs7wF06Gbdcg4kdCCIQArK11Z1hr5POQ6+fIYHNuY=
golang.org/x/net v0.40.0/go.mod h1:y0hY0exeL2Pku80/zKK7tpntoX23cqL3Oa6njdgRtds=
golang.org/x/net v0.0.0-20201031054903-ff519b6c9102/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
golang.org/x/net v0.0.0-20201202161906-c7110b5ffcbb/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
golang.org/x/net v0.0.0-20201209123823-ac852fbbde11/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
golang.org/x/net v0.0.0-20210119194325-5f4716e94777/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
golang.org/x/net v0.0.0-20210316092652-d523dce5a7f4/go.mod h1:RBQZq4jEuRlivfhVLdyRGr576XBO4/greRjx4P4O3yc=
golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM=
golang.org/x/net v0.0.0-20210428140749-89ef3d95e781/go.mod h1:OJAsFXCWl8Ukc7SiCT/9KSuxbyM7479/AVlXFRxuMCk=
golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/net v0.0.0-20210805182204-aaa1db679c0d/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/net v0.0.0-20210825183410-e898025ed96a/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/net v0.0.0-20211209124913-491a49abca63/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/net v0.0.0-20211215060638-4ddde0e984e9 h1:kmreh1vGI63l2FxOAYS3Yv6ATsi7lSTuwNSVbGfJV9I=
golang.org/x/net v0.0.0-20211215060638-4ddde0e984e9/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/oauth2 v0.0.0-20200902213428-5d25da1a8d43/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
golang.org/x/oauth2 v0.0.0-20201109201403-9fd604954f58/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
golang.org/x/oauth2 v0.0.0-20201208152858-08078c50e5b5/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
golang.org/x/oauth2 v0.0.0-20210218202405-ba52d332ba99/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
golang.org/x/oauth2 v0.0.0-20210220000619-9bb904979d93/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
golang.org/x/oauth2 v0.0.0-20210313182246-cd4f82c27b84/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
golang.org/x/oauth2 v0.0.0-20210402161424-2e8d93401602/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
golang.org/x/oauth2 v0.0.0-20210819190943-2bc19b11175f/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20181026203630-95b1ffbd15a5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200106162015-b016eb3dc98e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200331124033-c3d80250170d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200501052902-10377860bb8e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200511232937-7e40ca221e25/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200515095857-1151b9dac4a9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200519105757-fe76b779f299/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200523222454-059865788121/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200615200032-f1bc736245b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200625212154-ddb9806d33ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200803210538-64077c9b5642/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200831180312-196b9ba8737a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200905004654-be1d3432aa8f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200923182605-d9f96fdee20d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.33.0 h1:q3i8TbbEz+JRD9ywIRlyRAQbM0qF7hu24q3teo2hbuw=
golang.org/x/sys v0.33.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k=
golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20201201145000-ef89a241ccb3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210104204734-6f8348627aad/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210112080510-489259a85091/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210220050731-9a76102bfb43/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210305230114-8fe3ee5dd75b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210315160823-c6e025ad8005/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210320140829-1e4c9ba3b0c4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210403161142-5e06dd20ab57/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210809222454-d867a43fc93e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210831042530-f4d43177bf5e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20211029165221-6e7872819dc8 h1:M69LAlWZCshgp0QSzyDcSsSIejIEeuaCVpmwcKwyLMk=
golang.org/x/sys v0.0.0-20211029165221-6e7872819dc8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/term v0.0.0-20210615171337-6886f2dfbf5b/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.25.0 h1:qVyWApTSYLk/drJRO5mDlNYskwQznZmkpV2c8q9zls4=
golang.org/x/text v0.25.0/go.mod h1:WEdwpYrmk1qmdHvhkSTNPm3app7v4rsT8F2UD6+VHIA=
golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.7 h1:olpwvP2KacW1ZWvsR7uQhoyTYvKAupfQrRGBFM352Gk=
golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20210220033141-f8bda1e9f3ba/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20210723032227-1f47c861a9ac/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20181030221726-6c7e314b6563/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY=
golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
golang.org/x/tools v0.0.0-20190624222133-a101b041ded4/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191108193012-7d206e10da11/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191112195655-aa38f8e97acc/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191113191852-77e3bb0ad9e7/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191130070609-6e064ea0cf2d/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191216173652-a0e659d51361/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
golang.org/x/tools v0.0.0-20191227053925-7b8e75db28f4/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
golang.org/x/tools v0.0.0-20200117161641-43d50277825c/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
golang.org/x/tools v0.0.0-20200122220014-bf1340f18c4a/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
golang.org/x/tools v0.0.0-20200204074204-1cc6d1ef6c74/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
golang.org/x/tools v0.0.0-20200207183749-b753a1ba74fa/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
golang.org/x/tools v0.0.0-20200212150539-ea181f53ac56/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
golang.org/x/tools v0.0.0-20200224181240-023911ca70b2/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
golang.org/x/tools v0.0.0-20200227222343-706bc42d1f0d/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
golang.org/x/tools v0.0.0-20200304193943-95d2e580d8eb/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw=
golang.org/x/tools v0.0.0-20200312045724-11d5b4c81c7d/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw=
golang.org/x/tools v0.0.0-20200331025713-a30bf2db82d4/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8=
golang.org/x/tools v0.0.0-20200501065659-ab2804fb9c9d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
golang.org/x/tools v0.0.0-20200505023115-26f46d2f7ef8/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
golang.org/x/tools v0.0.0-20200512131952-2bc93b1c0c88/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
golang.org/x/tools v0.0.0-20200515010526-7d3b6ebf133d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
golang.org/x/tools v0.0.0-20200618134242-20370b0cb4b2/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
golang.org/x/tools v0.0.0-20200729194436-6467de6f59a7/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA=
golang.org/x/tools v0.0.0-20200804011535-6c149bb5ef0d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA=
golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA=
golang.org/x/tools v0.0.0-20200904185747-39188db58858/go.mod h1:Cj7w3i3Rnn0Xh82ur9kSqwfTHTeVxaDqrfMjpcNT6bE=
golang.org/x/tools v0.0.0-20201110124207-079ba7bd75cd/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
golang.org/x/tools v0.0.0-20201201161351-ac6f37ff4c2a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
golang.org/x/tools v0.0.0-20201208233053-a543418bbed2/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
golang.org/x/tools v0.0.0-20201224043029-2b0845dc783e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
golang.org/x/tools v0.0.0-20210105154028-b0ab187a4818/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
golang.org/x/tools v0.26.0 h1:v/60pFQmzmT9ExmjDv2gGIfi3OqfKoEP6I5+umXlbnQ=
golang.org/x/tools v0.26.0/go.mod h1:TPVVj70c7JJ3WCazhD8OdXcZg/og+b9+tH/KxylGwH0=
golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0=
golang.org/x/tools v0.1.2/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
golang.org/x/tools v0.1.6-0.20210820212750-d4cc65f0b2ff/go.mod h1:YD9qOF0M9xpSpdWTBbzEl5e/RnCefISl8E5Noe10jFM=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 h1:go1bK/D/BFZV2I8cIQd1NKEZ+0owSTG1fDTci4IqFcE=
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
gomodules.xyz/jsonpatch/v2 v2.2.0/go.mod h1:WXp+iVDkoLQqPudfQ9GBlwB2eZ5DKOnjQZCYdOS8GPY=
google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE=
google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M=
google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg=
google.golang.org/api v0.9.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg=
google.golang.org/api v0.13.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI=
google.golang.org/api v0.14.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI=
google.golang.org/api v0.15.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI=
google.golang.org/api v0.17.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
google.golang.org/api v0.18.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
google.golang.org/api v0.19.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
google.golang.org/api v0.20.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
google.golang.org/api v0.22.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
google.golang.org/api v0.24.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE=
google.golang.org/api v0.28.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE=
google.golang.org/api v0.29.0/go.mod h1:Lcubydp8VUV7KeIHD9z2Bys/sm/vGKnG1UHuDBSrHWM=
google.golang.org/api v0.30.0/go.mod h1:QGmEvQ87FHZNiUVJkT14jQNYJ4ZJjdRF23ZXz5138Fc=
google.golang.org/api v0.35.0/go.mod h1:/XrVsuzM0rZmrsbjJutiuftIzeuTQcEeaYcSk/mQ1dg=
google.golang.org/api v0.36.0/go.mod h1:+z5ficQTmoYpPn8LCUNVpK5I7hwkpjbcgqA7I34qYtE=
google.golang.org/api v0.40.0/go.mod h1:fYKFpnQN0DsDSKRVRcQSDQNtqWPfM9i+zNPxepjRCQ8=
google.golang.org/api v0.41.0/go.mod h1:RkxM5lITDfTzmyKFPt+wGrCJbVfniCr2ool8kTBzRTU=
google.golang.org/api v0.43.0/go.mod h1:nQsDGjRXMo4lvh5hP0TKqF244gqhGcr/YSIykhUk/94=
google.golang.org/api v0.44.0/go.mod h1:EBOGZqzyhtvMDoxwS97ctnh0zUmYY6CxqXsc1AvkYD8=
google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0=
google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc=
google.golang.org/appengine v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc=
google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc=
google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
google.golang.org/genproto v0.0.0-20190911173649-1774047e7e51/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8=
google.golang.org/genproto v0.0.0-20191108220845-16a3f7862a1a/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
google.golang.org/genproto v0.0.0-20191115194625-c23dd37a84c9/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
google.golang.org/genproto v0.0.0-20191216164720-4f79533eabd1/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
google.golang.org/genproto v0.0.0-20191230161307-f3c370f40bfb/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
google.golang.org/genproto v0.0.0-20200115191322-ca5a22157cba/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
google.golang.org/genproto v0.0.0-20200122232147-0452cf42e150/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
google.golang.org/genproto v0.0.0-20200204135345-fa8e72b47b90/go.mod h1:GmwEX6Z4W5gMy59cAlVYjN9JhxgbQH6Gn+gFDQe2lzA=
google.golang.org/genproto v0.0.0-20200212174721-66ed5ce911ce/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
google.golang.org/genproto v0.0.0-20200224152610-e50cd9704f63/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
google.golang.org/genproto v0.0.0-20200228133532-8c2c7df3a383/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
google.golang.org/genproto v0.0.0-20200305110556-506484158171/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
google.golang.org/genproto v0.0.0-20200312145019-da6875a35672/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
google.golang.org/genproto v0.0.0-20200331122359-1ee6d9798940/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
google.golang.org/genproto v0.0.0-20200423170343-7949de9c1215/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
google.golang.org/genproto v0.0.0-20200430143042-b979b6f78d84/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
google.golang.org/genproto v0.0.0-20200511104702-f5ebc3bea380/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
google.golang.org/genproto v0.0.0-20200515170657-fc4c6c6a6587/go.mod h1:YsZOwe1myG/8QRHRsmBRE1LrgQY60beZKjly0O1fX9U=
google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo=
google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA=
google.golang.org/genproto v0.0.0-20200729003335-053ba62fc06f/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/genproto v0.0.0-20200904004341-0bd0a958aa1d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/genproto v0.0.0-20201019141844-1ed22bb0c154/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/genproto v0.0.0-20201102152239-715cce707fb0/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/genproto v0.0.0-20201109203340-2640f1f9cdfb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/genproto v0.0.0-20201201144952-b05cb90ed32e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/genproto v0.0.0-20201210142538-e3217bee35cc/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/genproto v0.0.0-20201214200347-8c77b98c765d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/genproto v0.0.0-20210222152913-aa3ee6e6a81c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/genproto v0.0.0-20210303154014-9728d6b83eeb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/genproto v0.0.0-20210310155132-4ce2db91004e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/genproto v0.0.0-20210319143718-93e7006c17a6/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/genproto v0.0.0-20210402141018-6c239bbf2bb1/go.mod h1:9lPAdzaEmUacj36I+k7YKbEc5CXzPIeORRgDAUOu28A=
google.golang.org/genproto v0.0.0-20210602131652-f16073e35f0c/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0=
google.golang.org/genproto v0.0.0-20210831024726-fe130286e0e2/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY=
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38=
google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM=
google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY=
google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
google.golang.org/grpc v1.28.0/go.mod h1:rpkK4SK4GF4Ach/+MFLZUBavHOvF2JJB5uozKKal+60=
google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk=
google.golang.org/grpc v1.30.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak=
google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak=
google.golang.org/grpc v1.31.1/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak=
google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0=
google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc=
google.golang.org/grpc v1.34.0/go.mod h1:WotjhfgOW/POjDeRt8vscBtXq+2VjORFy659qA51WJ8=
google.golang.org/grpc v1.35.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU=
google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU=
google.golang.org/grpc v1.36.1/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU=
google.golang.org/grpc v1.37.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM=
google.golang.org/grpc v1.38.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM=
google.golang.org/grpc v1.40.0/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34=
google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=
google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE=
google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo=
google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4=
google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c=
google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw=
google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f h1:BLraFXnmrev5lT+xlilqcH8XK9/i0At2xKjWk4p6zsU=
gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI=
gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys=
gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc=
gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw=
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
k8s.io/api v0.33.0 h1:yTgZVn1XEe6opVpP1FylmNrIFWuDqe2H0V8CT5gxfIU=
k8s.io/api v0.33.0/go.mod h1:CTO61ECK/KU7haa3qq8sarQ0biLq2ju405IZAd9zsiM=
k8s.io/apimachinery v0.33.0 h1:1a6kHrJxb2hs4t8EE5wuR/WxKDwGN1FKH3JvDtA0CIQ=
k8s.io/apimachinery v0.33.0/go.mod h1:BHW0YOu7n22fFv/JkYOEfkUYNRN0fj0BlvMFWA7b+SM=
k8s.io/klog/v2 v2.130.1 h1:n9Xl7H1Xvksem4KFG4PYbdQCQxqc/tTUyrgXaOhHSzk=
k8s.io/klog/v2 v2.130.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE=
k8s.io/utils v0.0.0-20250321185631-1f6e0b77f77e h1:KqK5c/ghOm8xkHYhlodbp6i6+r+ChV2vuAuVRdFbLro=
k8s.io/utils v0.0.0-20250321185631-1f6e0b77f77e/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0=
sigs.k8s.io/controller-runtime v0.21.0 h1:CYfjpEuicjUecRk+KAeyYh+ouUBn4llGyDYytIGcJS8=
sigs.k8s.io/controller-runtime v0.21.0/go.mod h1:OSg14+F65eWqIu4DceX7k/+QRAbTTvxeQSNSOQpukWM=
sigs.k8s.io/json v0.0.0-20241014173422-cfa47c3a1cc8 h1:gBQPwqORJ8d8/YNZWEjoZs7npUVDpVXUUOFfW6CgAqE=
sigs.k8s.io/json v0.0.0-20241014173422-cfa47c3a1cc8/go.mod h1:mdzfpAEoE6DHQEN0uh9ZbOCuHbLK5wOm7dK4ctXE9Tg=
sigs.k8s.io/randfill v0.0.0-20250304075658-069ef1bbf016/go.mod h1:XeLlZ/jmk4i1HRopwe7/aU3H5n1zNUcX6TM94b3QxOY=
sigs.k8s.io/randfill v1.0.0 h1:JfjMILfT8A6RbawdsK2JXGBR5AQVfd+9TbzrlneTyrU=
sigs.k8s.io/randfill v1.0.0/go.mod h1:XeLlZ/jmk4i1HRopwe7/aU3H5n1zNUcX6TM94b3QxOY=
sigs.k8s.io/structured-merge-diff/v4 v4.7.0 h1:qPeWmscJcXP0snki5IYF79Z8xrl8ETFxgMd7wez1XkI=
sigs.k8s.io/structured-merge-diff/v4 v4.7.0/go.mod h1:dDy58f92j70zLsuZVuUX5Wp9vtxXpaZnkPGWeqDfCps=
sigs.k8s.io/yaml v1.4.0 h1:Mk1wCc2gy/F0THH0TAp1QYyJNzRm2KCLy3o5ASXVI5E=
sigs.k8s.io/yaml v1.4.0/go.mod h1:Ejl7/uTz7PSA4eKMyQCUTnhZYNmLIl+5c2lQPGR2BPY=
gopkg.in/ini.v1 v1.51.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k=
gopkg.in/ini.v1 v1.62.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k=
gopkg.in/natefinch/lumberjack.v2 v2.0.0/go.mod h1:l0ndWWf7gzL7RNwBG7wST/UCcT4T24xpD6X8LsfU/+k=
gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo=
gopkg.in/square/go-jose.v2 v2.2.2/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI=
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ=
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw=
gopkg.in/yaml.v2 v2.0.0-20170812160011-eb3733d160e7/go.mod h1:JAlM8MvJe8wmxCU4Bli9HhUf9+ttbYbLASfIpnQbh74=
gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY=
gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b h1:h8qDotaEPuJATrMmW04NCwg7v22aHH28wwpauUhK9Oo=
gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gotest.tools/v3 v3.0.2/go.mod h1:3SzNCllyD9/Y+b5r9JIKQ474KzkZyqLqEfYqMsX94Bk=
gotest.tools/v3 v3.0.3/go.mod h1:Z7Lb0S5l+klDB31fvDQX8ss/FlKDxtlFlw3Oa8Ymbl8=
honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg=
honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k=
honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k=
k8s.io/api v0.23.0 h1:WrL1gb73VSC8obi8cuYETJGXEoFNEh3LU0Pt+Sokgro=
k8s.io/api v0.23.0/go.mod h1:8wmDdLBHBNxtOIytwLstXt5E9PddnZb0GaMcqsvDBpg=
k8s.io/apiextensions-apiserver v0.23.0/go.mod h1:xIFAEEDlAZgpVBl/1VSjGDmLoXAWRG40+GsWhKhAxY4=
k8s.io/apimachinery v0.23.0/go.mod h1:fFCTTBKvKcwTPFzjlcxp91uPFZr+JA0FubU4fLzzFYc=
k8s.io/apimachinery v0.23.1 h1:sfBjlDFwj2onG0Ijx5C+SrAoeUscPrmghm7wHP+uXlo=
k8s.io/apimachinery v0.23.1/go.mod h1:SADt2Kl8/sttJ62RRsi9MIV4o8f5S3coArm0Iu3fBno=
k8s.io/apiserver v0.23.0/go.mod h1:Cec35u/9zAepDPPFyT+UMrgqOCjgJ5qtfVJDxjZYmt4=
k8s.io/client-go v0.23.0/go.mod h1:hrDnpnK1mSr65lHHcUuIZIXDgEbzc7/683c6hyG4jTA=
k8s.io/code-generator v0.23.0/go.mod h1:vQvOhDXhuzqiVfM/YHp+dmg10WDZCchJVObc9MvowsE=
k8s.io/component-base v0.23.0/go.mod h1:DHH5uiFvLC1edCpvcTDV++NKULdYYU6pR9Tt3HIKMKI=
k8s.io/gengo v0.0.0-20210813121822-485abfe95c7c/go.mod h1:FiNAH4ZV3gBg2Kwh89tzAEV2be7d5xI0vBa/VySYy3E=
k8s.io/klog/v2 v2.0.0/go.mod h1:PBfzABfn139FHAV07az/IF9Wp1bkk3vpT2XSJ76fSDE=
k8s.io/klog/v2 v2.2.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y=
k8s.io/klog/v2 v2.30.0 h1:bUO6drIvCIsvZ/XFgfxoGFQU/a4Qkh0iAlvUR7vlHJw=
k8s.io/klog/v2 v2.30.0/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0=
k8s.io/kube-openapi v0.0.0-20211115234752-e816edb12b65/go.mod h1:sX9MT8g7NVZM5lVL/j8QyCCJe8YSMW30QvGZWaCIDIk=
k8s.io/utils v0.0.0-20210802155522-efc7438f0176/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA=
k8s.io/utils v0.0.0-20210930125809-cb0fa318a74b/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA=
k8s.io/utils v0.0.0-20211208161948-7d6a63dca704 h1:ZKMMxTvduyf5WUtREOqg5LiXaN1KO/+0oOQPRFrClpo=
k8s.io/utils v0.0.0-20211208161948-7d6a63dca704/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA=
rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8=
rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0=
rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA=
sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.25/go.mod h1:Mlj9PNLmG9bZ6BHFwFKDo5afkpWyUISkb9Me0GnK66I=
sigs.k8s.io/controller-runtime v0.11.0 h1:DqO+c8mywcZLFJWILq4iktoECTyn30Bkj0CwgqMpZWQ=
sigs.k8s.io/controller-runtime v0.11.0/go.mod h1:KKwLiTooNGu+JmLZGn9Sl3Gjmfj66eMbCQznLP5zcqA=
sigs.k8s.io/json v0.0.0-20211020170558-c049b76a60c6/go.mod h1:p4QtZmO4uMYipTQNzagwnNoseA6OxSUutVw05NhYDRs=
sigs.k8s.io/json v0.0.0-20211208200746-9f7c6b3444d2 h1:kDi4JBNAsJWfz1aEXhO8Jg87JJaPNLh5tIzYHgStQ9Y=
sigs.k8s.io/json v0.0.0-20211208200746-9f7c6b3444d2/go.mod h1:B+TnT182UBxE84DiCz4CVE26eOSDAeYCpfDnC2kdKMY=
sigs.k8s.io/structured-merge-diff/v4 v4.0.2/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw=
sigs.k8s.io/structured-merge-diff/v4 v4.1.2/go.mod h1:j/nl6xW8vLS49O8YvXW1ocPhZawJtm+Yrr7PPRQ0Vg4=
sigs.k8s.io/structured-merge-diff/v4 v4.2.0 h1:kDvPBbnPk+qYmkHmSo8vKGp438IASWofnbbUKDE/bv0=
sigs.k8s.io/structured-merge-diff/v4 v4.2.0/go.mod h1:j/nl6xW8vLS49O8YvXW1ocPhZawJtm+Yrr7PPRQ0Vg4=
sigs.k8s.io/yaml v1.2.0/go.mod h1:yfXDCHCao9+ENCvLSE62v9VSji2MKu5jeNfTrofGhJc=
sigs.k8s.io/yaml v1.3.0 h1:a2VclLzOGrwOHDiV8EfBGhvjHvP46CtW5j6POvhYGGo=
sigs.k8s.io/yaml v1.3.0/go.mod h1:GeOyir5tyXNByN85N/dRIT9es5UQNerPYEKK56eTBm8=

View File

@ -1,93 +0,0 @@
/*
Copyright 2023 The Flux authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1
import (
"path"
"strings"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
// Artifact represents the output of a Source reconciliation.
type Artifact struct {
// Path is the relative file path of the Artifact. It can be used to locate
// the file in the root of the Artifact storage on the local file system of
// the controller managing the Source.
// +required
Path string `json:"path"`
// URL is the HTTP address of the Artifact as exposed by the controller
// managing the Source. It can be used to retrieve the Artifact for
// consumption, e.g. by another controller applying the Artifact contents.
// +required
URL string `json:"url"`
// Revision is a human-readable identifier traceable in the origin source
// system. It can be a Git commit SHA, Git tag, a Helm chart version, etc.
// +required
Revision string `json:"revision"`
// Digest is the digest of the file in the form of '<algorithm>:<checksum>'.
// +optional
// +kubebuilder:validation:Pattern="^[a-z0-9]+(?:[.+_-][a-z0-9]+)*:[a-zA-Z0-9=_-]+$"
Digest string `json:"digest,omitempty"`
// LastUpdateTime is the timestamp corresponding to the last update of the
// Artifact.
// +required
LastUpdateTime metav1.Time `json:"lastUpdateTime"`
// Size is the number of bytes in the file.
// +optional
Size *int64 `json:"size,omitempty"`
// Metadata holds upstream information such as OCI annotations.
// +optional
Metadata map[string]string `json:"metadata,omitempty"`
}
// HasRevision returns if the given revision matches the current Revision of
// the Artifact.
func (in *Artifact) HasRevision(revision string) bool {
if in == nil {
return false
}
return in.Revision == revision
}
// HasDigest returns if the given digest matches the current Digest of the
// Artifact.
func (in *Artifact) HasDigest(digest string) bool {
if in == nil {
return false
}
return in.Digest == digest
}
// ArtifactDir returns the artifact dir path in the form of
// '<kind>/<namespace>/<name>'.
func ArtifactDir(kind, namespace, name string) string {
kind = strings.ToLower(kind)
return path.Join(kind, namespace, name)
}
// ArtifactPath returns the artifact path in the form of
// '<kind>/<namespace>/<name>/<filename>'.
func ArtifactPath(kind, namespace, name, filename string) string {
return path.Join(ArtifactDir(kind, namespace, name), filename)
}
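
The helpers above define the artifact storage layout: ArtifactDir lower-cases the kind and joins it with the namespace and name, ArtifactPath appends the file name, and HasRevision/HasDigest compare safely even on a nil Artifact. A minimal sketch of how a caller could use them follows; the sourcev1 import path and the example names are illustrative assumptions, not part of the diff:

package main

import (
    "fmt"

    sourcev1 "github.com/fluxcd/source-controller/api/v1"
)

func main() {
    // Storage layout: '<kind>/<namespace>/<name>/<filename>', with the kind lower-cased.
    p := sourcev1.ArtifactPath("GitRepository", "flux-system", "podinfo", "latest.tar.gz")
    fmt.Println(p) // gitrepository/flux-system/podinfo/latest.tar.gz

    // HasRevision and HasDigest are nil-safe and simply report false here.
    var artifact *sourcev1.Artifact
    fmt.Println(artifact.HasRevision("main@sha1:b3396ad"))
}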

View File

@ -1,271 +0,0 @@
/*
Copyright 2024 The Flux authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1
import (
"time"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"github.com/fluxcd/pkg/apis/meta"
)
const (
// BucketKind is the string representation of a Bucket.
BucketKind = "Bucket"
)
const (
// BucketProviderGeneric for any S3 API compatible storage Bucket.
BucketProviderGeneric string = "generic"
// BucketProviderAmazon for an AWS S3 object storage Bucket.
// Provides support for retrieving credentials from the AWS EC2 service.
BucketProviderAmazon string = "aws"
// BucketProviderGoogle for a Google Cloud Storage Bucket.
// Provides support for authentication using a workload identity.
BucketProviderGoogle string = "gcp"
// BucketProviderAzure for an Azure Blob Storage Bucket.
// Provides support for authentication using a Service Principal,
// Managed Identity or Shared Key.
BucketProviderAzure string = "azure"
)
// BucketSpec specifies the required configuration to produce an Artifact for
// an object storage bucket.
// +kubebuilder:validation:XValidation:rule="self.provider == 'aws' || self.provider == 'generic' || !has(self.sts)", message="STS configuration is only supported for the 'aws' and 'generic' Bucket providers"
// +kubebuilder:validation:XValidation:rule="self.provider != 'aws' || !has(self.sts) || self.sts.provider == 'aws'", message="'aws' is the only supported STS provider for the 'aws' Bucket provider"
// +kubebuilder:validation:XValidation:rule="self.provider != 'generic' || !has(self.sts) || self.sts.provider == 'ldap'", message="'ldap' is the only supported STS provider for the 'generic' Bucket provider"
// +kubebuilder:validation:XValidation:rule="!has(self.sts) || self.sts.provider != 'aws' || !has(self.sts.secretRef)", message="spec.sts.secretRef is not required for the 'aws' STS provider"
// +kubebuilder:validation:XValidation:rule="!has(self.sts) || self.sts.provider != 'aws' || !has(self.sts.certSecretRef)", message="spec.sts.certSecretRef is not required for the 'aws' STS provider"
type BucketSpec struct {
// Provider of the object storage bucket.
// Defaults to 'generic', which expects an S3 (API) compatible object
// storage.
// +kubebuilder:validation:Enum=generic;aws;gcp;azure
// +kubebuilder:default:=generic
// +optional
Provider string `json:"provider,omitempty"`
// BucketName is the name of the object storage bucket.
// +required
BucketName string `json:"bucketName"`
// Endpoint is the object storage address the BucketName is located at.
// +required
Endpoint string `json:"endpoint"`
// STS specifies the required configuration to use a Security Token
// Service for fetching temporary credentials to authenticate in a
// Bucket provider.
//
// This field is only supported for the `aws` and `generic` providers.
// +optional
STS *BucketSTSSpec `json:"sts,omitempty"`
// Insecure allows connecting to a non-TLS HTTP Endpoint.
// +optional
Insecure bool `json:"insecure,omitempty"`
// Region of the Endpoint where the BucketName is located in.
// +optional
Region string `json:"region,omitempty"`
// Prefix to use for server-side filtering of files in the Bucket.
// +optional
Prefix string `json:"prefix,omitempty"`
// SecretRef specifies the Secret containing authentication credentials
// for the Bucket.
// +optional
SecretRef *meta.LocalObjectReference `json:"secretRef,omitempty"`
// CertSecretRef can be given the name of a Secret containing
// either or both of
//
// - a PEM-encoded client certificate (`tls.crt`) and private
// key (`tls.key`);
// - a PEM-encoded CA certificate (`ca.crt`)
//
// and whichever are supplied, will be used for connecting to the
// bucket. The client cert and key are useful if you are
// authenticating with a certificate; the CA cert is useful if
// you are using a self-signed server certificate. The Secret must
// be of type `Opaque` or `kubernetes.io/tls`.
//
// This field is only supported for the `generic` provider.
// +optional
CertSecretRef *meta.LocalObjectReference `json:"certSecretRef,omitempty"`
// ProxySecretRef specifies the Secret containing the proxy configuration
// to use while communicating with the Bucket server.
// +optional
ProxySecretRef *meta.LocalObjectReference `json:"proxySecretRef,omitempty"`
// Interval at which the Bucket Endpoint is checked for updates.
// This interval is approximate and may be subject to jitter to ensure
// efficient use of resources.
// +kubebuilder:validation:Type=string
// +kubebuilder:validation:Pattern="^([0-9]+(\\.[0-9]+)?(ms|s|m|h))+$"
// +required
Interval metav1.Duration `json:"interval"`
// Timeout for fetch operations, defaults to 60s.
// +kubebuilder:default="60s"
// +kubebuilder:validation:Type=string
// +kubebuilder:validation:Pattern="^([0-9]+(\\.[0-9]+)?(ms|s|m))+$"
// +optional
Timeout *metav1.Duration `json:"timeout,omitempty"`
// Ignore overrides the set of excluded patterns in the .sourceignore format
// (which is the same as .gitignore). If not provided, a default will be used;
// consult the documentation for your version to find out what those are.
// +optional
Ignore *string `json:"ignore,omitempty"`
// Suspend tells the controller to suspend the reconciliation of this
// Bucket.
// +optional
Suspend bool `json:"suspend,omitempty"`
}
// BucketSTSSpec specifies the required configuration to use a Security Token
// Service for fetching temporary credentials to authenticate in a Bucket
// provider.
type BucketSTSSpec struct {
// Provider of the Security Token Service.
// +kubebuilder:validation:Enum=aws;ldap
// +required
Provider string `json:"provider"`
// Endpoint is the HTTP/S endpoint of the Security Token Service from
// where temporary credentials will be fetched.
// +required
// +kubebuilder:validation:Pattern="^(http|https)://.*$"
Endpoint string `json:"endpoint"`
// SecretRef specifies the Secret containing authentication credentials
// for the STS endpoint. This Secret must contain the fields `username`
// and `password` and is supported only for the `ldap` provider.
// +optional
SecretRef *meta.LocalObjectReference `json:"secretRef,omitempty"`
// CertSecretRef can be given the name of a Secret containing
// either or both of
//
// - a PEM-encoded client certificate (`tls.crt`) and private
// key (`tls.key`);
// - a PEM-encoded CA certificate (`ca.crt`)
//
// and whichever are supplied, will be used for connecting to the
// STS endpoint. The client cert and key are useful if you are
// authenticating with a certificate; the CA cert is useful if
// you are using a self-signed server certificate. The Secret must
// be of type `Opaque` or `kubernetes.io/tls`.
//
// This field is only supported for the `ldap` provider.
// +optional
CertSecretRef *meta.LocalObjectReference `json:"certSecretRef,omitempty"`
}
// BucketStatus records the observed state of a Bucket.
type BucketStatus struct {
// ObservedGeneration is the last observed generation of the Bucket object.
// +optional
ObservedGeneration int64 `json:"observedGeneration,omitempty"`
// Conditions holds the conditions for the Bucket.
// +optional
Conditions []metav1.Condition `json:"conditions,omitempty"`
// URL is the dynamic fetch link for the latest Artifact.
// It is provided on a "best effort" basis, and using the precise
// BucketStatus.Artifact data is recommended.
// +optional
URL string `json:"url,omitempty"`
// Artifact represents the last successful Bucket reconciliation.
// +optional
Artifact *Artifact `json:"artifact,omitempty"`
// ObservedIgnore is the observed exclusion patterns used for constructing
// the source artifact.
// +optional
ObservedIgnore *string `json:"observedIgnore,omitempty"`
meta.ReconcileRequestStatus `json:",inline"`
}
const (
// BucketOperationSucceededReason signals that the Bucket listing and fetch
// operations succeeded.
BucketOperationSucceededReason string = "BucketOperationSucceeded"
// BucketOperationFailedReason signals that the Bucket listing or fetch
// operations failed.
BucketOperationFailedReason string = "BucketOperationFailed"
)
// GetConditions returns the status conditions of the object.
func (in *Bucket) GetConditions() []metav1.Condition {
return in.Status.Conditions
}
// SetConditions sets the status conditions on the object.
func (in *Bucket) SetConditions(conditions []metav1.Condition) {
in.Status.Conditions = conditions
}
// GetRequeueAfter returns the duration after which the source must be reconciled again.
func (in *Bucket) GetRequeueAfter() time.Duration {
return in.Spec.Interval.Duration
}
// GetArtifact returns the latest artifact from the source if present in the status sub-resource.
func (in *Bucket) GetArtifact() *Artifact {
return in.Status.Artifact
}
// +genclient
// +kubebuilder:storageversion
// +kubebuilder:object:root=true
// +kubebuilder:subresource:status
// +kubebuilder:printcolumn:name="Endpoint",type=string,JSONPath=`.spec.endpoint`
// +kubebuilder:printcolumn:name="Age",type="date",JSONPath=".metadata.creationTimestamp",description=""
// +kubebuilder:printcolumn:name="Ready",type="string",JSONPath=".status.conditions[?(@.type==\"Ready\")].status",description=""
// +kubebuilder:printcolumn:name="Status",type="string",JSONPath=".status.conditions[?(@.type==\"Ready\")].message",description=""
// Bucket is the Schema for the buckets API.
type Bucket struct {
metav1.TypeMeta `json:",inline"`
metav1.ObjectMeta `json:"metadata,omitempty"`
Spec BucketSpec `json:"spec,omitempty"`
// +kubebuilder:default={"observedGeneration":-1}
Status BucketStatus `json:"status,omitempty"`
}
// BucketList contains a list of Bucket objects.
// +kubebuilder:object:root=true
type BucketList struct {
metav1.TypeMeta `json:",inline"`
metav1.ListMeta `json:"metadata,omitempty"`
Items []Bucket `json:"items"`
}
func init() {
SchemeBuilder.Register(&Bucket{}, &BucketList{})
}
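
To make the spec above concrete, the following sketch builds a minimal AWS-backed Bucket from these types and reads back the interval the reconciler uses for scheduling via GetRequeueAfter. The object name, namespace, bucket name and endpoint are illustrative assumptions; the optional STS block, which the validation rules above restrict to the aws and generic providers, is omitted.

package main

import (
    "fmt"
    "time"

    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

    sourcev1 "github.com/fluxcd/source-controller/api/v1"
)

func main() {
    bucket := &sourcev1.Bucket{
        ObjectMeta: metav1.ObjectMeta{Name: "releases", Namespace: "flux-system"},
        Spec: sourcev1.BucketSpec{
            // The "aws" provider can source credentials from AWS itself,
            // so no SecretRef is set in this sketch.
            Provider:   sourcev1.BucketProviderAmazon,
            BucketName: "my-release-artifacts",
            Endpoint:   "s3.amazonaws.com",
            Region:     "us-east-1",
            Interval:   metav1.Duration{Duration: 5 * time.Minute},
        },
    }

    // The reconciler derives its requeue schedule from the declared interval.
    fmt.Println(bucket.GetRequeueAfter()) // 5m0s
}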

View File

@ -1,118 +0,0 @@
/*
Copyright 2023 The Flux authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1
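// SourceFinalizer is the finalizer added to Source API objects by the
// controller, allowing it to clean up a source's artifacts before the
// object is deleted.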
const SourceFinalizer = "finalizers.fluxcd.io"
const (
// ArtifactInStorageCondition indicates the availability of the Artifact in
// the storage.
// If True, the Artifact is stored successfully.
// This Condition is only present on the resource if the Artifact is
// successfully stored.
ArtifactInStorageCondition string = "ArtifactInStorage"
// ArtifactOutdatedCondition indicates the current Artifact of the Source
// is outdated.
// This is a "negative polarity" or "abnormal-true" type, and is only
// present on the resource if it is True.
ArtifactOutdatedCondition string = "ArtifactOutdated"
// SourceVerifiedCondition indicates the integrity verification of the
// Source.
// If True, the integrity check succeeded. If False, it failed.
// This Condition is only present on the resource if the integrity check
// is enabled.
SourceVerifiedCondition string = "SourceVerified"
// FetchFailedCondition indicates a transient or persistent fetch failure
// of an upstream Source.
// If True, observations on the upstream Source revision may be impossible,
// and the Artifact available for the Source may be outdated.
// This is a "negative polarity" or "abnormal-true" type, and is only
// present on the resource if it is True.
FetchFailedCondition string = "FetchFailed"
// BuildFailedCondition indicates a transient or persistent build failure
// of a Source's Artifact.
// If True, the Source can be in an ArtifactOutdatedCondition.
// This is a "negative polarity" or "abnormal-true" type, and is only
// present on the resource if it is True.
BuildFailedCondition string = "BuildFailed"
// StorageOperationFailedCondition indicates a transient or persistent
// failure related to storage. If True, the reconciliation failed while
// performing some filesystem operation.
// This is a "negative polarity" or "abnormal-true" type, and is only
// present on the resource if it is True.
StorageOperationFailedCondition string = "StorageOperationFailed"
)
// Reasons are provided as utility, and not part of the declarative API.
const (
// URLInvalidReason signals that a given Source has an invalid URL.
URLInvalidReason string = "URLInvalid"
// AuthenticationFailedReason signals that a Secret does not have the
// required fields, or the provided credentials do not match.
AuthenticationFailedReason string = "AuthenticationFailed"
// VerificationError signals that the Source's verification
// check failed.
VerificationError string = "VerificationError"
// DirCreationFailedReason signals a failure caused by a directory creation
// operation.
DirCreationFailedReason string = "DirectoryCreationFailed"
// StatOperationFailedReason signals a failure caused by a stat operation on
// a path.
StatOperationFailedReason string = "StatOperationFailed"
// ReadOperationFailedReason signals a failure caused by a read operation.
ReadOperationFailedReason string = "ReadOperationFailed"
// AcquireLockFailedReason signals a failure in acquiring lock.
AcquireLockFailedReason string = "AcquireLockFailed"
// InvalidPathReason signals a failure caused by an invalid path.
InvalidPathReason string = "InvalidPath"
// ArchiveOperationFailedReason signals a failure in archive operation.
ArchiveOperationFailedReason string = "ArchiveOperationFailed"
// SymlinkUpdateFailedReason signals a failure in updating a symlink.
SymlinkUpdateFailedReason string = "SymlinkUpdateFailed"
// ArtifactUpToDateReason signals that an existing Artifact is up-to-date
// with the Source.
ArtifactUpToDateReason string = "ArtifactUpToDate"
// CacheOperationFailedReason signals a failure in cache operation.
CacheOperationFailedReason string = "CacheOperationFailed"
// PatchOperationFailedReason signals a failure in patching a kubernetes API
// object.
PatchOperationFailedReason string = "PatchOperationFailed"
// InvalidSTSConfigurationReason signals that the STS configuration is invalid.
InvalidSTSConfigurationReason string = "InvalidSTSConfiguration"
// InvalidProviderConfigurationReason signals that the provider
// configuration is invalid.
InvalidProviderConfigurationReason string = "InvalidProviderConfiguration"
)
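
These condition types and reasons are consumed by writing metav1.Condition entries into a source's status. The sketch below uses the generic apimachinery helper to show how the constants combine; the controller itself uses its own condition utilities, and the message text here is made up.

package main

import (
    "fmt"

    apimeta "k8s.io/apimachinery/pkg/api/meta"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

    sourcev1 "github.com/fluxcd/source-controller/api/v1"
)

func main() {
    var conditions []metav1.Condition

    // Record that the artifact is present in storage and up-to-date.
    apimeta.SetStatusCondition(&conditions, metav1.Condition{
        Type:    sourcev1.ArtifactInStorageCondition,
        Status:  metav1.ConditionTrue,
        Reason:  sourcev1.ArtifactUpToDateReason,
        Message: "stored artifact is up-to-date with the source",
    })

    fmt.Println(conditions[0].Type, conditions[0].Status) // ArtifactInStorage True
}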

View File

@ -1,378 +0,0 @@
/*
Copyright 2023 The Flux authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1
import (
"time"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"github.com/fluxcd/pkg/apis/meta"
)
const (
// GitRepositoryKind is the string representation of a GitRepository.
GitRepositoryKind = "GitRepository"
// GitProviderGeneric provides support for authentication using
// credentials specified in secretRef.
GitProviderGeneric string = "generic"
// GitProviderAzure provides support for authentication to azure
// repositories using Managed Identity.
GitProviderAzure string = "azure"
// GitProviderGitHub provides support for authentication to git
// repositories using GitHub App authentication
GitProviderGitHub string = "github"
)
const (
// IncludeUnavailableCondition indicates one of the includes is not
// available. For example, because it does not exist, or does not have an
// Artifact.
// This is a "negative polarity" or "abnormal-true" type, and is only
// present on the resource if it is True.
IncludeUnavailableCondition string = "IncludeUnavailable"
)
// GitVerificationMode specifies the verification mode for a Git repository.
type GitVerificationMode string
// Valid checks the validity of the Git verification mode.
func (m GitVerificationMode) Valid() bool {
switch m {
case ModeGitHEAD, ModeGitTag, ModeGitTagAndHEAD:
return true
default:
return false
}
}
const (
// ModeGitHEAD implies that the HEAD of the Git repository (after it has been
// checked out to the required commit) should be verified.
ModeGitHEAD GitVerificationMode = "HEAD"
// ModeGitTag implies that the tag object specified in the checkout configuration
// should be verified.
ModeGitTag GitVerificationMode = "Tag"
// ModeGitTagAndHEAD implies that both the tag object and the commit it points
// to should be verified.
ModeGitTagAndHEAD GitVerificationMode = "TagAndHEAD"
)
// GitRepositorySpec specifies the required configuration to produce an
// Artifact for a Git repository.
type GitRepositorySpec struct {
// URL specifies the Git repository URL, it can be an HTTP/S or SSH address.
// +kubebuilder:validation:Pattern="^(http|https|ssh)://.*$"
// +required
URL string `json:"url"`
// SecretRef specifies the Secret containing authentication credentials for
// the GitRepository.
// For HTTPS repositories the Secret must contain 'username' and 'password'
// fields for basic auth or 'bearerToken' field for token auth.
// For SSH repositories the Secret must contain 'identity'
// and 'known_hosts' fields.
// +optional
SecretRef *meta.LocalObjectReference `json:"secretRef,omitempty"`
// Provider used for authentication, can be 'azure', 'github', 'generic'.
// When not specified, defaults to 'generic'.
// +kubebuilder:validation:Enum=generic;azure;github
// +optional
Provider string `json:"provider,omitempty"`
// Interval at which the GitRepository URL is checked for updates.
// This interval is approximate and may be subject to jitter to ensure
// efficient use of resources.
// +kubebuilder:validation:Type=string
// +kubebuilder:validation:Pattern="^([0-9]+(\\.[0-9]+)?(ms|s|m|h))+$"
// +required
Interval metav1.Duration `json:"interval"`
// Timeout for Git operations like cloning, defaults to 60s.
// +kubebuilder:default="60s"
// +kubebuilder:validation:Type=string
// +kubebuilder:validation:Pattern="^([0-9]+(\\.[0-9]+)?(ms|s|m))+$"
// +optional
Timeout *metav1.Duration `json:"timeout,omitempty"`
// Reference specifies the Git reference to resolve and monitor for
// changes, defaults to the 'master' branch.
// +optional
Reference *GitRepositoryRef `json:"ref,omitempty"`
// Verification specifies the configuration to verify the Git commit
// signature(s).
// +optional
Verification *GitRepositoryVerification `json:"verify,omitempty"`
// ProxySecretRef specifies the Secret containing the proxy configuration
// to use while communicating with the Git server.
// +optional
ProxySecretRef *meta.LocalObjectReference `json:"proxySecretRef,omitempty"`
// Ignore overrides the set of excluded patterns in the .sourceignore format
// (which is the same as .gitignore). If not provided, a default will be used;
// consult the documentation for your version to find out what those are.
// +optional
Ignore *string `json:"ignore,omitempty"`
// Suspend tells the controller to suspend the reconciliation of this
// GitRepository.
// +optional
Suspend bool `json:"suspend,omitempty"`
// RecurseSubmodules enables the initialization of all submodules within
// the GitRepository as cloned from the URL, using their default settings.
// +optional
RecurseSubmodules bool `json:"recurseSubmodules,omitempty"`
// Include specifies a list of GitRepository resources which Artifacts
// should be included in the Artifact produced for this GitRepository.
// +optional
Include []GitRepositoryInclude `json:"include,omitempty"`
// SparseCheckout specifies a list of directories to checkout when cloning
// the repository. If specified, only these directories are included in the
// Artifact produced for this GitRepository.
// +optional
SparseCheckout []string `json:"sparseCheckout,omitempty"`
}
// GitRepositoryInclude specifies a local reference to a GitRepository which
// Artifact (sub-)contents must be included, and where they should be placed.
type GitRepositoryInclude struct {
// GitRepositoryRef specifies the GitRepository which Artifact contents
// must be included.
// +required
GitRepositoryRef meta.LocalObjectReference `json:"repository"`
// FromPath specifies the path to copy contents from, defaults to the root
// of the Artifact.
// +optional
FromPath string `json:"fromPath,omitempty"`
// ToPath specifies the path to copy contents to, defaults to the name of
// the GitRepositoryRef.
// +optional
ToPath string `json:"toPath,omitempty"`
}
// GetFromPath returns the specified FromPath.
func (in *GitRepositoryInclude) GetFromPath() string {
return in.FromPath
}
// GetToPath returns the specified ToPath, falling back to the name of the
// GitRepositoryRef.
func (in *GitRepositoryInclude) GetToPath() string {
if in.ToPath == "" {
return in.GitRepositoryRef.Name
}
return in.ToPath
}
// GitRepositoryRef specifies the Git reference to resolve and checkout.
type GitRepositoryRef struct {
// Branch to check out, defaults to 'master' if no other field is defined.
// +optional
Branch string `json:"branch,omitempty"`
// Tag to check out, takes precedence over Branch.
// +optional
Tag string `json:"tag,omitempty"`
// SemVer tag expression to check out, takes precedence over Tag.
// +optional
SemVer string `json:"semver,omitempty"`
// Name of the reference to check out; takes precedence over Branch, Tag and SemVer.
//
// It must be a valid Git reference: https://git-scm.com/docs/git-check-ref-format#_description
// Examples: "refs/heads/main", "refs/tags/v0.1.0", "refs/pull/420/head", "refs/merge-requests/1/head"
// +optional
Name string `json:"name,omitempty"`
// Commit SHA to check out, takes precedence over all reference fields.
//
// This can be combined with Branch to shallow clone the branch, in which
// the commit is expected to exist.
// +optional
Commit string `json:"commit,omitempty"`
}
// GitRepositoryVerification specifies the Git commit signature verification
// strategy.
type GitRepositoryVerification struct {
// Mode specifies which Git object(s) should be verified.
//
// The variants "head" and "HEAD" both imply the same thing, i.e. verify
// the commit that the HEAD of the Git repository points to. The variant
// "head" solely exists to ensure backwards compatibility.
// +kubebuilder:validation:Enum=head;HEAD;Tag;TagAndHEAD
// +optional
// +kubebuilder:default:=HEAD
Mode GitVerificationMode `json:"mode,omitempty"`
// SecretRef specifies the Secret containing the public keys of trusted Git
// authors.
// +required
SecretRef meta.LocalObjectReference `json:"secretRef"`
}
// GitRepositoryStatus records the observed state of a Git repository.
type GitRepositoryStatus struct {
// ObservedGeneration is the last observed generation of the GitRepository
// object.
// +optional
ObservedGeneration int64 `json:"observedGeneration,omitempty"`
// Conditions holds the conditions for the GitRepository.
// +optional
Conditions []metav1.Condition `json:"conditions,omitempty"`
// Artifact represents the last successful GitRepository reconciliation.
// +optional
Artifact *Artifact `json:"artifact,omitempty"`
// IncludedArtifacts contains a list of the last successfully included
// Artifacts as instructed by GitRepositorySpec.Include.
// +optional
IncludedArtifacts []*Artifact `json:"includedArtifacts,omitempty"`
// ObservedIgnore is the observed exclusion patterns used for constructing
// the source artifact.
// +optional
ObservedIgnore *string `json:"observedIgnore,omitempty"`
// ObservedRecurseSubmodules is the observed resource submodules
// configuration used to produce the current Artifact.
// +optional
ObservedRecurseSubmodules bool `json:"observedRecurseSubmodules,omitempty"`
// ObservedInclude is the observed list of GitRepository resources used to
// produce the current Artifact.
// +optional
ObservedInclude []GitRepositoryInclude `json:"observedInclude,omitempty"`
// ObservedSparseCheckout is the observed list of directories used to
// produce the current Artifact.
// +optional
ObservedSparseCheckout []string `json:"observedSparseCheckout,omitempty"`
// SourceVerificationMode is the last used verification mode indicating
// which Git object(s) have been verified.
// +optional
SourceVerificationMode *GitVerificationMode `json:"sourceVerificationMode,omitempty"`
meta.ReconcileRequestStatus `json:",inline"`
}
const (
// GitOperationSucceedReason signals that a Git operation (e.g. clone,
// checkout, etc.) succeeded.
GitOperationSucceedReason string = "GitOperationSucceeded"
// GitOperationFailedReason signals that a Git operation (e.g. clone,
// checkout, etc.) failed.
GitOperationFailedReason string = "GitOperationFailed"
)
// GetConditions returns the status conditions of the object.
func (in GitRepository) GetConditions() []metav1.Condition {
return in.Status.Conditions
}
// SetConditions sets the status conditions on the object.
func (in *GitRepository) SetConditions(conditions []metav1.Condition) {
in.Status.Conditions = conditions
}
// GetRequeueAfter returns the duration after which the GitRepository must be
// reconciled again.
func (in GitRepository) GetRequeueAfter() time.Duration {
return in.Spec.Interval.Duration
}
// GetArtifact returns the latest Artifact from the GitRepository if present in
// the status sub-resource.
func (in *GitRepository) GetArtifact() *Artifact {
return in.Status.Artifact
}
// GetProvider returns the Git authentication provider.
func (v *GitRepository) GetProvider() string {
if v.Spec.Provider == "" {
return GitProviderGeneric
}
return v.Spec.Provider
}
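// gitProviderDefaultExample is an illustrative sketch (not part of the API)
// showing that an empty Spec.Provider falls back to the generic provider.
func gitProviderDefaultExample() {
	var repo GitRepository
	_ = repo.GetProvider() // GitProviderGeneric, because Spec.Provider is empty
}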
// GetMode returns the declared GitVerificationMode, or a ModeGitHEAD default.
func (v *GitRepositoryVerification) GetMode() GitVerificationMode {
if v.Mode.Valid() {
return v.Mode
}
return ModeGitHEAD
}
// VerifyHEAD returns whether the configured mode instructs verification of the
// Git HEAD.
func (v *GitRepositoryVerification) VerifyHEAD() bool {
return v.GetMode() == ModeGitHEAD || v.GetMode() == ModeGitTagAndHEAD
}
// VerifyTag returns whether the configured mode instructs verification of the
// Git tag.
func (v *GitRepositoryVerification) VerifyTag() bool {
return v.GetMode() == ModeGitTag || v.GetMode() == ModeGitTagAndHEAD
}
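// verificationModeExample is an illustrative sketch (not part of the API)
// showing the ModeGitHEAD fallback when Mode is left empty; the Secret name
// used here is hypothetical.
func verificationModeExample() {
	v := GitRepositoryVerification{
		SecretRef: meta.LocalObjectReference{Name: "pgp-public-keys"},
	}
	_ = v.GetMode()    // ModeGitHEAD, because an empty Mode is not valid
	_ = v.VerifyHEAD() // true
	_ = v.VerifyTag()  // false
}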
// +genclient
// +kubebuilder:storageversion
// +kubebuilder:object:root=true
// +kubebuilder:resource:shortName=gitrepo
// +kubebuilder:subresource:status
// +kubebuilder:printcolumn:name="URL",type=string,JSONPath=`.spec.url`
// +kubebuilder:printcolumn:name="Age",type="date",JSONPath=".metadata.creationTimestamp",description=""
// +kubebuilder:printcolumn:name="Ready",type="string",JSONPath=".status.conditions[?(@.type==\"Ready\")].status",description=""
// +kubebuilder:printcolumn:name="Status",type="string",JSONPath=".status.conditions[?(@.type==\"Ready\")].message",description=""
// GitRepository is the Schema for the gitrepositories API.
type GitRepository struct {
metav1.TypeMeta `json:",inline"`
metav1.ObjectMeta `json:"metadata,omitempty"`
Spec GitRepositorySpec `json:"spec,omitempty"`
// +kubebuilder:default={"observedGeneration":-1}
Status GitRepositoryStatus `json:"status,omitempty"`
}
// GitRepositoryList contains a list of GitRepository objects.
// +kubebuilder:object:root=true
type GitRepositoryList struct {
metav1.TypeMeta `json:",inline"`
metav1.ListMeta `json:"metadata,omitempty"`
Items []GitRepository `json:"items"`
}
func init() {
SchemeBuilder.Register(&GitRepository{}, &GitRepositoryList{})
}

View File

@ -1,33 +0,0 @@
/*
Copyright 2023 The Flux authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1
import (
"k8s.io/apimachinery/pkg/runtime/schema"
"sigs.k8s.io/controller-runtime/pkg/scheme"
)
var (
// GroupVersion is group version used to register these objects.
GroupVersion = schema.GroupVersion{Group: "source.toolkit.fluxcd.io", Version: "v1"}
// SchemeBuilder is used to add go types to the GroupVersionKind scheme.
SchemeBuilder = &scheme.Builder{GroupVersion: GroupVersion}
// AddToScheme adds the types in this group-version to the given scheme.
AddToScheme = SchemeBuilder.AddToScheme
)
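// schemeRegistrationExample is an illustrative sketch (not part of this file)
// of registering the group-version with a client scheme; it assumes an
// additional import of "k8s.io/apimachinery/pkg/runtime".
func schemeRegistrationExample() error {
	s := runtime.NewScheme()
	// AddToScheme registers all kinds of source.toolkit.fluxcd.io/v1.
	return AddToScheme(s)
}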

View File

@ -1,227 +0,0 @@
/*
Copyright 2024 The Flux authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1
import (
"time"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"github.com/fluxcd/pkg/apis/meta"
)
// HelmChartKind is the string representation of a HelmChart.
const HelmChartKind = "HelmChart"
// HelmChartSpec specifies the desired state of a Helm chart.
type HelmChartSpec struct {
// Chart is the name or path the Helm chart is available at in the
// SourceRef.
// +required
Chart string `json:"chart"`
// Version is the chart version semver expression, ignored for charts from
// GitRepository and Bucket sources. Defaults to latest when omitted.
// +kubebuilder:default:=*
// +optional
Version string `json:"version,omitempty"`
// SourceRef is the reference to the Source the chart is available at.
// +required
SourceRef LocalHelmChartSourceReference `json:"sourceRef"`
// Interval at which the HelmChart SourceRef is checked for updates.
// This interval is approximate and may be subject to jitter to ensure
// efficient use of resources.
// +kubebuilder:validation:Type=string
// +kubebuilder:validation:Pattern="^([0-9]+(\\.[0-9]+)?(ms|s|m|h))+$"
// +required
Interval metav1.Duration `json:"interval"`
// ReconcileStrategy determines what triggers the creation of a new artifact.
// Valid values are ('ChartVersion', 'Revision').
// See the documentation of the values for an explanation on their behavior.
// Defaults to ChartVersion when omitted.
// +kubebuilder:validation:Enum=ChartVersion;Revision
// +kubebuilder:default:=ChartVersion
// +optional
ReconcileStrategy string `json:"reconcileStrategy,omitempty"`
// ValuesFiles is an alternative list of values files to use as the chart
// values (values.yaml is not included by default), each expected to be a
// relative path in the SourceRef.
// Values files are merged in the order of this list with the last file
// overriding the first. Ignored when omitted.
// +optional
ValuesFiles []string `json:"valuesFiles,omitempty"`
// IgnoreMissingValuesFiles controls whether to silently ignore missing values
// files rather than failing.
// +optional
IgnoreMissingValuesFiles bool `json:"ignoreMissingValuesFiles,omitempty"`
// Suspend tells the controller to suspend the reconciliation of this
// source.
// +optional
Suspend bool `json:"suspend,omitempty"`
// Verify contains the secret name containing the trusted public keys
// used to verify the signature and specifies which provider to use to check
// whether the OCI image is authentic.
// This field is only supported when using HelmRepository source with spec.type 'oci'.
// Chart dependencies, which are not bundled in the umbrella chart artifact, are not verified.
// +optional
Verify *OCIRepositoryVerification `json:"verify,omitempty"`
}
const (
// ReconcileStrategyChartVersion reconciles when the version of the Helm chart is different.
ReconcileStrategyChartVersion string = "ChartVersion"
// ReconcileStrategyRevision reconciles when the Revision of the source is different.
ReconcileStrategyRevision string = "Revision"
)
// LocalHelmChartSourceReference contains enough information to let you locate
// the typed referenced object at namespace level.
type LocalHelmChartSourceReference struct {
// APIVersion of the referent.
// +optional
APIVersion string `json:"apiVersion,omitempty"`
// Kind of the referent, valid values are ('HelmRepository', 'GitRepository',
// 'Bucket').
// +kubebuilder:validation:Enum=HelmRepository;GitRepository;Bucket
// +required
Kind string `json:"kind"`
// Name of the referent.
// +required
Name string `json:"name"`
}
// HelmChartStatus records the observed state of the HelmChart.
type HelmChartStatus struct {
// ObservedGeneration is the last observed generation of the HelmChart
// object.
// +optional
ObservedGeneration int64 `json:"observedGeneration,omitempty"`
// ObservedSourceArtifactRevision is the last observed Artifact.Revision
// of the HelmChartSpec.SourceRef.
// +optional
ObservedSourceArtifactRevision string `json:"observedSourceArtifactRevision,omitempty"`
// ObservedChartName is the last observed chart name as specified by the
// resolved chart reference.
// +optional
ObservedChartName string `json:"observedChartName,omitempty"`
// ObservedValuesFiles are the observed values files of the last successful
// reconciliation.
// They match the chart in the last successfully reconciled artifact.
// +optional
ObservedValuesFiles []string `json:"observedValuesFiles,omitempty"`
// Conditions holds the conditions for the HelmChart.
// +optional
Conditions []metav1.Condition `json:"conditions,omitempty"`
// URL is the dynamic fetch link for the latest Artifact.
// It is provided on a "best effort" basis, and using the precise
// HelmChartStatus.Artifact data is recommended.
// +optional
URL string `json:"url,omitempty"`
// Artifact represents the output of the last successful reconciliation.
// +optional
Artifact *Artifact `json:"artifact,omitempty"`
meta.ReconcileRequestStatus `json:",inline"`
}
const (
// ChartPullSucceededReason signals that the pull of the Helm chart
// succeeded.
ChartPullSucceededReason string = "ChartPullSucceeded"
// ChartPackageSucceededReason signals that the package of the Helm
// chart succeeded.
ChartPackageSucceededReason string = "ChartPackageSucceeded"
)
// GetConditions returns the status conditions of the object.
func (in HelmChart) GetConditions() []metav1.Condition {
return in.Status.Conditions
}
// SetConditions sets the status conditions on the object.
func (in *HelmChart) SetConditions(conditions []metav1.Condition) {
in.Status.Conditions = conditions
}
// GetRequeueAfter returns the duration after which the source must be
// reconciled again.
func (in HelmChart) GetRequeueAfter() time.Duration {
return in.Spec.Interval.Duration
}
// GetArtifact returns the latest artifact from the source if present in the
// status sub-resource.
func (in *HelmChart) GetArtifact() *Artifact {
return in.Status.Artifact
}
// GetValuesFiles returns a merged list of HelmChartSpec.ValuesFiles.
func (in *HelmChart) GetValuesFiles() []string {
return in.Spec.ValuesFiles
}
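// helmChartSpecExample is an illustrative sketch (not part of the API) of a
// chart resolved from a HelmRepository source; the chart and resource names
// used here are hypothetical.
func helmChartSpecExample() HelmChartSpec {
	return HelmChartSpec{
		Chart:   "podinfo",
		Version: "6.x",
		SourceRef: LocalHelmChartSourceReference{
			Kind: HelmRepositoryKind,
			Name: "podinfo-repo",
		},
		Interval: metav1.Duration{Duration: 10 * time.Minute},
		// Merged in order, with the last file overriding the first.
		ValuesFiles: []string{"values.yaml", "values-production.yaml"},
	}
}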
// +genclient
// +kubebuilder:storageversion
// +kubebuilder:object:root=true
// +kubebuilder:resource:shortName=hc
// +kubebuilder:subresource:status
// +kubebuilder:printcolumn:name="Chart",type=string,JSONPath=`.spec.chart`
// +kubebuilder:printcolumn:name="Version",type=string,JSONPath=`.spec.version`
// +kubebuilder:printcolumn:name="Source Kind",type=string,JSONPath=`.spec.sourceRef.kind`
// +kubebuilder:printcolumn:name="Source Name",type=string,JSONPath=`.spec.sourceRef.name`
// +kubebuilder:printcolumn:name="Age",type="date",JSONPath=".metadata.creationTimestamp",description=""
// +kubebuilder:printcolumn:name="Ready",type="string",JSONPath=".status.conditions[?(@.type==\"Ready\")].status",description=""
// +kubebuilder:printcolumn:name="Status",type="string",JSONPath=".status.conditions[?(@.type==\"Ready\")].message",description=""
// HelmChart is the Schema for the helmcharts API.
type HelmChart struct {
metav1.TypeMeta `json:",inline"`
metav1.ObjectMeta `json:"metadata,omitempty"`
Spec HelmChartSpec `json:"spec,omitempty"`
// +kubebuilder:default={"observedGeneration":-1}
Status HelmChartStatus `json:"status,omitempty"`
}
// HelmChartList contains a list of HelmChart objects.
// +kubebuilder:object:root=true
type HelmChartList struct {
metav1.TypeMeta `json:",inline"`
metav1.ListMeta `json:"metadata,omitempty"`
Items []HelmChart `json:"items"`
}
func init() {
SchemeBuilder.Register(&HelmChart{}, &HelmChartList{})
}

View File

@ -1,228 +0,0 @@
/*
Copyright 2024 The Flux authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1
import (
"time"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"github.com/fluxcd/pkg/apis/acl"
"github.com/fluxcd/pkg/apis/meta"
)
const (
// HelmRepositoryKind is the string representation of a HelmRepository.
HelmRepositoryKind = "HelmRepository"
// HelmRepositoryURLIndexKey is the key used for indexing HelmRepository
// objects by their HelmRepositorySpec.URL.
HelmRepositoryURLIndexKey = ".metadata.helmRepositoryURL"
// HelmRepositoryTypeDefault is the default HelmRepository type.
// It is used when no type is specified and corresponds to a Helm repository.
HelmRepositoryTypeDefault = "default"
// HelmRepositoryTypeOCI is the type for an OCI repository.
HelmRepositoryTypeOCI = "oci"
)
// HelmRepositorySpec specifies the required configuration to produce an
// Artifact for a Helm repository index YAML.
type HelmRepositorySpec struct {
// URL of the Helm repository, a valid URL contains at least a protocol and
// host.
// +kubebuilder:validation:Pattern="^(http|https|oci)://.*$"
// +required
URL string `json:"url"`
// SecretRef specifies the Secret containing authentication credentials
// for the HelmRepository.
// For HTTP/S basic auth the secret must contain 'username' and 'password'
// fields.
// Support for TLS auth using the 'certFile' and 'keyFile', and/or 'caFile'
// keys is deprecated. Please use `.spec.certSecretRef` instead.
// +optional
SecretRef *meta.LocalObjectReference `json:"secretRef,omitempty"`
// CertSecretRef can be given the name of a Secret containing
// either or both of
//
// - a PEM-encoded client certificate (`tls.crt`) and private
// key (`tls.key`);
// - a PEM-encoded CA certificate (`ca.crt`)
//
// and whichever are supplied, will be used for connecting to the
// registry. The client cert and key are useful if you are
// authenticating with a certificate; the CA cert is useful if
// you are using a self-signed server certificate. The Secret must
// be of type `Opaque` or `kubernetes.io/tls`.
//
// It takes precedence over the values specified in the Secret referred
// to by `.spec.secretRef`.
// +optional
CertSecretRef *meta.LocalObjectReference `json:"certSecretRef,omitempty"`
// PassCredentials allows the credentials from the SecretRef to be passed
// on to a host that does not match the host as defined in URL.
// This may be required if the host of the advertised chart URLs in the
// index differ from the defined URL.
// Enabling this should be done with caution, as it can potentially result
// in credentials getting stolen in a MITM-attack.
// +optional
PassCredentials bool `json:"passCredentials,omitempty"`
// Interval at which the HelmRepository URL is checked for updates.
// This interval is approximate and may be subject to jitter to ensure
// efficient use of resources.
// +kubebuilder:validation:Type=string
// +kubebuilder:validation:Pattern="^([0-9]+(\\.[0-9]+)?(ms|s|m|h))+$"
// +optional
Interval metav1.Duration `json:"interval,omitempty"`
// Insecure allows connecting to a non-TLS HTTP container registry.
// This field is only taken into account if the .spec.type field is set to 'oci'.
// +optional
Insecure bool `json:"insecure,omitempty"`
// Timeout is used for the index fetch operation for an HTTPS helm repository,
// and for remote OCI Repository operations like pulling an OCI helm
// chart by the associated HelmChart.
// Its default value is 60s.
// +kubebuilder:validation:Type=string
// +kubebuilder:validation:Pattern="^([0-9]+(\\.[0-9]+)?(ms|s|m))+$"
// +optional
Timeout *metav1.Duration `json:"timeout,omitempty"`
// Suspend tells the controller to suspend the reconciliation of this
// HelmRepository.
// +optional
Suspend bool `json:"suspend,omitempty"`
// AccessFrom specifies an Access Control List for allowing cross-namespace
// references to this object.
// NOTE: Not implemented, provisional as of https://github.com/fluxcd/flux2/pull/2092
// +optional
AccessFrom *acl.AccessFrom `json:"accessFrom,omitempty"`
// Type of the HelmRepository.
// When this field is set to "oci", the URL field value must be prefixed with "oci://".
// +kubebuilder:validation:Enum=default;oci
// +optional
Type string `json:"type,omitempty"`
// Provider used for authentication, can be 'aws', 'azure', 'gcp' or 'generic'.
// This field is optional, and only taken into account if the .spec.type field is set to 'oci'.
// When not specified, defaults to 'generic'.
// +kubebuilder:validation:Enum=generic;aws;azure;gcp
// +kubebuilder:default:=generic
// +optional
Provider string `json:"provider,omitempty"`
}
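// ociHelmRepositoryExample is an illustrative sketch (not part of the API):
// with Type set to "oci" the URL must use the oci:// scheme, and Provider
// selects the registry authentication mechanism. The registry path below is
// hypothetical.
func ociHelmRepositoryExample() HelmRepositorySpec {
	return HelmRepositorySpec{
		URL:      "oci://ghcr.io/example/charts",
		Type:     HelmRepositoryTypeOCI,
		Provider: "generic",
		Interval: metav1.Duration{Duration: time.Hour},
	}
}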
// HelmRepositoryStatus records the observed state of the HelmRepository.
type HelmRepositoryStatus struct {
// ObservedGeneration is the last observed generation of the HelmRepository
// object.
// +optional
ObservedGeneration int64 `json:"observedGeneration,omitempty"`
// Conditions holds the conditions for the HelmRepository.
// +optional
Conditions []metav1.Condition `json:"conditions,omitempty"`
// URL is the dynamic fetch link for the latest Artifact.
// It is provided on a "best effort" basis, and using the precise
// HelmRepositoryStatus.Artifact data is recommended.
// +optional
URL string `json:"url,omitempty"`
// Artifact represents the last successful HelmRepository reconciliation.
// +optional
Artifact *Artifact `json:"artifact,omitempty"`
meta.ReconcileRequestStatus `json:",inline"`
}
const (
// IndexationFailedReason signals that the HelmRepository index fetch
// failed.
IndexationFailedReason string = "IndexationFailed"
)
// GetConditions returns the status conditions of the object.
func (in HelmRepository) GetConditions() []metav1.Condition {
return in.Status.Conditions
}
// SetConditions sets the status conditions on the object.
func (in *HelmRepository) SetConditions(conditions []metav1.Condition) {
in.Status.Conditions = conditions
}
// GetRequeueAfter returns the duration after which the source must be
// reconciled again.
func (in HelmRepository) GetRequeueAfter() time.Duration {
if in.Spec.Interval.Duration != 0 {
return in.Spec.Interval.Duration
}
return time.Minute
}
// GetTimeout returns the timeout duration used for various operations related
// to this HelmRepository.
func (in HelmRepository) GetTimeout() time.Duration {
if in.Spec.Timeout != nil {
return in.Spec.Timeout.Duration
}
return time.Minute
}
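// helmRepositoryDefaultsExample is an illustrative sketch (not part of the
// API) of the fallback behaviour when Interval and Timeout are left unset.
func helmRepositoryDefaultsExample() {
	var repo HelmRepository
	_ = repo.GetRequeueAfter() // one minute, because Spec.Interval is zero
	_ = repo.GetTimeout()      // one minute, because Spec.Timeout is nil
}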
// GetArtifact returns the latest artifact from the source if present in the
// status sub-resource.
func (in *HelmRepository) GetArtifact() *Artifact {
return in.Status.Artifact
}
// +genclient
// +kubebuilder:storageversion
// +kubebuilder:object:root=true
// +kubebuilder:resource:shortName=helmrepo
// +kubebuilder:subresource:status
// +kubebuilder:printcolumn:name="URL",type=string,JSONPath=`.spec.url`
// +kubebuilder:printcolumn:name="Age",type="date",JSONPath=".metadata.creationTimestamp",description=""
// +kubebuilder:printcolumn:name="Ready",type="string",JSONPath=".status.conditions[?(@.type==\"Ready\")].status",description=""
// +kubebuilder:printcolumn:name="Status",type="string",JSONPath=".status.conditions[?(@.type==\"Ready\")].message",description=""
// HelmRepository is the Schema for the helmrepositories API.
type HelmRepository struct {
metav1.TypeMeta `json:",inline"`
metav1.ObjectMeta `json:"metadata,omitempty"`
Spec HelmRepositorySpec `json:"spec,omitempty"`
// +kubebuilder:default={"observedGeneration":-1}
Status HelmRepositoryStatus `json:"status,omitempty"`
}
// HelmRepositoryList contains a list of HelmRepository objects.
// +kubebuilder:object:root=true
type HelmRepositoryList struct {
metav1.TypeMeta `json:",inline"`
metav1.ListMeta `json:"metadata,omitempty"`
Items []HelmRepository `json:"items"`
}
func init() {
SchemeBuilder.Register(&HelmRepository{}, &HelmRepositoryList{})
}

View File

@ -1,296 +0,0 @@
/*
Copyright 2025 The Flux authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1
import (
"time"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"github.com/fluxcd/pkg/apis/meta"
)
const (
// OCIRepositoryKind is the string representation of an OCIRepository.
OCIRepositoryKind = "OCIRepository"
// OCIRepositoryPrefix is the prefix used for OCIRepository URLs.
OCIRepositoryPrefix = "oci://"
// GenericOCIProvider provides support for authentication using static credentials
// for any OCI compatible API such as Docker Registry, GitHub Container Registry,
// Docker Hub, Quay, etc.
GenericOCIProvider string = "generic"
// AmazonOCIProvider provides support for OCI authentication using AWS IRSA.
AmazonOCIProvider string = "aws"
// GoogleOCIProvider provides support for OCI authentication using GCP workload identity.
GoogleOCIProvider string = "gcp"
// AzureOCIProvider provides support for OCI authentication using an Azure Service Principal,
// Managed Identity or Shared Key.
AzureOCIProvider string = "azure"
// OCILayerExtract defines the operation type for extracting the content from an OCI artifact layer.
OCILayerExtract = "extract"
// OCILayerCopy defines the operation type for copying the content from an OCI artifact layer.
OCILayerCopy = "copy"
)
// OCIRepositorySpec defines the desired state of OCIRepository
type OCIRepositorySpec struct {
// URL is a reference to an OCI artifact repository hosted
// on a remote container registry.
// +kubebuilder:validation:Pattern="^oci://.*$"
// +required
URL string `json:"url"`
// The OCI reference to pull and monitor for changes,
// defaults to the latest tag.
// +optional
Reference *OCIRepositoryRef `json:"ref,omitempty"`
// LayerSelector specifies which layer should be extracted from the OCI artifact.
// When not specified, the first layer found in the artifact is selected.
// +optional
LayerSelector *OCILayerSelector `json:"layerSelector,omitempty"`
// The provider used for authentication, can be 'aws', 'azure', 'gcp' or 'generic'.
// When not specified, defaults to 'generic'.
// +kubebuilder:validation:Enum=generic;aws;azure;gcp
// +kubebuilder:default:=generic
// +optional
Provider string `json:"provider,omitempty"`
// SecretRef contains the secret name containing the registry login
// credentials to resolve image metadata.
// The secret must be of type kubernetes.io/dockerconfigjson.
// +optional
SecretRef *meta.LocalObjectReference `json:"secretRef,omitempty"`
// Verify contains the secret name containing the trusted public keys
// used to verify the signature and specifies which provider to use to check
// whether the OCI image is authentic.
// +optional
Verify *OCIRepositoryVerification `json:"verify,omitempty"`
// ServiceAccountName is the name of the Kubernetes ServiceAccount used to authenticate
// the image pull if the service account has attached pull secrets. For more information:
// https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/#add-imagepullsecrets-to-a-service-account
// +optional
ServiceAccountName string `json:"serviceAccountName,omitempty"`
// CertSecretRef can be given the name of a Secret containing
// either or both of
//
// - a PEM-encoded client certificate (`tls.crt`) and private
// key (`tls.key`);
// - a PEM-encoded CA certificate (`ca.crt`)
//
// and whichever are supplied, will be used for connecting to the
// registry. The client cert and key are useful if you are
// authenticating with a certificate; the CA cert is useful if
// you are using a self-signed server certificate. The Secret must
// be of type `Opaque` or `kubernetes.io/tls`.
// +optional
CertSecretRef *meta.LocalObjectReference `json:"certSecretRef,omitempty"`
// ProxySecretRef specifies the Secret containing the proxy configuration
// to use while communicating with the container registry.
// +optional
ProxySecretRef *meta.LocalObjectReference `json:"proxySecretRef,omitempty"`
// Interval at which the OCIRepository URL is checked for updates.
// This interval is approximate and may be subject to jitter to ensure
// efficient use of resources.
// +kubebuilder:validation:Type=string
// +kubebuilder:validation:Pattern="^([0-9]+(\\.[0-9]+)?(ms|s|m|h))+$"
// +required
Interval metav1.Duration `json:"interval"`
// The timeout for remote OCI Repository operations like pulling, defaults to 60s.
// +kubebuilder:default="60s"
// +kubebuilder:validation:Type=string
// +kubebuilder:validation:Pattern="^([0-9]+(\\.[0-9]+)?(ms|s|m))+$"
// +optional
Timeout *metav1.Duration `json:"timeout,omitempty"`
// Ignore overrides the set of excluded patterns in the .sourceignore format
// (which is the same as .gitignore). If not provided, a default will be used;
// consult the documentation for your version to find out what those are.
// +optional
Ignore *string `json:"ignore,omitempty"`
// Insecure allows connecting to a non-TLS HTTP container registry.
// +optional
Insecure bool `json:"insecure,omitempty"`
// This flag tells the controller to suspend the reconciliation of this source.
// +optional
Suspend bool `json:"suspend,omitempty"`
}
// OCIRepositoryRef defines the image reference for the OCIRepository's URL
type OCIRepositoryRef struct {
// Digest is the image digest to pull, takes precedence over SemVer.
// The value should be in the format 'sha256:<HASH>'.
// +optional
Digest string `json:"digest,omitempty"`
// SemVer is the range of tags to pull selecting the latest within
// the range, takes precedence over Tag.
// +optional
SemVer string `json:"semver,omitempty"`
// SemverFilter is a regex pattern to filter the tags within the SemVer range.
// +optional
SemverFilter string `json:"semverFilter,omitempty"`
// Tag is the image tag to pull, defaults to latest.
// +optional
Tag string `json:"tag,omitempty"`
}
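// ociRefPrecedenceExample is an illustrative sketch (not part of the API):
// when several reference fields are set, Digest takes precedence over SemVer,
// which takes precedence over Tag, as documented on the fields above.
func ociRefPrecedenceExample() OCIRepositoryRef {
	return OCIRepositoryRef{
		SemVer: ">=6.0.0 <7.0.0",
		Tag:    "latest", // ignored, because SemVer is set
	}
}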
// OCILayerSelector specifies which layer should be extracted from an OCI Artifact
type OCILayerSelector struct {
// MediaType specifies the OCI media type of the layer
// which should be extracted from the OCI Artifact. The
// first layer matching this type is selected.
// +optional
MediaType string `json:"mediaType,omitempty"`
// Operation specifies how the selected layer should be processed.
// By default, the compressed layer content is extracted to storage.
// When the operation is set to 'copy', the compressed layer content
// is persisted to storage as-is.
// +kubebuilder:validation:Enum=extract;copy
// +optional
Operation string `json:"operation,omitempty"`
}
// OCIRepositoryStatus defines the observed state of OCIRepository
type OCIRepositoryStatus struct {
// ObservedGeneration is the last observed generation.
// +optional
ObservedGeneration int64 `json:"observedGeneration,omitempty"`
// Conditions holds the conditions for the OCIRepository.
// +optional
Conditions []metav1.Condition `json:"conditions,omitempty"`
// URL is the download link for the artifact output of the last OCI Repository sync.
// +optional
URL string `json:"url,omitempty"`
// Artifact represents the output of the last successful OCI Repository sync.
// +optional
Artifact *Artifact `json:"artifact,omitempty"`
// ObservedIgnore is the observed exclusion patterns used for constructing
// the source artifact.
// +optional
ObservedIgnore *string `json:"observedIgnore,omitempty"`
// ObservedLayerSelector is the observed layer selector used for constructing
// the source artifact.
// +optional
ObservedLayerSelector *OCILayerSelector `json:"observedLayerSelector,omitempty"`
meta.ReconcileRequestStatus `json:",inline"`
}
const (
// OCIPullFailedReason signals that a pull operation failed.
OCIPullFailedReason string = "OCIArtifactPullFailed"
// OCILayerOperationFailedReason signals that an OCI layer operation failed.
OCILayerOperationFailedReason string = "OCIArtifactLayerOperationFailed"
)
// GetConditions returns the status conditions of the object.
func (in OCIRepository) GetConditions() []metav1.Condition {
return in.Status.Conditions
}
// SetConditions sets the status conditions on the object.
func (in *OCIRepository) SetConditions(conditions []metav1.Condition) {
in.Status.Conditions = conditions
}
// GetRequeueAfter returns the duration after which the OCIRepository must be
// reconciled again.
func (in OCIRepository) GetRequeueAfter() time.Duration {
return in.Spec.Interval.Duration
}
// GetArtifact returns the latest Artifact from the OCIRepository if present in
// the status sub-resource.
func (in *OCIRepository) GetArtifact() *Artifact {
return in.Status.Artifact
}
// GetLayerMediaType returns the layer selector media type if specified in the spec.
func (in *OCIRepository) GetLayerMediaType() string {
if in.Spec.LayerSelector == nil {
return ""
}
return in.Spec.LayerSelector.MediaType
}
// GetLayerOperation returns the layer selector operation (defaults to extract).
func (in *OCIRepository) GetLayerOperation() string {
if in.Spec.LayerSelector == nil || in.Spec.LayerSelector.Operation == "" {
return OCILayerExtract
}
return in.Spec.LayerSelector.Operation
}
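// layerSelectorDefaultsExample is an illustrative sketch (not part of the API)
// showing the defaults when no LayerSelector is specified.
func layerSelectorDefaultsExample() {
	var repo OCIRepository
	_ = repo.GetLayerMediaType() // "", the first layer found is selected
	_ = repo.GetLayerOperation() // OCILayerExtract
}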
// +genclient
// +kubebuilder:storageversion
// +kubebuilder:object:root=true
// +kubebuilder:resource:shortName=ocirepo
// +kubebuilder:subresource:status
// +kubebuilder:printcolumn:name="URL",type=string,JSONPath=`.spec.url`
// +kubebuilder:printcolumn:name="Ready",type="string",JSONPath=".status.conditions[?(@.type==\"Ready\")].status",description=""
// +kubebuilder:printcolumn:name="Status",type="string",JSONPath=".status.conditions[?(@.type==\"Ready\")].message",description=""
// +kubebuilder:printcolumn:name="Age",type="date",JSONPath=".metadata.creationTimestamp",description=""
// OCIRepository is the Schema for the ocirepositories API
type OCIRepository struct {
metav1.TypeMeta `json:",inline"`
metav1.ObjectMeta `json:"metadata,omitempty"`
Spec OCIRepositorySpec `json:"spec,omitempty"`
// +kubebuilder:default={"observedGeneration":-1}
Status OCIRepositoryStatus `json:"status,omitempty"`
}
// OCIRepositoryList contains a list of OCIRepository
// +kubebuilder:object:root=true
type OCIRepositoryList struct {
metav1.TypeMeta `json:",inline"`
metav1.ListMeta `json:"metadata,omitempty"`
Items []OCIRepository `json:"items"`
}
func init() {
SchemeBuilder.Register(&OCIRepository{}, &OCIRepositoryList{})
}

View File

@ -1,56 +0,0 @@
/*
Copyright 2024 The Flux authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1
import (
"github.com/fluxcd/pkg/apis/meta"
)
// OCIRepositoryVerification verifies the authenticity of an OCI Artifact
type OCIRepositoryVerification struct {
// Provider specifies the technology used to sign the OCI Artifact.
// +kubebuilder:validation:Enum=cosign;notation
// +kubebuilder:default:=cosign
Provider string `json:"provider"`
// SecretRef specifies the Kubernetes Secret containing the
// trusted public keys.
// +optional
SecretRef *meta.LocalObjectReference `json:"secretRef,omitempty"`
// MatchOIDCIdentity specifies the identity matching criteria to use
// while verifying an OCI artifact which was signed using Cosign keyless
// signing. The artifact's identity is deemed to be verified if any of the
// specified matchers match against the identity.
// +optional
MatchOIDCIdentity []OIDCIdentityMatch `json:"matchOIDCIdentity,omitempty"`
}
// OIDCIdentityMatch specifies options for verifying the certificate identity,
// i.e. the issuer and the subject of the certificate.
type OIDCIdentityMatch struct {
// Issuer specifies the regex pattern to match against to verify
// the OIDC issuer in the Fulcio certificate. The pattern must be a
// valid Go regular expression.
// +required
Issuer string `json:"issuer"`
// Subject specifies the regex pattern to match against to verify
// the identity subject in the Fulcio certificate. The pattern must
// be a valid Go regular expression.
// +required
Subject string `json:"subject"`
}
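// matchOIDCIdentityExample is an illustrative sketch of how an identity could
// be matched against an OIDCIdentityMatch; it is not how the controller
// implements verification and assumes an additional import of "regexp".
func matchOIDCIdentityExample(m OIDCIdentityMatch, issuer, subject string) (bool, error) {
	issuerRE, err := regexp.Compile(m.Issuer)
	if err != nil {
		return false, err
	}
	subjectRE, err := regexp.Compile(m.Subject)
	if err != nil {
		return false, err
	}
	return issuerRE.MatchString(issuer) && subjectRE.MatchString(subject), nil
}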

View File

@ -1,45 +0,0 @@
/*
Copyright 2023 The Flux authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1
import (
"time"
"k8s.io/apimachinery/pkg/runtime"
)
const (
// SourceIndexKey is the key used for indexing objects based on their
// referenced Source.
SourceIndexKey string = ".metadata.source"
)
// Source is the interface that provides generic access to the Artifact and
// interval. It must be supported by all kinds of the source.toolkit.fluxcd.io
// API group.
//
// +k8s:deepcopy-gen=false
type Source interface {
runtime.Object
// GetRequeueAfter returns the duration after which the source must be
// reconciled again.
GetRequeueAfter() time.Duration
// GetArtifact returns the latest artifact from the source if present in
// the status sub-resource.
GetArtifact() *Artifact
}
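// Illustrative compile-time assertions (not part of the original file) that
// the v1 API kinds above satisfy the Source interface.
var (
	_ Source = (*GitRepository)(nil)
	_ Source = (*HelmChart)(nil)
	_ Source = (*HelmRepository)(nil)
	_ Source = (*OCIRepository)(nil)
)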

View File

@ -1,26 +0,0 @@
/*
Copyright 2024 The Flux authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1
const (
// STSProviderAmazon represents the AWS provider for Security Token Service.
// Provides support for fetching temporary credentials from an AWS STS endpoint.
STSProviderAmazon string = "aws"
// STSProviderLDAP represents the LDAP provider for Security Token Service.
// Provides support for fetching temporary credentials from an LDAP endpoint.
STSProviderLDAP string = "ldap"
)

View File

@ -1,920 +0,0 @@
//go:build !ignore_autogenerated
/*
Copyright 2024 The Flux authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by controller-gen. DO NOT EDIT.
package v1
import (
"github.com/fluxcd/pkg/apis/acl"
"github.com/fluxcd/pkg/apis/meta"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
)
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *Artifact) DeepCopyInto(out *Artifact) {
*out = *in
in.LastUpdateTime.DeepCopyInto(&out.LastUpdateTime)
if in.Size != nil {
in, out := &in.Size, &out.Size
*out = new(int64)
**out = **in
}
if in.Metadata != nil {
in, out := &in.Metadata, &out.Metadata
*out = make(map[string]string, len(*in))
for key, val := range *in {
(*out)[key] = val
}
}
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Artifact.
func (in *Artifact) DeepCopy() *Artifact {
if in == nil {
return nil
}
out := new(Artifact)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *Bucket) DeepCopyInto(out *Bucket) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
in.Spec.DeepCopyInto(&out.Spec)
in.Status.DeepCopyInto(&out.Status)
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Bucket.
func (in *Bucket) DeepCopy() *Bucket {
if in == nil {
return nil
}
out := new(Bucket)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *Bucket) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *BucketList) DeepCopyInto(out *BucketList) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ListMeta.DeepCopyInto(&out.ListMeta)
if in.Items != nil {
in, out := &in.Items, &out.Items
*out = make([]Bucket, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BucketList.
func (in *BucketList) DeepCopy() *BucketList {
if in == nil {
return nil
}
out := new(BucketList)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *BucketList) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *BucketSTSSpec) DeepCopyInto(out *BucketSTSSpec) {
*out = *in
if in.SecretRef != nil {
in, out := &in.SecretRef, &out.SecretRef
*out = new(meta.LocalObjectReference)
**out = **in
}
if in.CertSecretRef != nil {
in, out := &in.CertSecretRef, &out.CertSecretRef
*out = new(meta.LocalObjectReference)
**out = **in
}
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BucketSTSSpec.
func (in *BucketSTSSpec) DeepCopy() *BucketSTSSpec {
if in == nil {
return nil
}
out := new(BucketSTSSpec)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *BucketSpec) DeepCopyInto(out *BucketSpec) {
*out = *in
if in.STS != nil {
in, out := &in.STS, &out.STS
*out = new(BucketSTSSpec)
(*in).DeepCopyInto(*out)
}
if in.SecretRef != nil {
in, out := &in.SecretRef, &out.SecretRef
*out = new(meta.LocalObjectReference)
**out = **in
}
if in.CertSecretRef != nil {
in, out := &in.CertSecretRef, &out.CertSecretRef
*out = new(meta.LocalObjectReference)
**out = **in
}
if in.ProxySecretRef != nil {
in, out := &in.ProxySecretRef, &out.ProxySecretRef
*out = new(meta.LocalObjectReference)
**out = **in
}
out.Interval = in.Interval
if in.Timeout != nil {
in, out := &in.Timeout, &out.Timeout
*out = new(metav1.Duration)
**out = **in
}
if in.Ignore != nil {
in, out := &in.Ignore, &out.Ignore
*out = new(string)
**out = **in
}
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BucketSpec.
func (in *BucketSpec) DeepCopy() *BucketSpec {
if in == nil {
return nil
}
out := new(BucketSpec)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *BucketStatus) DeepCopyInto(out *BucketStatus) {
*out = *in
if in.Conditions != nil {
in, out := &in.Conditions, &out.Conditions
*out = make([]metav1.Condition, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
if in.Artifact != nil {
in, out := &in.Artifact, &out.Artifact
*out = new(Artifact)
(*in).DeepCopyInto(*out)
}
if in.ObservedIgnore != nil {
in, out := &in.ObservedIgnore, &out.ObservedIgnore
*out = new(string)
**out = **in
}
out.ReconcileRequestStatus = in.ReconcileRequestStatus
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BucketStatus.
func (in *BucketStatus) DeepCopy() *BucketStatus {
if in == nil {
return nil
}
out := new(BucketStatus)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *GitRepository) DeepCopyInto(out *GitRepository) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
in.Spec.DeepCopyInto(&out.Spec)
in.Status.DeepCopyInto(&out.Status)
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GitRepository.
func (in *GitRepository) DeepCopy() *GitRepository {
if in == nil {
return nil
}
out := new(GitRepository)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *GitRepository) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *GitRepositoryInclude) DeepCopyInto(out *GitRepositoryInclude) {
*out = *in
out.GitRepositoryRef = in.GitRepositoryRef
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GitRepositoryInclude.
func (in *GitRepositoryInclude) DeepCopy() *GitRepositoryInclude {
if in == nil {
return nil
}
out := new(GitRepositoryInclude)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *GitRepositoryList) DeepCopyInto(out *GitRepositoryList) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ListMeta.DeepCopyInto(&out.ListMeta)
if in.Items != nil {
in, out := &in.Items, &out.Items
*out = make([]GitRepository, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GitRepositoryList.
func (in *GitRepositoryList) DeepCopy() *GitRepositoryList {
if in == nil {
return nil
}
out := new(GitRepositoryList)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *GitRepositoryList) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *GitRepositoryRef) DeepCopyInto(out *GitRepositoryRef) {
*out = *in
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GitRepositoryRef.
func (in *GitRepositoryRef) DeepCopy() *GitRepositoryRef {
if in == nil {
return nil
}
out := new(GitRepositoryRef)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *GitRepositorySpec) DeepCopyInto(out *GitRepositorySpec) {
*out = *in
if in.SecretRef != nil {
in, out := &in.SecretRef, &out.SecretRef
*out = new(meta.LocalObjectReference)
**out = **in
}
out.Interval = in.Interval
if in.Timeout != nil {
in, out := &in.Timeout, &out.Timeout
*out = new(metav1.Duration)
**out = **in
}
if in.Reference != nil {
in, out := &in.Reference, &out.Reference
*out = new(GitRepositoryRef)
**out = **in
}
if in.Verification != nil {
in, out := &in.Verification, &out.Verification
*out = new(GitRepositoryVerification)
**out = **in
}
if in.ProxySecretRef != nil {
in, out := &in.ProxySecretRef, &out.ProxySecretRef
*out = new(meta.LocalObjectReference)
**out = **in
}
if in.Ignore != nil {
in, out := &in.Ignore, &out.Ignore
*out = new(string)
**out = **in
}
if in.Include != nil {
in, out := &in.Include, &out.Include
*out = make([]GitRepositoryInclude, len(*in))
copy(*out, *in)
}
if in.SparseCheckout != nil {
in, out := &in.SparseCheckout, &out.SparseCheckout
*out = make([]string, len(*in))
copy(*out, *in)
}
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GitRepositorySpec.
func (in *GitRepositorySpec) DeepCopy() *GitRepositorySpec {
if in == nil {
return nil
}
out := new(GitRepositorySpec)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *GitRepositoryStatus) DeepCopyInto(out *GitRepositoryStatus) {
*out = *in
if in.Conditions != nil {
in, out := &in.Conditions, &out.Conditions
*out = make([]metav1.Condition, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
if in.Artifact != nil {
in, out := &in.Artifact, &out.Artifact
*out = new(Artifact)
(*in).DeepCopyInto(*out)
}
if in.IncludedArtifacts != nil {
in, out := &in.IncludedArtifacts, &out.IncludedArtifacts
*out = make([]*Artifact, len(*in))
for i := range *in {
if (*in)[i] != nil {
in, out := &(*in)[i], &(*out)[i]
*out = new(Artifact)
(*in).DeepCopyInto(*out)
}
}
}
if in.ObservedIgnore != nil {
in, out := &in.ObservedIgnore, &out.ObservedIgnore
*out = new(string)
**out = **in
}
if in.ObservedInclude != nil {
in, out := &in.ObservedInclude, &out.ObservedInclude
*out = make([]GitRepositoryInclude, len(*in))
copy(*out, *in)
}
if in.ObservedSparseCheckout != nil {
in, out := &in.ObservedSparseCheckout, &out.ObservedSparseCheckout
*out = make([]string, len(*in))
copy(*out, *in)
}
if in.SourceVerificationMode != nil {
in, out := &in.SourceVerificationMode, &out.SourceVerificationMode
*out = new(GitVerificationMode)
**out = **in
}
out.ReconcileRequestStatus = in.ReconcileRequestStatus
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GitRepositoryStatus.
func (in *GitRepositoryStatus) DeepCopy() *GitRepositoryStatus {
if in == nil {
return nil
}
out := new(GitRepositoryStatus)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *GitRepositoryVerification) DeepCopyInto(out *GitRepositoryVerification) {
*out = *in
out.SecretRef = in.SecretRef
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GitRepositoryVerification.
func (in *GitRepositoryVerification) DeepCopy() *GitRepositoryVerification {
if in == nil {
return nil
}
out := new(GitRepositoryVerification)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *HelmChart) DeepCopyInto(out *HelmChart) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
in.Spec.DeepCopyInto(&out.Spec)
in.Status.DeepCopyInto(&out.Status)
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HelmChart.
func (in *HelmChart) DeepCopy() *HelmChart {
if in == nil {
return nil
}
out := new(HelmChart)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *HelmChart) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *HelmChartList) DeepCopyInto(out *HelmChartList) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ListMeta.DeepCopyInto(&out.ListMeta)
if in.Items != nil {
in, out := &in.Items, &out.Items
*out = make([]HelmChart, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HelmChartList.
func (in *HelmChartList) DeepCopy() *HelmChartList {
if in == nil {
return nil
}
out := new(HelmChartList)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *HelmChartList) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *HelmChartSpec) DeepCopyInto(out *HelmChartSpec) {
*out = *in
out.SourceRef = in.SourceRef
out.Interval = in.Interval
if in.ValuesFiles != nil {
in, out := &in.ValuesFiles, &out.ValuesFiles
*out = make([]string, len(*in))
copy(*out, *in)
}
if in.Verify != nil {
in, out := &in.Verify, &out.Verify
*out = new(OCIRepositoryVerification)
(*in).DeepCopyInto(*out)
}
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HelmChartSpec.
func (in *HelmChartSpec) DeepCopy() *HelmChartSpec {
if in == nil {
return nil
}
out := new(HelmChartSpec)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *HelmChartStatus) DeepCopyInto(out *HelmChartStatus) {
*out = *in
if in.ObservedValuesFiles != nil {
in, out := &in.ObservedValuesFiles, &out.ObservedValuesFiles
*out = make([]string, len(*in))
copy(*out, *in)
}
if in.Conditions != nil {
in, out := &in.Conditions, &out.Conditions
*out = make([]metav1.Condition, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
if in.Artifact != nil {
in, out := &in.Artifact, &out.Artifact
*out = new(Artifact)
(*in).DeepCopyInto(*out)
}
out.ReconcileRequestStatus = in.ReconcileRequestStatus
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HelmChartStatus.
func (in *HelmChartStatus) DeepCopy() *HelmChartStatus {
if in == nil {
return nil
}
out := new(HelmChartStatus)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *HelmRepository) DeepCopyInto(out *HelmRepository) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
in.Spec.DeepCopyInto(&out.Spec)
in.Status.DeepCopyInto(&out.Status)
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HelmRepository.
func (in *HelmRepository) DeepCopy() *HelmRepository {
if in == nil {
return nil
}
out := new(HelmRepository)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *HelmRepository) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *HelmRepositoryList) DeepCopyInto(out *HelmRepositoryList) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ListMeta.DeepCopyInto(&out.ListMeta)
if in.Items != nil {
in, out := &in.Items, &out.Items
*out = make([]HelmRepository, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HelmRepositoryList.
func (in *HelmRepositoryList) DeepCopy() *HelmRepositoryList {
if in == nil {
return nil
}
out := new(HelmRepositoryList)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *HelmRepositoryList) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *HelmRepositorySpec) DeepCopyInto(out *HelmRepositorySpec) {
*out = *in
if in.SecretRef != nil {
in, out := &in.SecretRef, &out.SecretRef
*out = new(meta.LocalObjectReference)
**out = **in
}
if in.CertSecretRef != nil {
in, out := &in.CertSecretRef, &out.CertSecretRef
*out = new(meta.LocalObjectReference)
**out = **in
}
out.Interval = in.Interval
if in.Timeout != nil {
in, out := &in.Timeout, &out.Timeout
*out = new(metav1.Duration)
**out = **in
}
if in.AccessFrom != nil {
in, out := &in.AccessFrom, &out.AccessFrom
*out = new(acl.AccessFrom)
(*in).DeepCopyInto(*out)
}
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HelmRepositorySpec.
func (in *HelmRepositorySpec) DeepCopy() *HelmRepositorySpec {
if in == nil {
return nil
}
out := new(HelmRepositorySpec)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *HelmRepositoryStatus) DeepCopyInto(out *HelmRepositoryStatus) {
*out = *in
if in.Conditions != nil {
in, out := &in.Conditions, &out.Conditions
*out = make([]metav1.Condition, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
if in.Artifact != nil {
in, out := &in.Artifact, &out.Artifact
*out = new(Artifact)
(*in).DeepCopyInto(*out)
}
out.ReconcileRequestStatus = in.ReconcileRequestStatus
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HelmRepositoryStatus.
func (in *HelmRepositoryStatus) DeepCopy() *HelmRepositoryStatus {
if in == nil {
return nil
}
out := new(HelmRepositoryStatus)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *LocalHelmChartSourceReference) DeepCopyInto(out *LocalHelmChartSourceReference) {
*out = *in
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LocalHelmChartSourceReference.
func (in *LocalHelmChartSourceReference) DeepCopy() *LocalHelmChartSourceReference {
if in == nil {
return nil
}
out := new(LocalHelmChartSourceReference)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *OCILayerSelector) DeepCopyInto(out *OCILayerSelector) {
*out = *in
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OCILayerSelector.
func (in *OCILayerSelector) DeepCopy() *OCILayerSelector {
if in == nil {
return nil
}
out := new(OCILayerSelector)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *OCIRepository) DeepCopyInto(out *OCIRepository) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
in.Spec.DeepCopyInto(&out.Spec)
in.Status.DeepCopyInto(&out.Status)
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OCIRepository.
func (in *OCIRepository) DeepCopy() *OCIRepository {
if in == nil {
return nil
}
out := new(OCIRepository)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *OCIRepository) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *OCIRepositoryList) DeepCopyInto(out *OCIRepositoryList) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ListMeta.DeepCopyInto(&out.ListMeta)
if in.Items != nil {
in, out := &in.Items, &out.Items
*out = make([]OCIRepository, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OCIRepositoryList.
func (in *OCIRepositoryList) DeepCopy() *OCIRepositoryList {
if in == nil {
return nil
}
out := new(OCIRepositoryList)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *OCIRepositoryList) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *OCIRepositoryRef) DeepCopyInto(out *OCIRepositoryRef) {
*out = *in
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OCIRepositoryRef.
func (in *OCIRepositoryRef) DeepCopy() *OCIRepositoryRef {
if in == nil {
return nil
}
out := new(OCIRepositoryRef)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *OCIRepositorySpec) DeepCopyInto(out *OCIRepositorySpec) {
*out = *in
if in.Reference != nil {
in, out := &in.Reference, &out.Reference
*out = new(OCIRepositoryRef)
**out = **in
}
if in.LayerSelector != nil {
in, out := &in.LayerSelector, &out.LayerSelector
*out = new(OCILayerSelector)
**out = **in
}
if in.SecretRef != nil {
in, out := &in.SecretRef, &out.SecretRef
*out = new(meta.LocalObjectReference)
**out = **in
}
if in.Verify != nil {
in, out := &in.Verify, &out.Verify
*out = new(OCIRepositoryVerification)
(*in).DeepCopyInto(*out)
}
if in.CertSecretRef != nil {
in, out := &in.CertSecretRef, &out.CertSecretRef
*out = new(meta.LocalObjectReference)
**out = **in
}
if in.ProxySecretRef != nil {
in, out := &in.ProxySecretRef, &out.ProxySecretRef
*out = new(meta.LocalObjectReference)
**out = **in
}
out.Interval = in.Interval
if in.Timeout != nil {
in, out := &in.Timeout, &out.Timeout
*out = new(metav1.Duration)
**out = **in
}
if in.Ignore != nil {
in, out := &in.Ignore, &out.Ignore
*out = new(string)
**out = **in
}
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OCIRepositorySpec.
func (in *OCIRepositorySpec) DeepCopy() *OCIRepositorySpec {
if in == nil {
return nil
}
out := new(OCIRepositorySpec)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *OCIRepositoryStatus) DeepCopyInto(out *OCIRepositoryStatus) {
*out = *in
if in.Conditions != nil {
in, out := &in.Conditions, &out.Conditions
*out = make([]metav1.Condition, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
if in.Artifact != nil {
in, out := &in.Artifact, &out.Artifact
*out = new(Artifact)
(*in).DeepCopyInto(*out)
}
if in.ObservedIgnore != nil {
in, out := &in.ObservedIgnore, &out.ObservedIgnore
*out = new(string)
**out = **in
}
if in.ObservedLayerSelector != nil {
in, out := &in.ObservedLayerSelector, &out.ObservedLayerSelector
*out = new(OCILayerSelector)
**out = **in
}
out.ReconcileRequestStatus = in.ReconcileRequestStatus
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OCIRepositoryStatus.
func (in *OCIRepositoryStatus) DeepCopy() *OCIRepositoryStatus {
if in == nil {
return nil
}
out := new(OCIRepositoryStatus)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *OCIRepositoryVerification) DeepCopyInto(out *OCIRepositoryVerification) {
*out = *in
if in.SecretRef != nil {
in, out := &in.SecretRef, &out.SecretRef
*out = new(meta.LocalObjectReference)
**out = **in
}
if in.MatchOIDCIdentity != nil {
in, out := &in.MatchOIDCIdentity, &out.MatchOIDCIdentity
*out = make([]OIDCIdentityMatch, len(*in))
copy(*out, *in)
}
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OCIRepositoryVerification.
func (in *OCIRepositoryVerification) DeepCopy() *OCIRepositoryVerification {
if in == nil {
return nil
}
out := new(OCIRepositoryVerification)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *OIDCIdentityMatch) DeepCopyInto(out *OIDCIdentityMatch) {
*out = *in
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OIDCIdentityMatch.
func (in *OIDCIdentityMatch) DeepCopy() *OIDCIdentityMatch {
if in == nil {
return nil
}
out := new(OIDCIdentityMatch)
in.DeepCopyInto(out)
return out
}
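
A minimal usage sketch of the generated DeepCopy helpers above, assuming a controller-runtime client; the namespace, object name and the setIgnore helper are illustrative assumptions, not part of the API:

// Sketch: mutate a deep copy so the shared informer-cache object stays untouched.
package example

import (
	"context"

	"k8s.io/apimachinery/pkg/types"
	"sigs.k8s.io/controller-runtime/pkg/client"

	sourcev1beta2 "github.com/fluxcd/source-controller/api/v1beta2"
)

// setIgnore is a hypothetical helper, shown only to illustrate DeepCopy usage.
func setIgnore(ctx context.Context, c client.Client, ignore string) error {
	var repo sourcev1beta2.OCIRepository
	if err := c.Get(ctx, types.NamespacedName{Namespace: "flux-system", Name: "podinfo"}, &repo); err != nil {
		return err
	}
	patched := repo.DeepCopy() // independent copy; safe to mutate
	patched.Spec.Ignore = &ignore
	return c.Patch(ctx, patched, client.MergeFrom(&repo))
}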

View File

@ -126,13 +126,7 @@ func BucketProgressing(bucket Bucket) Bucket {
bucket.Status.ObservedGeneration = bucket.Generation
bucket.Status.URL = ""
bucket.Status.Conditions = []metav1.Condition{}
newCondition := metav1.Condition{
Type: meta.ReadyCondition,
Status: metav1.ConditionUnknown,
Reason: meta.ProgressingReason,
Message: "reconciliation in progress",
}
apimeta.SetStatusCondition(bucket.GetStatusConditions(), newCondition)
meta.SetResourceCondition(&bucket, meta.ReadyCondition, metav1.ConditionUnknown, meta.ProgressingReason, "reconciliation in progress")
return bucket
}
@ -142,26 +136,14 @@ func BucketProgressing(bucket Bucket) Bucket {
func BucketReady(bucket Bucket, artifact Artifact, url, reason, message string) Bucket {
bucket.Status.Artifact = &artifact
bucket.Status.URL = url
newCondition := metav1.Condition{
Type: meta.ReadyCondition,
Status: metav1.ConditionTrue,
Reason: reason,
Message: message,
}
apimeta.SetStatusCondition(bucket.GetStatusConditions(), newCondition)
meta.SetResourceCondition(&bucket, meta.ReadyCondition, metav1.ConditionTrue, reason, message)
return bucket
}
// BucketNotReady sets the meta.ReadyCondition on the Bucket to 'False', with
// the given reason and message. It returns the modified Bucket.
func BucketNotReady(bucket Bucket, reason, message string) Bucket {
newCondition := metav1.Condition{
Type: meta.ReadyCondition,
Status: metav1.ConditionFalse,
Reason: reason,
Message: message,
}
apimeta.SetStatusCondition(bucket.GetStatusConditions(), newCondition)
meta.SetResourceCondition(&bucket, meta.ReadyCondition, metav1.ConditionFalse, reason, message)
return bucket
}
@ -193,9 +175,9 @@ func (in *Bucket) GetInterval() metav1.Duration {
}
// +genclient
// +genclient:Namespaced
// +kubebuilder:object:root=true
// +kubebuilder:subresource:status
// +kubebuilder:deprecatedversion:warning="v1beta1 Bucket is deprecated, upgrade to v1"
// +kubebuilder:printcolumn:name="Endpoint",type=string,JSONPath=`.spec.endpoint`
// +kubebuilder:printcolumn:name="Ready",type="string",JSONPath=".status.conditions[?(@.type==\"Ready\")].status",description=""
// +kubebuilder:printcolumn:name="Status",type="string",JSONPath=".status.conditions[?(@.type==\"Ready\")].message",description=""

View File

@ -37,15 +37,15 @@ const (
// GitRepositorySpec defines the desired state of a Git repository.
type GitRepositorySpec struct {
// The repository URL, can be an HTTP/S or SSH address.
// +kubebuilder:validation:Pattern="^(http|https|ssh)://.*$"
// +kubebuilder:validation:Pattern="^(http|https|ssh)://"
// +required
URL string `json:"url"`
// The secret name containing the Git credentials.
// For HTTPS repositories the secret must contain username and password
// fields.
// For SSH repositories the secret must contain identity and known_hosts
// fields.
// For SSH repositories the secret must contain identity, identity.pub and
// known_hosts fields.
// +optional
SecretRef *meta.LocalObjectReference `json:"secretRef,omitempty"`
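
For illustration only (Secret names and placeholder values below are assumptions), the shape of the referenced Secret described above for HTTPS basic auth and for SSH, expressed as Go objects:

package example

import (
	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// HTTPS basic-auth Secret: 'username' and 'password' keys.
var httpsCreds = corev1.Secret{
	ObjectMeta: metav1.ObjectMeta{Name: "https-creds", Namespace: "flux-system"},
	StringData: map[string]string{
		"username": "git",
		"password": "<personal access token>",
	},
}

// SSH Secret: 'identity', 'identity.pub' and 'known_hosts' keys.
var sshCreds = corev1.Secret{
	ObjectMeta: metav1.ObjectMeta{Name: "ssh-creds", Namespace: "flux-system"},
	StringData: map[string]string{
		"identity":     "<PEM-encoded private key>",
		"identity.pub": "<public key>",
		"known_hosts":  "<output of ssh-keyscan>",
	},
}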
@ -196,13 +196,7 @@ func GitRepositoryProgressing(repository GitRepository) GitRepository {
repository.Status.ObservedGeneration = repository.Generation
repository.Status.URL = ""
repository.Status.Conditions = []metav1.Condition{}
newCondition := metav1.Condition{
Type: meta.ReadyCondition,
Status: metav1.ConditionUnknown,
Reason: meta.ProgressingReason,
Message: "reconciliation in progress",
}
apimeta.SetStatusCondition(repository.GetStatusConditions(), newCondition)
meta.SetResourceCondition(&repository, meta.ReadyCondition, metav1.ConditionUnknown, meta.ProgressingReason, "reconciliation in progress")
return repository
}
@ -213,13 +207,7 @@ func GitRepositoryReady(repository GitRepository, artifact Artifact, includedArt
repository.Status.Artifact = &artifact
repository.Status.IncludedArtifacts = includedArtifacts
repository.Status.URL = url
newCondition := metav1.Condition{
Type: meta.ReadyCondition,
Status: metav1.ConditionTrue,
Reason: reason,
Message: message,
}
apimeta.SetStatusCondition(repository.GetStatusConditions(), newCondition)
meta.SetResourceCondition(&repository, meta.ReadyCondition, metav1.ConditionTrue, reason, message)
return repository
}
@ -227,13 +215,7 @@ func GitRepositoryReady(repository GitRepository, artifact Artifact, includedArt
// to 'False', with the given reason and message. It returns the modified
// GitRepository.
func GitRepositoryNotReady(repository GitRepository, reason, message string) GitRepository {
newCondition := metav1.Condition{
Type: meta.ReadyCondition,
Status: metav1.ConditionFalse,
Reason: reason,
Message: message,
}
apimeta.SetStatusCondition(repository.GetStatusConditions(), newCondition)
meta.SetResourceCondition(&repository, meta.ReadyCondition, metav1.ConditionFalse, reason, message)
return repository
}
@ -265,10 +247,10 @@ func (in *GitRepository) GetInterval() metav1.Duration {
}
// +genclient
// +genclient:Namespaced
// +kubebuilder:object:root=true
// +kubebuilder:resource:shortName=gitrepo
// +kubebuilder:subresource:status
// +kubebuilder:deprecatedversion:warning="v1beta1 GitRepository is deprecated, upgrade to v1"
// +kubebuilder:printcolumn:name="URL",type=string,JSONPath=`.spec.url`
// +kubebuilder:printcolumn:name="Ready",type="string",JSONPath=".status.conditions[?(@.type==\"Ready\")].status",description=""
// +kubebuilder:printcolumn:name="Status",type="string",JSONPath=".status.conditions[?(@.type==\"Ready\")].message",description=""

View File

@ -152,13 +152,7 @@ func HelmChartProgressing(chart HelmChart) HelmChart {
chart.Status.ObservedGeneration = chart.Generation
chart.Status.URL = ""
chart.Status.Conditions = []metav1.Condition{}
newCondition := metav1.Condition{
Type: meta.ReadyCondition,
Status: metav1.ConditionUnknown,
Reason: meta.ProgressingReason,
Message: "reconciliation in progress",
}
apimeta.SetStatusCondition(chart.GetStatusConditions(), newCondition)
meta.SetResourceCondition(&chart, meta.ReadyCondition, metav1.ConditionUnknown, meta.ProgressingReason, "reconciliation in progress")
return chart
}
@ -168,13 +162,7 @@ func HelmChartProgressing(chart HelmChart) HelmChart {
func HelmChartReady(chart HelmChart, artifact Artifact, url, reason, message string) HelmChart {
chart.Status.Artifact = &artifact
chart.Status.URL = url
newCondition := metav1.Condition{
Type: meta.ReadyCondition,
Status: metav1.ConditionTrue,
Reason: reason,
Message: message,
}
apimeta.SetStatusCondition(chart.GetStatusConditions(), newCondition)
meta.SetResourceCondition(&chart, meta.ReadyCondition, metav1.ConditionTrue, reason, message)
return chart
}
@ -182,13 +170,7 @@ func HelmChartReady(chart HelmChart, artifact Artifact, url, reason, message str
// 'False', with the given reason and message. It returns the modified
// HelmChart.
func HelmChartNotReady(chart HelmChart, reason, message string) HelmChart {
newCondition := metav1.Condition{
Type: meta.ReadyCondition,
Status: metav1.ConditionFalse,
Reason: reason,
Message: message,
}
apimeta.SetStatusCondition(chart.GetStatusConditions(), newCondition)
meta.SetResourceCondition(&chart, meta.ReadyCondition, metav1.ConditionFalse, reason, message)
return chart
}
@ -231,10 +213,10 @@ func (in *HelmChart) GetValuesFiles() []string {
}
// +genclient
// +genclient:Namespaced
// +kubebuilder:object:root=true
// +kubebuilder:resource:shortName=hc
// +kubebuilder:subresource:status
// +kubebuilder:deprecatedversion:warning="v1beta1 HelmChart is deprecated, upgrade to v1"
// +kubebuilder:printcolumn:name="Chart",type=string,JSONPath=`.spec.chart`
// +kubebuilder:printcolumn:name="Version",type=string,JSONPath=`.spec.version`
// +kubebuilder:printcolumn:name="Source Kind",type=string,JSONPath=`.spec.sourceRef.kind`

View File

@ -43,7 +43,7 @@ type HelmRepositorySpec struct {
// For HTTP/S basic auth the secret must contain username and
// password fields.
// For TLS the secret must contain a certFile and keyFile, and/or
// caFile fields.
// caCert fields.
// +optional
SecretRef *meta.LocalObjectReference `json:"secretRef,omitempty"`
@ -113,13 +113,7 @@ func HelmRepositoryProgressing(repository HelmRepository) HelmRepository {
repository.Status.ObservedGeneration = repository.Generation
repository.Status.URL = ""
repository.Status.Conditions = []metav1.Condition{}
newCondition := metav1.Condition{
Type: meta.ReadyCondition,
Status: metav1.ConditionUnknown,
Reason: meta.ProgressingReason,
Message: "reconciliation in progress",
}
apimeta.SetStatusCondition(repository.GetStatusConditions(), newCondition)
meta.SetResourceCondition(&repository, meta.ReadyCondition, metav1.ConditionUnknown, meta.ProgressingReason, "reconciliation in progress")
return repository
}
@ -129,13 +123,7 @@ func HelmRepositoryProgressing(repository HelmRepository) HelmRepository {
func HelmRepositoryReady(repository HelmRepository, artifact Artifact, url, reason, message string) HelmRepository {
repository.Status.Artifact = &artifact
repository.Status.URL = url
newCondition := metav1.Condition{
Type: meta.ReadyCondition,
Status: metav1.ConditionTrue,
Reason: reason,
Message: message,
}
apimeta.SetStatusCondition(repository.GetStatusConditions(), newCondition)
meta.SetResourceCondition(&repository, meta.ReadyCondition, metav1.ConditionTrue, reason, message)
return repository
}
@ -143,13 +131,7 @@ func HelmRepositoryReady(repository HelmRepository, artifact Artifact, url, reas
// HelmRepository to 'False', with the given reason and message. It returns the
// modified HelmRepository.
func HelmRepositoryNotReady(repository HelmRepository, reason, message string) HelmRepository {
newCondition := metav1.Condition{
Type: meta.ReadyCondition,
Status: metav1.ConditionFalse,
Reason: reason,
Message: message,
}
apimeta.SetStatusCondition(repository.GetStatusConditions(), newCondition)
meta.SetResourceCondition(&repository, meta.ReadyCondition, metav1.ConditionFalse, reason, message)
return repository
}
@ -181,10 +163,10 @@ func (in *HelmRepository) GetInterval() metav1.Duration {
}
// +genclient
// +genclient:Namespaced
// +kubebuilder:object:root=true
// +kubebuilder:resource:shortName=helmrepo
// +kubebuilder:subresource:status
// +kubebuilder:deprecatedversion:warning="v1beta1 HelmRepository is deprecated, upgrade to v1"
// +kubebuilder:printcolumn:name="URL",type=string,JSONPath=`.spec.url`
// +kubebuilder:printcolumn:name="Ready",type="string",JSONPath=".status.conditions[?(@.type==\"Ready\")].status",description=""
// +kubebuilder:printcolumn:name="Status",type="string",JSONPath=".status.conditions[?(@.type==\"Ready\")].message",description=""

View File

@ -1,7 +1,8 @@
//go:build !ignore_autogenerated
// +build !ignore_autogenerated
/*
Copyright 2024 The Flux authors
Copyright 2020 The Flux authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.

View File

@ -1,159 +0,0 @@
/*
Copyright 2022 The Flux authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1beta2
import (
"path"
"regexp"
"strings"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
// Artifact represents the output of a Source reconciliation.
//
// Deprecated: use Artifact from api/v1 instead. This type will be removed in
// a future release.
type Artifact struct {
// Path is the relative file path of the Artifact. It can be used to locate
// the file in the root of the Artifact storage on the local file system of
// the controller managing the Source.
// +required
Path string `json:"path"`
// URL is the HTTP address of the Artifact as exposed by the controller
// managing the Source. It can be used to retrieve the Artifact for
// consumption, e.g. by another controller applying the Artifact contents.
// +required
URL string `json:"url"`
// Revision is a human-readable identifier traceable in the origin source
// system. It can be a Git commit SHA, Git tag, a Helm chart version, etc.
// +optional
Revision string `json:"revision"`
// Checksum is the SHA256 checksum of the Artifact file.
// Deprecated: use Artifact.Digest instead.
// +optional
Checksum string `json:"checksum,omitempty"`
// Digest is the digest of the file in the form of '<algorithm>:<checksum>'.
// +optional
// +kubebuilder:validation:Pattern="^[a-z0-9]+(?:[.+_-][a-z0-9]+)*:[a-zA-Z0-9=_-]+$"
Digest string `json:"digest,omitempty"`
// LastUpdateTime is the timestamp corresponding to the last update of the
// Artifact.
// +required
LastUpdateTime metav1.Time `json:"lastUpdateTime,omitempty"`
// Size is the number of bytes in the file.
// +optional
Size *int64 `json:"size,omitempty"`
// Metadata holds upstream information such as OCI annotations.
// +optional
Metadata map[string]string `json:"metadata,omitempty"`
}
// HasRevision returns if the given revision matches the current Revision of
// the Artifact.
func (in *Artifact) HasRevision(revision string) bool {
if in == nil {
return false
}
return TransformLegacyRevision(in.Revision) == TransformLegacyRevision(revision)
}
// HasChecksum returns if the given checksum matches the current Checksum of
// the Artifact.
func (in *Artifact) HasChecksum(checksum string) bool {
if in == nil {
return false
}
return in.Checksum == checksum
}
// ArtifactDir returns the artifact dir path in the form of
// '<kind>/<namespace>/<name>'.
func ArtifactDir(kind, namespace, name string) string {
kind = strings.ToLower(kind)
return path.Join(kind, namespace, name)
}
// ArtifactPath returns the artifact path in the form of
// '<kind>/<namespace>/<name>/<filename>'.
func ArtifactPath(kind, namespace, name, filename string) string {
return path.Join(ArtifactDir(kind, namespace, name), filename)
}
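A quick sketch of the layout these helpers produce (the object names are illustrative):
// For a GitRepository "podinfo" in namespace "flux-system":
dir := ArtifactDir("GitRepository", "flux-system", "podinfo")
// dir == "gitrepository/flux-system/podinfo"
p := ArtifactPath("GitRepository", "flux-system", "podinfo", "latest.tar.gz")
// p == "gitrepository/flux-system/podinfo/latest.tar.gz"
_, _ = dir, p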
// TransformLegacyRevision transforms a "legacy" revision string into a "new"
// revision string. It accepts the following formats:
//
// - main/5394cb7f48332b2de7c17dd8b8384bbc84b7e738
// - feature/branch/5394cb7f48332b2de7c17dd8b8384bbc84b7e738
// - HEAD/5394cb7f48332b2de7c17dd8b8384bbc84b7e738
// - tag/55609ff9d959589ed917ce32e6bc0f0a36809565f308602c15c3668965979edc
// - d52bde83c5b2bd0fa7910264e0afc3ac9cfe9b6636ca29c05c09742f01d5a4bd
//
// Which are transformed into the following formats respectively:
//
// - main@sha1:5394cb7f48332b2de7c17dd8b8384bbc84b7e738
// - feature/branch@sha1:5394cb7f48332b2de7c17dd8b8384bbc84b7e738
// - sha1:5394cb7f48332b2de7c17dd8b8384bbc84b7e738
// - tag@sha256:55609ff9d959589ed917ce32e6bc0f0a36809565f308602c15c3668965979edc
// - sha256:d52bde83c5b2bd0fa7910264e0afc3ac9cfe9b6636ca29c05c09742f01d5a4bd
//
// Deprecated, this function exists for backwards compatibility with existing
// resources, and to provide a transition period. Will be removed in a future
// release.
func TransformLegacyRevision(rev string) string {
if rev != "" && strings.LastIndex(rev, ":") == -1 {
if i := strings.LastIndex(rev, "/"); i >= 0 {
sha := rev[i+1:]
if algo := determineSHAType(sha); algo != "" {
if name := rev[:i]; name != "HEAD" {
return name + "@" + algo + ":" + sha
}
return algo + ":" + sha
}
}
if algo := determineSHAType(rev); algo != "" {
return algo + ":" + rev
}
}
return rev
}
// isAlphaNumHex returns true if the given string only contains 0-9 and a-f
// characters.
var isAlphaNumHex = regexp.MustCompile(`^[0-9a-f]+$`).MatchString
// determineSHAType returns the SHA algorithm used to compute the provided hex.
// The determination is heuristic and based on the length of the hex string. If
// the size is not recognized, an empty string is returned.
func determineSHAType(hex string) string {
if isAlphaNumHex(hex) {
switch len(hex) {
case 40:
return "sha1"
case 64:
return "sha256"
}
}
return ""
}

View File

@ -1,78 +0,0 @@
/*
Copyright 2023 The Flux authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1beta2
import "testing"
func TestTransformLegacyRevision(t *testing.T) {
tests := []struct {
rev string
want string
}{
{
rev: "HEAD/5394cb7f48332b2de7c17dd8b8384bbc84b7e738",
want: "sha1:5394cb7f48332b2de7c17dd8b8384bbc84b7e738",
},
{
rev: "main/5394cb7f48332b2de7c17dd8b8384bbc84b7e738",
want: "main@sha1:5394cb7f48332b2de7c17dd8b8384bbc84b7e738",
},
{
rev: "main@sha1:5394cb7f48332b2de7c17dd8b8384bbc84b7e738",
want: "main@sha1:5394cb7f48332b2de7c17dd8b8384bbc84b7e738",
},
{
rev: "feature/branch/5394cb7f48332b2de7c17dd8b8384bbc84b7e738",
want: "feature/branch@sha1:5394cb7f48332b2de7c17dd8b8384bbc84b7e738",
},
{
rev: "feature/branch@sha1:5394cb7f48332b2de7c17dd8b8384bbc84b7e738",
want: "feature/branch@sha1:5394cb7f48332b2de7c17dd8b8384bbc84b7e738",
},
{
rev: "5ac85ca617f3774baff4ae0a420b810b2546dbc9af9f346b1d55c5ed9873c55c",
want: "sha256:5ac85ca617f3774baff4ae0a420b810b2546dbc9af9f346b1d55c5ed9873c55c",
},
{
rev: "v1.0.0",
want: "v1.0.0",
},
{
rev: "v1.0.0-rc1",
want: "v1.0.0-rc1",
},
{
rev: "v1.0.0-rc1+metadata",
want: "v1.0.0-rc1+metadata",
},
{
rev: "arbitrary/revision",
want: "arbitrary/revision",
},
{
rev: "5394cb7f48332b2de7c17dd8b8384bbc84b7xxxx",
want: "5394cb7f48332b2de7c17dd8b8384bbc84b7xxxx",
},
}
for _, tt := range tests {
t.Run(tt.rev, func(t *testing.T) {
if got := TransformLegacyRevision(tt.rev); got != tt.want {
t.Errorf("TransformLegacyRevision() = %v, want %v", got, tt.want)
}
})
}
}

View File

@ -1,301 +0,0 @@
/*
Copyright 2022 The Flux authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1beta2
import (
"time"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"github.com/fluxcd/pkg/apis/acl"
"github.com/fluxcd/pkg/apis/meta"
apiv1 "github.com/fluxcd/source-controller/api/v1"
)
const (
// BucketKind is the string representation of a Bucket.
BucketKind = "Bucket"
)
const (
// BucketProviderGeneric for any S3 API compatible storage Bucket.
BucketProviderGeneric string = apiv1.BucketProviderGeneric
// BucketProviderAmazon for an AWS S3 object storage Bucket.
// Provides support for retrieving credentials from the AWS EC2 service.
BucketProviderAmazon string = apiv1.BucketProviderAmazon
// BucketProviderGoogle for a Google Cloud Storage Bucket.
// Provides support for authentication using a workload identity.
BucketProviderGoogle string = apiv1.BucketProviderGoogle
// BucketProviderAzure for an Azure Blob Storage Bucket.
// Provides support for authentication using a Service Principal,
// Managed Identity or Shared Key.
BucketProviderAzure string = apiv1.BucketProviderAzure
// GenericBucketProvider for any S3 API compatible storage Bucket.
//
// Deprecated: use BucketProviderGeneric.
GenericBucketProvider string = apiv1.BucketProviderGeneric
// AmazonBucketProvider for an AWS S3 object storage Bucket.
// Provides support for retrieving credentials from the AWS EC2 service.
//
// Deprecated: use BucketProviderAmazon.
AmazonBucketProvider string = apiv1.BucketProviderAmazon
// GoogleBucketProvider for a Google Cloud Storage Bucket.
// Provides support for authentication using a workload identity.
//
// Deprecated: use BucketProviderGoogle.
GoogleBucketProvider string = apiv1.BucketProviderGoogle
// AzureBucketProvider for an Azure Blob Storage Bucket.
// Provides support for authentication using a Service Principal,
// Managed Identity or Shared Key.
//
// Deprecated: use BucketProviderAzure.
AzureBucketProvider string = apiv1.BucketProviderAzure
)
// BucketSpec specifies the required configuration to produce an Artifact for
// an object storage bucket.
// +kubebuilder:validation:XValidation:rule="self.provider == 'aws' || self.provider == 'generic' || !has(self.sts)", message="STS configuration is only supported for the 'aws' and 'generic' Bucket providers"
// +kubebuilder:validation:XValidation:rule="self.provider != 'aws' || !has(self.sts) || self.sts.provider == 'aws'", message="'aws' is the only supported STS provider for the 'aws' Bucket provider"
// +kubebuilder:validation:XValidation:rule="self.provider != 'generic' || !has(self.sts) || self.sts.provider == 'ldap'", message="'ldap' is the only supported STS provider for the 'generic' Bucket provider"
// +kubebuilder:validation:XValidation:rule="!has(self.sts) || self.sts.provider != 'aws' || !has(self.sts.secretRef)", message="spec.sts.secretRef is not required for the 'aws' STS provider"
// +kubebuilder:validation:XValidation:rule="!has(self.sts) || self.sts.provider != 'aws' || !has(self.sts.certSecretRef)", message="spec.sts.certSecretRef is not required for the 'aws' STS provider"
type BucketSpec struct {
// Provider of the object storage bucket.
// Defaults to 'generic', which expects an S3 (API) compatible object
// storage.
// +kubebuilder:validation:Enum=generic;aws;gcp;azure
// +kubebuilder:default:=generic
// +optional
Provider string `json:"provider,omitempty"`
// BucketName is the name of the object storage bucket.
// +required
BucketName string `json:"bucketName"`
// Endpoint is the object storage address the BucketName is located at.
// +required
Endpoint string `json:"endpoint"`
// STS specifies the required configuration to use a Security Token
// Service for fetching temporary credentials to authenticate in a
// Bucket provider.
//
// This field is only supported for the `aws` and `generic` providers.
// +optional
STS *BucketSTSSpec `json:"sts,omitempty"`
// Insecure allows connecting to a non-TLS HTTP Endpoint.
// +optional
Insecure bool `json:"insecure,omitempty"`
// Region of the Endpoint where the BucketName is located in.
// +optional
Region string `json:"region,omitempty"`
// Prefix to use for server-side filtering of files in the Bucket.
// +optional
Prefix string `json:"prefix,omitempty"`
// SecretRef specifies the Secret containing authentication credentials
// for the Bucket.
// +optional
SecretRef *meta.LocalObjectReference `json:"secretRef,omitempty"`
// CertSecretRef can be given the name of a Secret containing
// either or both of
//
// - a PEM-encoded client certificate (`tls.crt`) and private
// key (`tls.key`);
// - a PEM-encoded CA certificate (`ca.crt`)
//
// and whichever are supplied, will be used for connecting to the
// bucket. The client cert and key are useful if you are
// authenticating with a certificate; the CA cert is useful if
// you are using a self-signed server certificate. The Secret must
// be of type `Opaque` or `kubernetes.io/tls`.
//
// This field is only supported for the `generic` provider.
// +optional
CertSecretRef *meta.LocalObjectReference `json:"certSecretRef,omitempty"`
// ProxySecretRef specifies the Secret containing the proxy configuration
// to use while communicating with the Bucket server.
// +optional
ProxySecretRef *meta.LocalObjectReference `json:"proxySecretRef,omitempty"`
// Interval at which the Bucket Endpoint is checked for updates.
// This interval is approximate and may be subject to jitter to ensure
// efficient use of resources.
// +kubebuilder:validation:Type=string
// +kubebuilder:validation:Pattern="^([0-9]+(\\.[0-9]+)?(ms|s|m|h))+$"
// +required
Interval metav1.Duration `json:"interval"`
// Timeout for fetch operations, defaults to 60s.
// +kubebuilder:default="60s"
// +kubebuilder:validation:Type=string
// +kubebuilder:validation:Pattern="^([0-9]+(\\.[0-9]+)?(ms|s|m))+$"
// +optional
Timeout *metav1.Duration `json:"timeout,omitempty"`
// Ignore overrides the set of excluded patterns in the .sourceignore format
// (which is the same as .gitignore). If not provided, a default will be used,
// consult the documentation for your version to find out what those are.
// +optional
Ignore *string `json:"ignore,omitempty"`
// Suspend tells the controller to suspend the reconciliation of this
// Bucket.
// +optional
Suspend bool `json:"suspend,omitempty"`
// AccessFrom specifies an Access Control List for allowing cross-namespace
// references to this object.
// NOTE: Not implemented, provisional as of https://github.com/fluxcd/flux2/pull/2092
// +optional
AccessFrom *acl.AccessFrom `json:"accessFrom,omitempty"`
}
// BucketSTSSpec specifies the required configuration to use a Security Token
// Service for fetching temporary credentials to authenticate in a Bucket
// provider.
type BucketSTSSpec struct {
// Provider of the Security Token Service.
// +kubebuilder:validation:Enum=aws;ldap
// +required
Provider string `json:"provider"`
// Endpoint is the HTTP/S endpoint of the Security Token Service from
// where temporary credentials will be fetched.
// +required
// +kubebuilder:validation:Pattern="^(http|https)://.*$"
Endpoint string `json:"endpoint"`
// SecretRef specifies the Secret containing authentication credentials
// for the STS endpoint. This Secret must contain the fields `username`
// and `password` and is supported only for the `ldap` provider.
// +optional
SecretRef *meta.LocalObjectReference `json:"secretRef,omitempty"`
// CertSecretRef can be given the name of a Secret containing
// either or both of
//
// - a PEM-encoded client certificate (`tls.crt`) and private
// key (`tls.key`);
// - a PEM-encoded CA certificate (`ca.crt`)
//
// and whichever are supplied, will be used for connecting to the
// STS endpoint. The client cert and key are useful if you are
// authenticating with a certificate; the CA cert is useful if
// you are using a self-signed server certificate. The Secret must
// be of type `Opaque` or `kubernetes.io/tls`.
//
// This field is only supported for the `ldap` provider.
// +optional
CertSecretRef *meta.LocalObjectReference `json:"certSecretRef,omitempty"`
}
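A sketch of a Bucket that satisfies the CEL validation rules above for the 'generic' provider paired with an 'ldap' STS provider; the object names, endpoints and interval are illustrative assumptions:

package example

import (
	"time"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

	"github.com/fluxcd/pkg/apis/meta"
	sourcev1beta2 "github.com/fluxcd/source-controller/api/v1beta2"
)

// A generic (S3-compatible) Bucket fetching temporary credentials from an
// LDAP-backed STS endpoint.
var minioBucket = sourcev1beta2.Bucket{
	ObjectMeta: metav1.ObjectMeta{Name: "minio-artifacts", Namespace: "flux-system"},
	Spec: sourcev1beta2.BucketSpec{
		Provider:   sourcev1beta2.BucketProviderGeneric,
		BucketName: "artifacts",
		Endpoint:   "minio.example.com:9000",
		Interval:   metav1.Duration{Duration: 5 * time.Minute},
		STS: &sourcev1beta2.BucketSTSSpec{
			Provider:  "ldap",
			Endpoint:  "https://sts.example.com",
			SecretRef: &meta.LocalObjectReference{Name: "ldap-credentials"},
		},
	},
}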
// BucketStatus records the observed state of a Bucket.
type BucketStatus struct {
// ObservedGeneration is the last observed generation of the Bucket object.
// +optional
ObservedGeneration int64 `json:"observedGeneration,omitempty"`
// Conditions holds the conditions for the Bucket.
// +optional
Conditions []metav1.Condition `json:"conditions,omitempty"`
// URL is the dynamic fetch link for the latest Artifact.
// It is provided on a "best effort" basis, and using the precise
// BucketStatus.Artifact data is recommended.
// +optional
URL string `json:"url,omitempty"`
// Artifact represents the last successful Bucket reconciliation.
// +optional
Artifact *apiv1.Artifact `json:"artifact,omitempty"`
// ObservedIgnore is the observed exclusion patterns used for constructing
// the source artifact.
// +optional
ObservedIgnore *string `json:"observedIgnore,omitempty"`
meta.ReconcileRequestStatus `json:",inline"`
}
const (
// BucketOperationSucceededReason signals that the Bucket listing and fetch
// operations succeeded.
BucketOperationSucceededReason string = "BucketOperationSucceeded"
// BucketOperationFailedReason signals that the Bucket listing or fetch
// operations failed.
BucketOperationFailedReason string = "BucketOperationFailed"
)
// GetConditions returns the status conditions of the object.
func (in Bucket) GetConditions() []metav1.Condition {
return in.Status.Conditions
}
// SetConditions sets the status conditions on the object.
func (in *Bucket) SetConditions(conditions []metav1.Condition) {
in.Status.Conditions = conditions
}
// GetRequeueAfter returns the duration after which the source must be reconciled again.
func (in Bucket) GetRequeueAfter() time.Duration {
return in.Spec.Interval.Duration
}
// GetArtifact returns the latest artifact from the source if present in the status sub-resource.
func (in *Bucket) GetArtifact() *apiv1.Artifact {
return in.Status.Artifact
}
// +genclient
// +kubebuilder:object:root=true
// +kubebuilder:subresource:status
// +kubebuilder:deprecatedversion:warning="v1beta2 Bucket is deprecated, upgrade to v1"
// +kubebuilder:printcolumn:name="Endpoint",type=string,JSONPath=`.spec.endpoint`
// +kubebuilder:printcolumn:name="Age",type="date",JSONPath=".metadata.creationTimestamp",description=""
// +kubebuilder:printcolumn:name="Ready",type="string",JSONPath=".status.conditions[?(@.type==\"Ready\")].status",description=""
// +kubebuilder:printcolumn:name="Status",type="string",JSONPath=".status.conditions[?(@.type==\"Ready\")].message",description=""
// Bucket is the Schema for the buckets API.
type Bucket struct {
metav1.TypeMeta `json:",inline"`
metav1.ObjectMeta `json:"metadata,omitempty"`
Spec BucketSpec `json:"spec,omitempty"`
// +kubebuilder:default={"observedGeneration":-1}
Status BucketStatus `json:"status,omitempty"`
}
// BucketList contains a list of Bucket objects.
// +kubebuilder:object:root=true
type BucketList struct {
metav1.TypeMeta `json:",inline"`
metav1.ListMeta `json:"metadata,omitempty"`
Items []Bucket `json:"items"`
}
func init() {
SchemeBuilder.Register(&Bucket{}, &BucketList{})
}

View File

@ -1,107 +0,0 @@
/*
Copyright 2022 The Flux authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1beta2
const SourceFinalizer = "finalizers.fluxcd.io"
const (
// ArtifactInStorageCondition indicates the availability of the Artifact in
// the storage.
// If True, the Artifact is stored successfully.
// This Condition is only present on the resource if the Artifact is
// successfully stored.
ArtifactInStorageCondition string = "ArtifactInStorage"
// ArtifactOutdatedCondition indicates the current Artifact of the Source
// is outdated.
// This is a "negative polarity" or "abnormal-true" type, and is only
// present on the resource if it is True.
ArtifactOutdatedCondition string = "ArtifactOutdated"
// SourceVerifiedCondition indicates the integrity verification of the
// Source.
// If True, the integrity check succeeded. If False, it failed.
// This Condition is only present on the resource if the integrity check
// is enabled.
SourceVerifiedCondition string = "SourceVerified"
// FetchFailedCondition indicates a transient or persistent fetch failure
// of an upstream Source.
// If True, observations on the upstream Source revision may be impossible,
// and the Artifact available for the Source may be outdated.
// This is a "negative polarity" or "abnormal-true" type, and is only
// present on the resource if it is True.
FetchFailedCondition string = "FetchFailed"
// BuildFailedCondition indicates a transient or persistent build failure
// of a Source's Artifact.
// If True, the Source can be in an ArtifactOutdatedCondition.
// This is a "negative polarity" or "abnormal-true" type, and is only
// present on the resource if it is True.
BuildFailedCondition string = "BuildFailed"
// StorageOperationFailedCondition indicates a transient or persistent
// failure related to storage. If True, the reconciliation failed while
// performing some filesystem operation.
// This is a "negative polarity" or "abnormal-true" type, and is only
// present on the resource if it is True.
StorageOperationFailedCondition string = "StorageOperationFailed"
)
// Reasons are provided as utility, and not part of the declarative API.
const (
// URLInvalidReason signals that a given Source has an invalid URL.
URLInvalidReason string = "URLInvalid"
// AuthenticationFailedReason signals that a Secret does not have the
// required fields, or the provided credentials do not match.
AuthenticationFailedReason string = "AuthenticationFailed"
// VerificationError signals that the Source's verification
// check failed.
VerificationError string = "VerificationError"
// DirCreationFailedReason signals a failure caused by a directory creation
// operation.
DirCreationFailedReason string = "DirectoryCreationFailed"
// StatOperationFailedReason signals a failure caused by a stat operation on
// a path.
StatOperationFailedReason string = "StatOperationFailed"
// ReadOperationFailedReason signals a failure caused by a read operation.
ReadOperationFailedReason string = "ReadOperationFailed"
// AcquireLockFailedReason signals a failure in acquiring lock.
AcquireLockFailedReason string = "AcquireLockFailed"
// InvalidPathReason signals a failure caused by an invalid path.
InvalidPathReason string = "InvalidPath"
// ArchiveOperationFailedReason signals a failure in archive operation.
ArchiveOperationFailedReason string = "ArchiveOperationFailed"
// SymlinkUpdateFailedReason signals a failure in updating a symlink.
SymlinkUpdateFailedReason string = "SymlinkUpdateFailed"
// ArtifactUpToDateReason signals that an existing Artifact is up-to-date
// with the Source.
ArtifactUpToDateReason string = "ArtifactUpToDate"
// CacheOperationFailedReason signals a failure in cache operation.
CacheOperationFailedReason string = "CacheOperationFailed"
)

View File

@ -1,319 +0,0 @@
/*
Copyright 2022 The Flux authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1beta2
import (
"time"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"github.com/fluxcd/pkg/apis/acl"
"github.com/fluxcd/pkg/apis/meta"
apiv1 "github.com/fluxcd/source-controller/api/v1"
)
const (
// GitRepositoryKind is the string representation of a GitRepository.
GitRepositoryKind = "GitRepository"
// GoGitImplementation for performing Git operations using go-git.
GoGitImplementation = "go-git"
// LibGit2Implementation for performing Git operations using libgit2.
LibGit2Implementation = "libgit2"
)
const (
// IncludeUnavailableCondition indicates one of the includes is not
// available. For example, because it does not exist, or does not have an
// Artifact.
// This is a "negative polarity" or "abnormal-true" type, and is only
// present on the resource if it is True.
IncludeUnavailableCondition string = "IncludeUnavailable"
)
// GitRepositorySpec specifies the required configuration to produce an
// Artifact for a Git repository.
type GitRepositorySpec struct {
// URL specifies the Git repository URL, it can be an HTTP/S or SSH address.
// +kubebuilder:validation:Pattern="^(http|https|ssh)://.*$"
// +required
URL string `json:"url"`
// SecretRef specifies the Secret containing authentication credentials for
// the GitRepository.
// For HTTPS repositories the Secret must contain 'username' and 'password'
// fields for basic auth or 'bearerToken' field for token auth.
// For SSH repositories the Secret must contain 'identity'
// and 'known_hosts' fields.
// +optional
SecretRef *meta.LocalObjectReference `json:"secretRef,omitempty"`
// Interval at which to check the GitRepository for updates.
// +kubebuilder:validation:Type=string
// +kubebuilder:validation:Pattern="^([0-9]+(\\.[0-9]+)?(ms|s|m|h))+$"
// +required
Interval metav1.Duration `json:"interval"`
// Timeout for Git operations like cloning, defaults to 60s.
// +kubebuilder:default="60s"
// +kubebuilder:validation:Type=string
// +kubebuilder:validation:Pattern="^([0-9]+(\\.[0-9]+)?(ms|s|m))+$"
// +optional
Timeout *metav1.Duration `json:"timeout,omitempty"`
// Reference specifies the Git reference to resolve and monitor for
// changes, defaults to the 'master' branch.
// +optional
Reference *GitRepositoryRef `json:"ref,omitempty"`
// Verification specifies the configuration to verify the Git commit
// signature(s).
// +optional
Verification *GitRepositoryVerification `json:"verify,omitempty"`
// Ignore overrides the set of excluded patterns in the .sourceignore format
// (which is the same as .gitignore). If not provided, a default will be used,
// consult the documentation for your version to find out what those are.
// +optional
Ignore *string `json:"ignore,omitempty"`
// Suspend tells the controller to suspend the reconciliation of this
// GitRepository.
// +optional
Suspend bool `json:"suspend,omitempty"`
// GitImplementation specifies which Git client library implementation to
// use. Defaults to 'go-git', valid values are ('go-git', 'libgit2').
// Deprecated: gitImplementation is deprecated now that 'go-git' is the
// only supported implementation.
// +kubebuilder:validation:Enum=go-git;libgit2
// +kubebuilder:default:=go-git
// +optional
GitImplementation string `json:"gitImplementation,omitempty"`
// RecurseSubmodules enables the initialization of all submodules within
// the GitRepository as cloned from the URL, using their default settings.
// +optional
RecurseSubmodules bool `json:"recurseSubmodules,omitempty"`
// Include specifies a list of GitRepository resources whose Artifacts
// should be included in the Artifact produced for this GitRepository.
Include []GitRepositoryInclude `json:"include,omitempty"`
// AccessFrom specifies an Access Control List for allowing cross-namespace
// references to this object.
// NOTE: Not implemented, provisional as of https://github.com/fluxcd/flux2/pull/2092
// +optional
AccessFrom *acl.AccessFrom `json:"accessFrom,omitempty"`
}
// GitRepositoryInclude specifies a local reference to a GitRepository whose
// Artifact (sub-)contents must be included, and where they should be placed.
type GitRepositoryInclude struct {
// GitRepositoryRef specifies the GitRepository whose Artifact contents
// must be included.
GitRepositoryRef meta.LocalObjectReference `json:"repository"`
// FromPath specifies the path to copy contents from, defaults to the root
// of the Artifact.
// +optional
FromPath string `json:"fromPath"`
// ToPath specifies the path to copy contents to, defaults to the name of
// the GitRepositoryRef.
// +optional
ToPath string `json:"toPath"`
}
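As a sketch (the repository name and paths below are illustrative), an include entry that copies the "website" repository's ./public directory into ./site within this GitRepository's Artifact:
include := []GitRepositoryInclude{
	{
		GitRepositoryRef: meta.LocalObjectReference{Name: "website"},
		FromPath:         "./public",
		ToPath:           "./site",
	},
}
_ = include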
// GetFromPath returns the specified FromPath.
func (in *GitRepositoryInclude) GetFromPath() string {
return in.FromPath
}
// GetToPath returns the specified ToPath, falling back to the name of the
// GitRepositoryRef.
func (in *GitRepositoryInclude) GetToPath() string {
if in.ToPath == "" {
return in.GitRepositoryRef.Name
}
return in.ToPath
}
// GitRepositoryRef specifies the Git reference to resolve and checkout.
type GitRepositoryRef struct {
// Branch to check out, defaults to 'master' if no other field is defined.
// +optional
Branch string `json:"branch,omitempty"`
// Tag to check out, takes precedence over Branch.
// +optional
Tag string `json:"tag,omitempty"`
// SemVer tag expression to check out, takes precedence over Tag.
// +optional
SemVer string `json:"semver,omitempty"`
// Name of the reference to check out; takes precedence over Branch, Tag and SemVer.
//
// It must be a valid Git reference: https://git-scm.com/docs/git-check-ref-format#_description
// Examples: "refs/heads/main", "refs/tags/v0.1.0", "refs/pull/420/head", "refs/merge-requests/1/head"
// +optional
Name string `json:"name,omitempty"`
// Commit SHA to check out, takes precedence over all reference fields.
//
// This can be combined with Branch to shallow clone the branch, in which
// the commit is expected to exist.
// +optional
Commit string `json:"commit,omitempty"`
}
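A sketch of the precedence described above: Commit takes priority, and combining it with Branch asks for a shallow clone of that branch in which the commit is expected to exist (the values are illustrative):
ref := &GitRepositoryRef{
	Branch: "main",
	Commit: "5394cb7f48332b2de7c17dd8b8384bbc84b7e738",
}
_ = ref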
// GitRepositoryVerification specifies the Git commit signature verification
// strategy.
type GitRepositoryVerification struct {
// Mode specifies what Git object should be verified, currently ('head').
// +kubebuilder:validation:Enum=head
Mode string `json:"mode"`
// SecretRef specifies the Secret containing the public keys of trusted Git
// authors.
SecretRef meta.LocalObjectReference `json:"secretRef"`
}
// GitRepositoryStatus records the observed state of a Git repository.
type GitRepositoryStatus struct {
// ObservedGeneration is the last observed generation of the GitRepository
// object.
// +optional
ObservedGeneration int64 `json:"observedGeneration,omitempty"`
// Conditions holds the conditions for the GitRepository.
// +optional
Conditions []metav1.Condition `json:"conditions,omitempty"`
// URL is the dynamic fetch link for the latest Artifact.
// It is provided on a "best effort" basis, and using the precise
// GitRepositoryStatus.Artifact data is recommended.
// +optional
URL string `json:"url,omitempty"`
// Artifact represents the last successful GitRepository reconciliation.
// +optional
Artifact *apiv1.Artifact `json:"artifact,omitempty"`
// IncludedArtifacts contains a list of the last successfully included
// Artifacts as instructed by GitRepositorySpec.Include.
// +optional
IncludedArtifacts []*apiv1.Artifact `json:"includedArtifacts,omitempty"`
// ContentConfigChecksum is a checksum of all the configurations related to
// the content of the source artifact:
// - .spec.ignore
// - .spec.recurseSubmodules
// - .spec.included and the checksum of the included artifacts
// observed in .status.observedGeneration version of the object. This can
// be used to determine if the content of the included repository has
// changed.
// It has the format of `<algo>:<checksum>`, for example: `sha256:<checksum>`.
//
// Deprecated: Replaced with explicit fields for observed artifact content
// config in the status.
// +optional
ContentConfigChecksum string `json:"contentConfigChecksum,omitempty"`
// ObservedIgnore is the observed exclusion patterns used for constructing
// the source artifact.
// +optional
ObservedIgnore *string `json:"observedIgnore,omitempty"`
// ObservedRecurseSubmodules is the observed resource submodules
// configuration used to produce the current Artifact.
// +optional
ObservedRecurseSubmodules bool `json:"observedRecurseSubmodules,omitempty"`
// ObservedInclude is the observed list of GitRepository resources used
// to produce the current Artifact.
// +optional
ObservedInclude []GitRepositoryInclude `json:"observedInclude,omitempty"`
meta.ReconcileRequestStatus `json:",inline"`
}
const (
// GitOperationSucceedReason signals that a Git operation (e.g. clone,
// checkout, etc.) succeeded.
GitOperationSucceedReason string = "GitOperationSucceeded"
// GitOperationFailedReason signals that a Git operation (e.g. clone,
// checkout, etc.) failed.
GitOperationFailedReason string = "GitOperationFailed"
)
// GetConditions returns the status conditions of the object.
func (in GitRepository) GetConditions() []metav1.Condition {
return in.Status.Conditions
}
// SetConditions sets the status conditions on the object.
func (in *GitRepository) SetConditions(conditions []metav1.Condition) {
in.Status.Conditions = conditions
}
// GetRequeueAfter returns the duration after which the GitRepository must be
// reconciled again.
func (in GitRepository) GetRequeueAfter() time.Duration {
return in.Spec.Interval.Duration
}
// GetArtifact returns the latest Artifact from the GitRepository if present in
// the status sub-resource.
func (in *GitRepository) GetArtifact() *apiv1.Artifact {
return in.Status.Artifact
}
// +genclient
// +kubebuilder:object:root=true
// +kubebuilder:resource:shortName=gitrepo
// +kubebuilder:subresource:status
// +kubebuilder:deprecatedversion:warning="v1beta2 GitRepository is deprecated, upgrade to v1"
// +kubebuilder:printcolumn:name="URL",type=string,JSONPath=`.spec.url`
// +kubebuilder:printcolumn:name="Age",type="date",JSONPath=".metadata.creationTimestamp",description=""
// +kubebuilder:printcolumn:name="Ready",type="string",JSONPath=".status.conditions[?(@.type==\"Ready\")].status",description=""
// +kubebuilder:printcolumn:name="Status",type="string",JSONPath=".status.conditions[?(@.type==\"Ready\")].message",description=""
// GitRepository is the Schema for the gitrepositories API.
type GitRepository struct {
metav1.TypeMeta `json:",inline"`
metav1.ObjectMeta `json:"metadata,omitempty"`
Spec GitRepositorySpec `json:"spec,omitempty"`
// +kubebuilder:default={"observedGeneration":-1}
Status GitRepositoryStatus `json:"status,omitempty"`
}
// GitRepositoryList contains a list of GitRepository objects.
// +kubebuilder:object:root=true
type GitRepositoryList struct {
metav1.TypeMeta `json:",inline"`
metav1.ListMeta `json:"metadata,omitempty"`
Items []GitRepository `json:"items"`
}
func init() {
SchemeBuilder.Register(&GitRepository{}, &GitRepositoryList{})
}

View File

@ -1,33 +0,0 @@
/*
Copyright 2022 The Flux authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1beta2
import (
"k8s.io/apimachinery/pkg/runtime/schema"
"sigs.k8s.io/controller-runtime/pkg/scheme"
)
var (
// GroupVersion is group version used to register these objects.
GroupVersion = schema.GroupVersion{Group: "source.toolkit.fluxcd.io", Version: "v1beta2"}
// SchemeBuilder is used to add go types to the GroupVersionKind scheme.
SchemeBuilder = &scheme.Builder{GroupVersion: GroupVersion}
// AddToScheme adds the types in this group-version to the given scheme.
AddToScheme = SchemeBuilder.AddToScheme
)

View File

@ -1,250 +0,0 @@
/*
Copyright 2022 The Flux authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1beta2
import (
"time"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"github.com/fluxcd/pkg/apis/acl"
"github.com/fluxcd/pkg/apis/meta"
apiv1 "github.com/fluxcd/source-controller/api/v1"
)
// HelmChartKind is the string representation of a HelmChart.
const HelmChartKind = "HelmChart"
// HelmChartSpec specifies the desired state of a Helm chart.
type HelmChartSpec struct {
// Chart is the name or path the Helm chart is available at in the
// SourceRef.
// +required
Chart string `json:"chart"`
// Version is the chart version semver expression, ignored for charts from
// GitRepository and Bucket sources. Defaults to latest when omitted.
// +kubebuilder:default:=*
// +optional
Version string `json:"version,omitempty"`
// SourceRef is the reference to the Source the chart is available at.
// +required
SourceRef LocalHelmChartSourceReference `json:"sourceRef"`
// Interval at which the HelmChart SourceRef is checked for updates.
// This interval is approximate and may be subject to jitter to ensure
// efficient use of resources.
// +kubebuilder:validation:Type=string
// +kubebuilder:validation:Pattern="^([0-9]+(\\.[0-9]+)?(ms|s|m|h))+$"
// +required
Interval metav1.Duration `json:"interval"`
// ReconcileStrategy determines what enables the creation of a new artifact.
// Valid values are ('ChartVersion', 'Revision').
// See the documentation of the values for an explanation on their behavior.
// Defaults to ChartVersion when omitted.
// +kubebuilder:validation:Enum=ChartVersion;Revision
// +kubebuilder:default:=ChartVersion
// +optional
ReconcileStrategy string `json:"reconcileStrategy,omitempty"`
// ValuesFiles is an alternative list of values files to use as the chart
// values (values.yaml is not included by default), expected to be a
// relative path in the SourceRef.
// Values files are merged in the order of this list with the last file
// overriding the first. Ignored when omitted.
// +optional
ValuesFiles []string `json:"valuesFiles,omitempty"`
// ValuesFile is an alternative values file to use as the default chart
// values, expected to be a relative path in the SourceRef. Deprecated in
// favor of ValuesFiles, for backwards compatibility the file specified here
// is merged before the ValuesFiles items. Ignored when omitted.
// +optional
// +deprecated
ValuesFile string `json:"valuesFile,omitempty"`
// IgnoreMissingValuesFiles controls whether to silently ignore missing values
// files rather than failing.
// +optional
IgnoreMissingValuesFiles bool `json:"ignoreMissingValuesFiles,omitempty"`
// Suspend tells the controller to suspend the reconciliation of this
// source.
// +optional
Suspend bool `json:"suspend,omitempty"`
// AccessFrom specifies an Access Control List for allowing cross-namespace
// references to this object.
// NOTE: Not implemented, provisional as of https://github.com/fluxcd/flux2/pull/2092
// +optional
AccessFrom *acl.AccessFrom `json:"accessFrom,omitempty"`
// Verify contains the secret name containing the trusted public keys
// used to verify the signature and specifies which provider to use to check
// whether OCI image is authentic.
// This field is only supported when using HelmRepository source with spec.type 'oci'.
// Chart dependencies, which are not bundled in the umbrella chart artifact, are not verified.
// +optional
Verify *apiv1.OCIRepositoryVerification `json:"verify,omitempty"`
}
const (
// ReconcileStrategyChartVersion reconciles when the version of the Helm chart is different.
ReconcileStrategyChartVersion string = "ChartVersion"
// ReconcileStrategyRevision reconciles when the Revision of the source is different.
ReconcileStrategyRevision string = "Revision"
)
// LocalHelmChartSourceReference contains enough information to let you locate
// the typed referenced object at namespace level.
type LocalHelmChartSourceReference struct {
// APIVersion of the referent.
// +optional
APIVersion string `json:"apiVersion,omitempty"`
// Kind of the referent, valid values are ('HelmRepository', 'GitRepository',
// 'Bucket').
// +kubebuilder:validation:Enum=HelmRepository;GitRepository;Bucket
// +required
Kind string `json:"kind"`
// Name of the referent.
// +required
Name string `json:"name"`
}
// HelmChartStatus records the observed state of the HelmChart.
type HelmChartStatus struct {
// ObservedGeneration is the last observed generation of the HelmChart
// object.
// +optional
ObservedGeneration int64 `json:"observedGeneration,omitempty"`
// ObservedSourceArtifactRevision is the last observed Artifact.Revision
// of the HelmChartSpec.SourceRef.
// +optional
ObservedSourceArtifactRevision string `json:"observedSourceArtifactRevision,omitempty"`
// ObservedChartName is the last observed chart name as specified by the
// resolved chart reference.
// +optional
ObservedChartName string `json:"observedChartName,omitempty"`
// ObservedValuesFiles are the observed value files of the last successful
// reconciliation.
// It matches the chart in the last successfully reconciled artifact.
// +optional
ObservedValuesFiles []string `json:"observedValuesFiles,omitempty"`
// Conditions holds the conditions for the HelmChart.
// +optional
Conditions []metav1.Condition `json:"conditions,omitempty"`
// URL is the dynamic fetch link for the latest Artifact.
// It is provided on a "best effort" basis, and using the precise
// HelmChartStatus.Artifact data is recommended.
// +optional
URL string `json:"url,omitempty"`
// Artifact represents the output of the last successful reconciliation.
// +optional
Artifact *apiv1.Artifact `json:"artifact,omitempty"`
meta.ReconcileRequestStatus `json:",inline"`
}
const (
// ChartPullSucceededReason signals that the pull of the Helm chart
// succeeded.
ChartPullSucceededReason string = "ChartPullSucceeded"
// ChartPackageSucceededReason signals that the package of the Helm
// chart succeeded.
ChartPackageSucceededReason string = "ChartPackageSucceeded"
)
// GetConditions returns the status conditions of the object.
func (in HelmChart) GetConditions() []metav1.Condition {
return in.Status.Conditions
}
// SetConditions sets the status conditions on the object.
func (in *HelmChart) SetConditions(conditions []metav1.Condition) {
in.Status.Conditions = conditions
}
// GetRequeueAfter returns the duration after which the source must be
// reconciled again.
func (in HelmChart) GetRequeueAfter() time.Duration {
return in.Spec.Interval.Duration
}
// GetArtifact returns the latest artifact from the source if present in the
// status sub-resource.
func (in *HelmChart) GetArtifact() *apiv1.Artifact {
return in.Status.Artifact
}
// GetValuesFiles returns a merged list of HelmChartSpec.ValuesFiles.
func (in *HelmChart) GetValuesFiles() []string {
valuesFiles := in.Spec.ValuesFiles
// Prepend the deprecated ValuesFile to the list
if in.Spec.ValuesFile != "" {
valuesFiles = append([]string{in.Spec.ValuesFile}, valuesFiles...)
}
return valuesFiles
}
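// Illustrative only: GetValuesFiles lets consumers ignore the deprecated
// ValuesFile field, since it is merged into the head of the returned list.
func exampleGetValuesFiles() {
    chart := HelmChart{Spec: HelmChartSpec{
        ValuesFile:  "override.yaml",
        ValuesFiles: []string{"values.yaml"},
    }}
    // Returns ["override.yaml", "values.yaml"].
    _ = chart.GetValuesFiles()
}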
// +genclient
// +kubebuilder:object:root=true
// +kubebuilder:resource:shortName=hc
// +kubebuilder:subresource:status
// +kubebuilder:deprecatedversion:warning="v1beta2 HelmChart is deprecated, upgrade to v1"
// +kubebuilder:printcolumn:name="Chart",type=string,JSONPath=`.spec.chart`
// +kubebuilder:printcolumn:name="Version",type=string,JSONPath=`.spec.version`
// +kubebuilder:printcolumn:name="Source Kind",type=string,JSONPath=`.spec.sourceRef.kind`
// +kubebuilder:printcolumn:name="Source Name",type=string,JSONPath=`.spec.sourceRef.name`
// +kubebuilder:printcolumn:name="Age",type="date",JSONPath=".metadata.creationTimestamp",description=""
// +kubebuilder:printcolumn:name="Ready",type="string",JSONPath=".status.conditions[?(@.type==\"Ready\")].status",description=""
// +kubebuilder:printcolumn:name="Status",type="string",JSONPath=".status.conditions[?(@.type==\"Ready\")].message",description=""
// HelmChart is the Schema for the helmcharts API.
type HelmChart struct {
metav1.TypeMeta `json:",inline"`
metav1.ObjectMeta `json:"metadata,omitempty"`
Spec HelmChartSpec `json:"spec,omitempty"`
// +kubebuilder:default={"observedGeneration":-1}
Status HelmChartStatus `json:"status,omitempty"`
}
// HelmChartList contains a list of HelmChart objects.
// +kubebuilder:object:root=true
type HelmChartList struct {
metav1.TypeMeta `json:",inline"`
metav1.ListMeta `json:"metadata,omitempty"`
Items []HelmChart `json:"items"`
}
func init() {
SchemeBuilder.Register(&HelmChart{}, &HelmChartList{})
}

View File

@ -1,230 +0,0 @@
/*
Copyright 2022 The Flux authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1beta2
import (
"time"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"github.com/fluxcd/pkg/apis/acl"
"github.com/fluxcd/pkg/apis/meta"
apiv1 "github.com/fluxcd/source-controller/api/v1"
)
const (
// HelmRepositoryKind is the string representation of a HelmRepository.
HelmRepositoryKind = "HelmRepository"
// HelmRepositoryURLIndexKey is the key used for indexing HelmRepository
// objects by their HelmRepositorySpec.URL.
HelmRepositoryURLIndexKey = ".metadata.helmRepositoryURL"
// HelmRepositoryTypeDefault is the default HelmRepository type.
// It is used when no type is specified and corresponds to a Helm repository.
HelmRepositoryTypeDefault = "default"
// HelmRepositoryTypeOCI is the type for an OCI repository.
HelmRepositoryTypeOCI = "oci"
)
// HelmRepositorySpec specifies the required configuration to produce an
// Artifact for a Helm repository index YAML.
type HelmRepositorySpec struct {
// URL of the Helm repository, a valid URL contains at least a protocol and
// host.
// +kubebuilder:validation:Pattern="^(http|https|oci)://.*$"
// +required
URL string `json:"url"`
// SecretRef specifies the Secret containing authentication credentials
// for the HelmRepository.
// For HTTP/S basic auth the secret must contain 'username' and 'password'
// fields.
// Support for TLS auth using the 'certFile' and 'keyFile', and/or 'caFile'
// keys is deprecated. Please use `.spec.certSecretRef` instead.
// +optional
SecretRef *meta.LocalObjectReference `json:"secretRef,omitempty"`
// CertSecretRef can be given the name of a Secret containing
// either or both of
//
// - a PEM-encoded client certificate (`tls.crt`) and private
// key (`tls.key`);
// - a PEM-encoded CA certificate (`ca.crt`)
//
// and whichever are supplied, will be used for connecting to the
// registry. The client cert and key are useful if you are
// authenticating with a certificate; the CA cert is useful if
// you are using a self-signed server certificate. The Secret must
// be of type `Opaque` or `kubernetes.io/tls`.
//
// It takes precedence over the values specified in the Secret referred
// to by `.spec.secretRef`.
// +optional
CertSecretRef *meta.LocalObjectReference `json:"certSecretRef,omitempty"`
// PassCredentials allows the credentials from the SecretRef to be passed
// on to a host that does not match the host as defined in URL.
// This may be required if the host of the advertised chart URLs in the
// index differs from the defined URL.
// Enabling this should be done with caution, as it can potentially result
// in credentials getting stolen in a MITM attack.
// +optional
PassCredentials bool `json:"passCredentials,omitempty"`
// Interval at which the HelmRepository URL is checked for updates.
// This interval is approximate and may be subject to jitter to ensure
// efficient use of resources.
// +kubebuilder:validation:Type=string
// +kubebuilder:validation:Pattern="^([0-9]+(\\.[0-9]+)?(ms|s|m|h))+$"
// +optional
Interval metav1.Duration `json:"interval,omitempty"`
// Insecure allows connecting to a non-TLS HTTP container registry.
// This field is only taken into account if the .spec.type field is set to 'oci'.
// +optional
Insecure bool `json:"insecure,omitempty"`
// Timeout is used for the index fetch operation for an HTTPS Helm repository,
// and for remote OCI repository operations like pulling an OCI Helm chart
// by the associated HelmChart.
// Its default value is 60s.
// +kubebuilder:validation:Type=string
// +kubebuilder:validation:Pattern="^([0-9]+(\\.[0-9]+)?(ms|s|m))+$"
// +optional
Timeout *metav1.Duration `json:"timeout,omitempty"`
// Suspend tells the controller to suspend the reconciliation of this
// HelmRepository.
// +optional
Suspend bool `json:"suspend,omitempty"`
// AccessFrom specifies an Access Control List for allowing cross-namespace
// references to this object.
// NOTE: Not implemented, provisional as of https://github.com/fluxcd/flux2/pull/2092
// +optional
AccessFrom *acl.AccessFrom `json:"accessFrom,omitempty"`
// Type of the HelmRepository.
// When this field is set to "oci", the URL field value must be prefixed with "oci://".
// +kubebuilder:validation:Enum=default;oci
// +optional
Type string `json:"type,omitempty"`
// Provider used for authentication, can be 'aws', 'azure', 'gcp' or 'generic'.
// This field is optional, and only taken into account if the .spec.type field is set to 'oci'.
// When not specified, defaults to 'generic'.
// +kubebuilder:validation:Enum=generic;aws;azure;gcp
// +kubebuilder:default:=generic
// +optional
Provider string `json:"provider,omitempty"`
}
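// Illustrative only, not part of the API: a spec for an OCI Helm repository.
// With Type set to HelmRepositoryTypeOCI the URL must use the oci:// scheme,
// and Provider selects how to authenticate to the registry.
var exampleOCIHelmRepositorySpec = HelmRepositorySpec{
    URL:      "oci://ghcr.io/stefanprodan/charts",
    Type:     HelmRepositoryTypeOCI,
    Provider: "generic",
    Interval: metav1.Duration{Duration: time.Hour},
    Timeout:  &metav1.Duration{Duration: 60 * time.Second},
}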
// HelmRepositoryStatus records the observed state of the HelmRepository.
type HelmRepositoryStatus struct {
// ObservedGeneration is the last observed generation of the HelmRepository
// object.
// +optional
ObservedGeneration int64 `json:"observedGeneration,omitempty"`
// Conditions holds the conditions for the HelmRepository.
// +optional
Conditions []metav1.Condition `json:"conditions,omitempty"`
// URL is the dynamic fetch link for the latest Artifact.
// It is provided on a "best effort" basis, and using the precise
// HelmRepositoryStatus.Artifact data is recommended.
// +optional
URL string `json:"url,omitempty"`
// Artifact represents the last successful HelmRepository reconciliation.
// +optional
Artifact *apiv1.Artifact `json:"artifact,omitempty"`
meta.ReconcileRequestStatus `json:",inline"`
}
const (
// IndexationFailedReason signals that the HelmRepository index fetch
// failed.
IndexationFailedReason string = "IndexationFailed"
)
// GetConditions returns the status conditions of the object.
func (in HelmRepository) GetConditions() []metav1.Condition {
return in.Status.Conditions
}
// SetConditions sets the status conditions on the object.
func (in *HelmRepository) SetConditions(conditions []metav1.Condition) {
in.Status.Conditions = conditions
}
// GetRequeueAfter returns the duration after which the source must be
// reconciled again.
func (in HelmRepository) GetRequeueAfter() time.Duration {
if in.Spec.Interval.Duration != 0 {
return in.Spec.Interval.Duration
}
return time.Minute
}
// GetTimeout returns the timeout duration used for various operations related
// to this HelmRepository.
func (in HelmRepository) GetTimeout() time.Duration {
if in.Spec.Timeout != nil {
return in.Spec.Timeout.Duration
}
return time.Minute
}
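// Illustrative only: both helpers fall back to one minute when the
// corresponding spec field is left empty.
func exampleHelmRepositoryDefaults() {
    var repo HelmRepository
    _ = repo.GetRequeueAfter() // 1m, Spec.Interval is the zero value
    _ = repo.GetTimeout()      // 1m, Spec.Timeout is nil
}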
// GetArtifact returns the latest artifact from the source if present in the
// status sub-resource.
func (in *HelmRepository) GetArtifact() *apiv1.Artifact {
return in.Status.Artifact
}
// +genclient
// +kubebuilder:object:root=true
// +kubebuilder:resource:shortName=helmrepo
// +kubebuilder:subresource:status
// +kubebuilder:deprecatedversion:warning="v1beta2 HelmRepository is deprecated, upgrade to v1"
// +kubebuilder:printcolumn:name="URL",type=string,JSONPath=`.spec.url`
// +kubebuilder:printcolumn:name="Age",type="date",JSONPath=".metadata.creationTimestamp",description=""
// +kubebuilder:printcolumn:name="Ready",type="string",JSONPath=".status.conditions[?(@.type==\"Ready\")].status",description=""
// +kubebuilder:printcolumn:name="Status",type="string",JSONPath=".status.conditions[?(@.type==\"Ready\")].message",description=""
// HelmRepository is the Schema for the helmrepositories API.
type HelmRepository struct {
metav1.TypeMeta `json:",inline"`
metav1.ObjectMeta `json:"metadata,omitempty"`
Spec HelmRepositorySpec `json:"spec,omitempty"`
// +kubebuilder:default={"observedGeneration":-1}
Status HelmRepositoryStatus `json:"status,omitempty"`
}
// HelmRepositoryList contains a list of HelmRepository objects.
// +kubebuilder:object:root=true
type HelmRepositoryList struct {
metav1.TypeMeta `json:",inline"`
metav1.ListMeta `json:"metadata,omitempty"`
Items []HelmRepository `json:"items"`
}
func init() {
SchemeBuilder.Register(&HelmRepository{}, &HelmRepositoryList{})
}

View File

@ -1,315 +0,0 @@
/*
Copyright 2022 The Flux authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1beta2
import (
"time"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"github.com/fluxcd/pkg/apis/meta"
apiv1 "github.com/fluxcd/source-controller/api/v1"
)
const (
// OCIRepositoryKind is the string representation of an OCIRepository.
OCIRepositoryKind = "OCIRepository"
// OCIRepositoryPrefix is the prefix used for OCIRepository URLs.
OCIRepositoryPrefix = "oci://"
// GenericOCIProvider provides support for authentication using static credentials
// for any OCI compatible API such as Docker Registry, GitHub Container Registry,
// Docker Hub, Quay, etc.
GenericOCIProvider string = "generic"
// AmazonOCIProvider provides support for OCI authentication using AWS IRSA.
AmazonOCIProvider string = "aws"
// GoogleOCIProvider provides support for OCI authentication using GCP workload identity.
GoogleOCIProvider string = "gcp"
// AzureOCIProvider provides support for OCI authentication using an Azure Service Principal,
// Managed Identity or Shared Key.
AzureOCIProvider string = "azure"
// OCILayerExtract defines the operation type for extracting the content from an OCI artifact layer.
OCILayerExtract = "extract"
// OCILayerCopy defines the operation type for copying the content from an OCI artifact layer.
OCILayerCopy = "copy"
)
// OCIRepositorySpec defines the desired state of OCIRepository
type OCIRepositorySpec struct {
// URL is a reference to an OCI artifact repository hosted
// on a remote container registry.
// +kubebuilder:validation:Pattern="^oci://.*$"
// +required
URL string `json:"url"`
// The OCI reference to pull and monitor for changes,
// defaults to the latest tag.
// +optional
Reference *OCIRepositoryRef `json:"ref,omitempty"`
// LayerSelector specifies which layer should be extracted from the OCI artifact.
// When not specified, the first layer found in the artifact is selected.
// +optional
LayerSelector *OCILayerSelector `json:"layerSelector,omitempty"`
// The provider used for authentication, can be 'aws', 'azure', 'gcp' or 'generic'.
// When not specified, defaults to 'generic'.
// +kubebuilder:validation:Enum=generic;aws;azure;gcp
// +kubebuilder:default:=generic
// +optional
Provider string `json:"provider,omitempty"`
// SecretRef contains the secret name containing the registry login
// credentials to resolve image metadata.
// The secret must be of type kubernetes.io/dockerconfigjson.
// +optional
SecretRef *meta.LocalObjectReference `json:"secretRef,omitempty"`
// Verify contains the secret name containing the trusted public keys
// used to verify the signature, and specifies which provider to use to check
// whether the OCI image is authentic.
// +optional
Verify *apiv1.OCIRepositoryVerification `json:"verify,omitempty"`
// ServiceAccountName is the name of the Kubernetes ServiceAccount used to authenticate
// the image pull if the service account has attached pull secrets. For more information:
// https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/#add-imagepullsecrets-to-a-service-account
// +optional
ServiceAccountName string `json:"serviceAccountName,omitempty"`
// CertSecretRef can be given the name of a Secret containing
// either or both of
//
// - a PEM-encoded client certificate (`tls.crt`) and private
// key (`tls.key`);
// - a PEM-encoded CA certificate (`ca.crt`)
//
// and whichever are supplied, will be used for connecting to the
// registry. The client cert and key are useful if you are
// authenticating with a certificate; the CA cert is useful if
// you are using a self-signed server certificate. The Secret must
// be of type `Opaque` or `kubernetes.io/tls`.
//
// Note: Support for the `caFile`, `certFile` and `keyFile` keys has
// been deprecated.
// +optional
CertSecretRef *meta.LocalObjectReference `json:"certSecretRef,omitempty"`
// ProxySecretRef specifies the Secret containing the proxy configuration
// to use while communicating with the container registry.
// +optional
ProxySecretRef *meta.LocalObjectReference `json:"proxySecretRef,omitempty"`
// Interval at which the OCIRepository URL is checked for updates.
// This interval is approximate and may be subject to jitter to ensure
// efficient use of resources.
// +kubebuilder:validation:Type=string
// +kubebuilder:validation:Pattern="^([0-9]+(\\.[0-9]+)?(ms|s|m|h))+$"
// +required
Interval metav1.Duration `json:"interval"`
// The timeout for remote OCI Repository operations like pulling, defaults to 60s.
// +kubebuilder:default="60s"
// +kubebuilder:validation:Type=string
// +kubebuilder:validation:Pattern="^([0-9]+(\\.[0-9]+)?(ms|s|m))+$"
// +optional
Timeout *metav1.Duration `json:"timeout,omitempty"`
// Ignore overrides the set of excluded patterns in the .sourceignore format
// (which is the same as .gitignore). If not provided, a default will be used,
// consult the documentation for your version to find out what those are.
// +optional
Ignore *string `json:"ignore,omitempty"`
// Insecure allows connecting to a non-TLS HTTP container registry.
// +optional
Insecure bool `json:"insecure,omitempty"`
// This flag tells the controller to suspend the reconciliation of this source.
// +optional
Suspend bool `json:"suspend,omitempty"`
}
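// Illustrative only, not part of the API: an OCIRepositorySpec that follows a
// SemVer range of tags and copies, rather than extracts, the first layer
// matching the given media type.
var exampleOCIRepositorySpec = OCIRepositorySpec{
    URL: "oci://ghcr.io/stefanprodan/manifests/podinfo",
    Reference: &OCIRepositoryRef{
        SemVer: ">=6.0.0",
    },
    LayerSelector: &OCILayerSelector{
        MediaType: "application/vnd.cncf.flux.content.v1.tar+gzip",
        Operation: OCILayerCopy,
    },
    Interval: metav1.Duration{Duration: 5 * time.Minute},
}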
// OCIRepositoryRef defines the image reference for the OCIRepository's URL
type OCIRepositoryRef struct {
// Digest is the image digest to pull, takes precedence over SemVer.
// The value should be in the format 'sha256:<HASH>'.
// +optional
Digest string `json:"digest,omitempty"`
// SemVer is the range of tags to pull, selecting the latest within
// the range; it takes precedence over Tag.
// +optional
SemVer string `json:"semver,omitempty"`
// SemverFilter is a regex pattern to filter the tags within the SemVer range.
// +optional
SemverFilter string `json:"semverFilter,omitempty"`
// Tag is the image tag to pull, defaults to latest.
// +optional
Tag string `json:"tag,omitempty"`
}
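// Illustrative only: when more than one reference field is set, Digest takes
// precedence over SemVer, which takes precedence over Tag, so the ref below
// resolves by digest and the tag is ignored. The digest value is a placeholder.
var exampleOCIRepositoryRef = OCIRepositoryRef{
    Digest: "sha256:0000000000000000000000000000000000000000000000000000000000000000",
    Tag:    "latest",
}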
// OCILayerSelector specifies which layer should be extracted from an OCI Artifact
type OCILayerSelector struct {
// MediaType specifies the OCI media type of the layer
// which should be extracted from the OCI Artifact. The
// first layer matching this type is selected.
// +optional
MediaType string `json:"mediaType,omitempty"`
// Operation specifies how the selected layer should be processed.
// By default, the layer compressed content is extracted to storage.
// When the operation is set to 'copy', the layer compressed content
// is persisted to storage as it is.
// +kubebuilder:validation:Enum=extract;copy
// +optional
Operation string `json:"operation,omitempty"`
}
// OCIRepositoryStatus defines the observed state of OCIRepository
type OCIRepositoryStatus struct {
// ObservedGeneration is the last observed generation.
// +optional
ObservedGeneration int64 `json:"observedGeneration,omitempty"`
// Conditions holds the conditions for the OCIRepository.
// +optional
Conditions []metav1.Condition `json:"conditions,omitempty"`
// URL is the download link for the artifact output of the last OCI Repository sync.
// +optional
URL string `json:"url,omitempty"`
// Artifact represents the output of the last successful OCI Repository sync.
// +optional
Artifact *apiv1.Artifact `json:"artifact,omitempty"`
// ContentConfigChecksum is a checksum of all the configurations related to
// the content of the source artifact:
// - .spec.ignore
// - .spec.layerSelector
// observed in .status.observedGeneration version of the object. This can
// be used to determine if the content configuration has changed and the
// artifact needs to be rebuilt.
// It has the format of `<algo>:<checksum>`, for example: `sha256:<checksum>`.
//
// Deprecated: Replaced with explicit fields for observed artifact content
// config in the status.
// +optional
ContentConfigChecksum string `json:"contentConfigChecksum,omitempty"`
// ObservedIgnore is the observed exclusion patterns used for constructing
// the source artifact.
// +optional
ObservedIgnore *string `json:"observedIgnore,omitempty"`
// ObservedLayerSelector is the observed layer selector used for constructing
// the source artifact.
// +optional
ObservedLayerSelector *OCILayerSelector `json:"observedLayerSelector,omitempty"`
meta.ReconcileRequestStatus `json:",inline"`
}
const (
// OCIPullFailedReason signals that a pull operation failed.
OCIPullFailedReason string = "OCIArtifactPullFailed"
// OCILayerOperationFailedReason signals that an OCI layer operation failed.
OCILayerOperationFailedReason string = "OCIArtifactLayerOperationFailed"
)
// GetConditions returns the status conditions of the object.
func (in OCIRepository) GetConditions() []metav1.Condition {
return in.Status.Conditions
}
// SetConditions sets the status conditions on the object.
func (in *OCIRepository) SetConditions(conditions []metav1.Condition) {
in.Status.Conditions = conditions
}
// GetRequeueAfter returns the duration after which the OCIRepository must be
// reconciled again.
func (in OCIRepository) GetRequeueAfter() time.Duration {
return in.Spec.Interval.Duration
}
// GetArtifact returns the latest Artifact from the OCIRepository if present in
// the status sub-resource.
func (in *OCIRepository) GetArtifact() *apiv1.Artifact {
return in.Status.Artifact
}
// GetLayerMediaType returns the media type layer selector if found in spec.
func (in *OCIRepository) GetLayerMediaType() string {
if in.Spec.LayerSelector == nil {
return ""
}
return in.Spec.LayerSelector.MediaType
}
// GetLayerOperation returns the layer selector operation (defaults to extract).
func (in *OCIRepository) GetLayerOperation() string {
if in.Spec.LayerSelector == nil || in.Spec.LayerSelector.Operation == "" {
return OCILayerExtract
}
return in.Spec.LayerSelector.Operation
}
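// Illustrative only: with no LayerSelector configured the helpers return the
// defaults, so callers do not need to nil-check the selector themselves.
func exampleLayerSelectorDefaults() {
    var repo OCIRepository
    _ = repo.GetLayerMediaType() // ""
    _ = repo.GetLayerOperation() // OCILayerExtract
}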
// +genclient
// +kubebuilder:object:root=true
// +kubebuilder:resource:shortName=ocirepo
// +kubebuilder:subresource:status
// +kubebuilder:deprecatedversion:warning="v1beta2 OCIRepository is deprecated, upgrade to v1"
// +kubebuilder:printcolumn:name="URL",type=string,JSONPath=`.spec.url`
// +kubebuilder:printcolumn:name="Ready",type="string",JSONPath=".status.conditions[?(@.type==\"Ready\")].status",description=""
// +kubebuilder:printcolumn:name="Status",type="string",JSONPath=".status.conditions[?(@.type==\"Ready\")].message",description=""
// +kubebuilder:printcolumn:name="Age",type="date",JSONPath=".metadata.creationTimestamp",description=""
// OCIRepository is the Schema for the ocirepositories API.
type OCIRepository struct {
metav1.TypeMeta `json:",inline"`
metav1.ObjectMeta `json:"metadata,omitempty"`
Spec OCIRepositorySpec `json:"spec,omitempty"`
// +kubebuilder:default={"observedGeneration":-1}
Status OCIRepositoryStatus `json:"status,omitempty"`
}
// OCIRepositoryList contains a list of OCIRepository
// +kubebuilder:object:root=true
type OCIRepositoryList struct {
metav1.TypeMeta `json:",inline"`
metav1.ListMeta `json:"metadata,omitempty"`
Items []OCIRepository `json:"items"`
}
func init() {
SchemeBuilder.Register(&OCIRepository{}, &OCIRepositoryList{})
}

View File

@ -1,48 +0,0 @@
/*
Copyright 2022 The Flux authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1beta2
import (
"time"
"k8s.io/apimachinery/pkg/runtime"
)
const (
// SourceIndexKey is the key used for indexing objects based on their
// referenced Source.
SourceIndexKey string = ".metadata.source"
)
// Source is the interface that provides generic access to the Artifact and
// interval. It must be supported by all kinds of the source.toolkit.fluxcd.io
// API group.
//
// Deprecated: use the Source interface from api/v1 instead. This type will be
// removed in a future release.
//
// +k8s:deepcopy-gen=false
type Source interface {
runtime.Object
// GetRequeueAfter returns the duration after which the source must be
// reconciled again.
GetRequeueAfter() time.Duration
// GetArtifact returns the latest artifact from the source if present in
// the status sub-resource.
GetArtifact() *Artifact
}
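// Illustrative only: the interface lets callers treat every v1beta2 source kind
// uniformly, for example to check whether an artifact has been produced yet.
func hasArtifact(src Source) bool {
	return src.GetArtifact() != nil
}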

View File

@ -1,26 +0,0 @@
/*
Copyright 2024 The Flux authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1beta2
const (
// STSProviderAmazon represents the AWS provider for Security Token Service.
// Provides support for fetching temporary credentials from an AWS STS endpoint.
STSProviderAmazon string = "aws"
// STSProviderLDAP represents the LDAP provider for Security Token Service.
// Provides support for fetching temporary credentials from an LDAP endpoint.
STSProviderLDAP string = "ldap"
)
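// Illustrative only, assuming the Provider and Endpoint fields of the
// BucketSTSSpec type defined alongside the Bucket API (not shown here): an STS
// configuration that fetches temporary credentials from an LDAP endpoint.
var exampleBucketSTS = BucketSTSSpec{
    Provider: STSProviderLDAP,
    Endpoint: "https://sts.example.com",
}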

View File

@ -1,876 +0,0 @@
//go:build !ignore_autogenerated
/*
Copyright 2024 The Flux authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by controller-gen. DO NOT EDIT.
package v1beta2
import (
"github.com/fluxcd/pkg/apis/acl"
"github.com/fluxcd/pkg/apis/meta"
apiv1 "github.com/fluxcd/source-controller/api/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
)
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *Artifact) DeepCopyInto(out *Artifact) {
*out = *in
in.LastUpdateTime.DeepCopyInto(&out.LastUpdateTime)
if in.Size != nil {
in, out := &in.Size, &out.Size
*out = new(int64)
**out = **in
}
if in.Metadata != nil {
in, out := &in.Metadata, &out.Metadata
*out = make(map[string]string, len(*in))
for key, val := range *in {
(*out)[key] = val
}
}
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Artifact.
func (in *Artifact) DeepCopy() *Artifact {
if in == nil {
return nil
}
out := new(Artifact)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *Bucket) DeepCopyInto(out *Bucket) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
in.Spec.DeepCopyInto(&out.Spec)
in.Status.DeepCopyInto(&out.Status)
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Bucket.
func (in *Bucket) DeepCopy() *Bucket {
if in == nil {
return nil
}
out := new(Bucket)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *Bucket) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *BucketList) DeepCopyInto(out *BucketList) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ListMeta.DeepCopyInto(&out.ListMeta)
if in.Items != nil {
in, out := &in.Items, &out.Items
*out = make([]Bucket, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BucketList.
func (in *BucketList) DeepCopy() *BucketList {
if in == nil {
return nil
}
out := new(BucketList)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *BucketList) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *BucketSTSSpec) DeepCopyInto(out *BucketSTSSpec) {
*out = *in
if in.SecretRef != nil {
in, out := &in.SecretRef, &out.SecretRef
*out = new(meta.LocalObjectReference)
**out = **in
}
if in.CertSecretRef != nil {
in, out := &in.CertSecretRef, &out.CertSecretRef
*out = new(meta.LocalObjectReference)
**out = **in
}
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BucketSTSSpec.
func (in *BucketSTSSpec) DeepCopy() *BucketSTSSpec {
if in == nil {
return nil
}
out := new(BucketSTSSpec)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *BucketSpec) DeepCopyInto(out *BucketSpec) {
*out = *in
if in.STS != nil {
in, out := &in.STS, &out.STS
*out = new(BucketSTSSpec)
(*in).DeepCopyInto(*out)
}
if in.SecretRef != nil {
in, out := &in.SecretRef, &out.SecretRef
*out = new(meta.LocalObjectReference)
**out = **in
}
if in.CertSecretRef != nil {
in, out := &in.CertSecretRef, &out.CertSecretRef
*out = new(meta.LocalObjectReference)
**out = **in
}
if in.ProxySecretRef != nil {
in, out := &in.ProxySecretRef, &out.ProxySecretRef
*out = new(meta.LocalObjectReference)
**out = **in
}
out.Interval = in.Interval
if in.Timeout != nil {
in, out := &in.Timeout, &out.Timeout
*out = new(v1.Duration)
**out = **in
}
if in.Ignore != nil {
in, out := &in.Ignore, &out.Ignore
*out = new(string)
**out = **in
}
if in.AccessFrom != nil {
in, out := &in.AccessFrom, &out.AccessFrom
*out = new(acl.AccessFrom)
(*in).DeepCopyInto(*out)
}
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BucketSpec.
func (in *BucketSpec) DeepCopy() *BucketSpec {
if in == nil {
return nil
}
out := new(BucketSpec)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *BucketStatus) DeepCopyInto(out *BucketStatus) {
*out = *in
if in.Conditions != nil {
in, out := &in.Conditions, &out.Conditions
*out = make([]v1.Condition, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
if in.Artifact != nil {
in, out := &in.Artifact, &out.Artifact
*out = new(apiv1.Artifact)
(*in).DeepCopyInto(*out)
}
if in.ObservedIgnore != nil {
in, out := &in.ObservedIgnore, &out.ObservedIgnore
*out = new(string)
**out = **in
}
out.ReconcileRequestStatus = in.ReconcileRequestStatus
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BucketStatus.
func (in *BucketStatus) DeepCopy() *BucketStatus {
if in == nil {
return nil
}
out := new(BucketStatus)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *GitRepository) DeepCopyInto(out *GitRepository) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
in.Spec.DeepCopyInto(&out.Spec)
in.Status.DeepCopyInto(&out.Status)
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GitRepository.
func (in *GitRepository) DeepCopy() *GitRepository {
if in == nil {
return nil
}
out := new(GitRepository)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *GitRepository) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *GitRepositoryInclude) DeepCopyInto(out *GitRepositoryInclude) {
*out = *in
out.GitRepositoryRef = in.GitRepositoryRef
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GitRepositoryInclude.
func (in *GitRepositoryInclude) DeepCopy() *GitRepositoryInclude {
if in == nil {
return nil
}
out := new(GitRepositoryInclude)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *GitRepositoryList) DeepCopyInto(out *GitRepositoryList) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ListMeta.DeepCopyInto(&out.ListMeta)
if in.Items != nil {
in, out := &in.Items, &out.Items
*out = make([]GitRepository, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GitRepositoryList.
func (in *GitRepositoryList) DeepCopy() *GitRepositoryList {
if in == nil {
return nil
}
out := new(GitRepositoryList)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *GitRepositoryList) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *GitRepositoryRef) DeepCopyInto(out *GitRepositoryRef) {
*out = *in
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GitRepositoryRef.
func (in *GitRepositoryRef) DeepCopy() *GitRepositoryRef {
if in == nil {
return nil
}
out := new(GitRepositoryRef)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *GitRepositorySpec) DeepCopyInto(out *GitRepositorySpec) {
*out = *in
if in.SecretRef != nil {
in, out := &in.SecretRef, &out.SecretRef
*out = new(meta.LocalObjectReference)
**out = **in
}
out.Interval = in.Interval
if in.Timeout != nil {
in, out := &in.Timeout, &out.Timeout
*out = new(v1.Duration)
**out = **in
}
if in.Reference != nil {
in, out := &in.Reference, &out.Reference
*out = new(GitRepositoryRef)
**out = **in
}
if in.Verification != nil {
in, out := &in.Verification, &out.Verification
*out = new(GitRepositoryVerification)
**out = **in
}
if in.Ignore != nil {
in, out := &in.Ignore, &out.Ignore
*out = new(string)
**out = **in
}
if in.Include != nil {
in, out := &in.Include, &out.Include
*out = make([]GitRepositoryInclude, len(*in))
copy(*out, *in)
}
if in.AccessFrom != nil {
in, out := &in.AccessFrom, &out.AccessFrom
*out = new(acl.AccessFrom)
(*in).DeepCopyInto(*out)
}
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GitRepositorySpec.
func (in *GitRepositorySpec) DeepCopy() *GitRepositorySpec {
if in == nil {
return nil
}
out := new(GitRepositorySpec)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *GitRepositoryStatus) DeepCopyInto(out *GitRepositoryStatus) {
*out = *in
if in.Conditions != nil {
in, out := &in.Conditions, &out.Conditions
*out = make([]v1.Condition, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
if in.Artifact != nil {
in, out := &in.Artifact, &out.Artifact
*out = new(apiv1.Artifact)
(*in).DeepCopyInto(*out)
}
if in.IncludedArtifacts != nil {
in, out := &in.IncludedArtifacts, &out.IncludedArtifacts
*out = make([]*apiv1.Artifact, len(*in))
for i := range *in {
if (*in)[i] != nil {
in, out := &(*in)[i], &(*out)[i]
*out = new(apiv1.Artifact)
(*in).DeepCopyInto(*out)
}
}
}
if in.ObservedIgnore != nil {
in, out := &in.ObservedIgnore, &out.ObservedIgnore
*out = new(string)
**out = **in
}
if in.ObservedInclude != nil {
in, out := &in.ObservedInclude, &out.ObservedInclude
*out = make([]GitRepositoryInclude, len(*in))
copy(*out, *in)
}
out.ReconcileRequestStatus = in.ReconcileRequestStatus
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GitRepositoryStatus.
func (in *GitRepositoryStatus) DeepCopy() *GitRepositoryStatus {
if in == nil {
return nil
}
out := new(GitRepositoryStatus)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *GitRepositoryVerification) DeepCopyInto(out *GitRepositoryVerification) {
*out = *in
out.SecretRef = in.SecretRef
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GitRepositoryVerification.
func (in *GitRepositoryVerification) DeepCopy() *GitRepositoryVerification {
if in == nil {
return nil
}
out := new(GitRepositoryVerification)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *HelmChart) DeepCopyInto(out *HelmChart) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
in.Spec.DeepCopyInto(&out.Spec)
in.Status.DeepCopyInto(&out.Status)
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HelmChart.
func (in *HelmChart) DeepCopy() *HelmChart {
if in == nil {
return nil
}
out := new(HelmChart)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *HelmChart) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *HelmChartList) DeepCopyInto(out *HelmChartList) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ListMeta.DeepCopyInto(&out.ListMeta)
if in.Items != nil {
in, out := &in.Items, &out.Items
*out = make([]HelmChart, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HelmChartList.
func (in *HelmChartList) DeepCopy() *HelmChartList {
if in == nil {
return nil
}
out := new(HelmChartList)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *HelmChartList) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *HelmChartSpec) DeepCopyInto(out *HelmChartSpec) {
*out = *in
out.SourceRef = in.SourceRef
out.Interval = in.Interval
if in.ValuesFiles != nil {
in, out := &in.ValuesFiles, &out.ValuesFiles
*out = make([]string, len(*in))
copy(*out, *in)
}
if in.AccessFrom != nil {
in, out := &in.AccessFrom, &out.AccessFrom
*out = new(acl.AccessFrom)
(*in).DeepCopyInto(*out)
}
if in.Verify != nil {
in, out := &in.Verify, &out.Verify
*out = new(apiv1.OCIRepositoryVerification)
(*in).DeepCopyInto(*out)
}
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HelmChartSpec.
func (in *HelmChartSpec) DeepCopy() *HelmChartSpec {
if in == nil {
return nil
}
out := new(HelmChartSpec)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *HelmChartStatus) DeepCopyInto(out *HelmChartStatus) {
*out = *in
if in.ObservedValuesFiles != nil {
in, out := &in.ObservedValuesFiles, &out.ObservedValuesFiles
*out = make([]string, len(*in))
copy(*out, *in)
}
if in.Conditions != nil {
in, out := &in.Conditions, &out.Conditions
*out = make([]v1.Condition, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
if in.Artifact != nil {
in, out := &in.Artifact, &out.Artifact
*out = new(apiv1.Artifact)
(*in).DeepCopyInto(*out)
}
out.ReconcileRequestStatus = in.ReconcileRequestStatus
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HelmChartStatus.
func (in *HelmChartStatus) DeepCopy() *HelmChartStatus {
if in == nil {
return nil
}
out := new(HelmChartStatus)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *HelmRepository) DeepCopyInto(out *HelmRepository) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
in.Spec.DeepCopyInto(&out.Spec)
in.Status.DeepCopyInto(&out.Status)
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HelmRepository.
func (in *HelmRepository) DeepCopy() *HelmRepository {
if in == nil {
return nil
}
out := new(HelmRepository)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *HelmRepository) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *HelmRepositoryList) DeepCopyInto(out *HelmRepositoryList) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ListMeta.DeepCopyInto(&out.ListMeta)
if in.Items != nil {
in, out := &in.Items, &out.Items
*out = make([]HelmRepository, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HelmRepositoryList.
func (in *HelmRepositoryList) DeepCopy() *HelmRepositoryList {
if in == nil {
return nil
}
out := new(HelmRepositoryList)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *HelmRepositoryList) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *HelmRepositorySpec) DeepCopyInto(out *HelmRepositorySpec) {
*out = *in
if in.SecretRef != nil {
in, out := &in.SecretRef, &out.SecretRef
*out = new(meta.LocalObjectReference)
**out = **in
}
if in.CertSecretRef != nil {
in, out := &in.CertSecretRef, &out.CertSecretRef
*out = new(meta.LocalObjectReference)
**out = **in
}
out.Interval = in.Interval
if in.Timeout != nil {
in, out := &in.Timeout, &out.Timeout
*out = new(v1.Duration)
**out = **in
}
if in.AccessFrom != nil {
in, out := &in.AccessFrom, &out.AccessFrom
*out = new(acl.AccessFrom)
(*in).DeepCopyInto(*out)
}
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HelmRepositorySpec.
func (in *HelmRepositorySpec) DeepCopy() *HelmRepositorySpec {
if in == nil {
return nil
}
out := new(HelmRepositorySpec)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *HelmRepositoryStatus) DeepCopyInto(out *HelmRepositoryStatus) {
*out = *in
if in.Conditions != nil {
in, out := &in.Conditions, &out.Conditions
*out = make([]v1.Condition, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
if in.Artifact != nil {
in, out := &in.Artifact, &out.Artifact
*out = new(apiv1.Artifact)
(*in).DeepCopyInto(*out)
}
out.ReconcileRequestStatus = in.ReconcileRequestStatus
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HelmRepositoryStatus.
func (in *HelmRepositoryStatus) DeepCopy() *HelmRepositoryStatus {
if in == nil {
return nil
}
out := new(HelmRepositoryStatus)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *LocalHelmChartSourceReference) DeepCopyInto(out *LocalHelmChartSourceReference) {
*out = *in
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LocalHelmChartSourceReference.
func (in *LocalHelmChartSourceReference) DeepCopy() *LocalHelmChartSourceReference {
if in == nil {
return nil
}
out := new(LocalHelmChartSourceReference)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *OCILayerSelector) DeepCopyInto(out *OCILayerSelector) {
*out = *in
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OCILayerSelector.
func (in *OCILayerSelector) DeepCopy() *OCILayerSelector {
if in == nil {
return nil
}
out := new(OCILayerSelector)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *OCIRepository) DeepCopyInto(out *OCIRepository) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
in.Spec.DeepCopyInto(&out.Spec)
in.Status.DeepCopyInto(&out.Status)
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OCIRepository.
func (in *OCIRepository) DeepCopy() *OCIRepository {
if in == nil {
return nil
}
out := new(OCIRepository)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *OCIRepository) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *OCIRepositoryList) DeepCopyInto(out *OCIRepositoryList) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ListMeta.DeepCopyInto(&out.ListMeta)
if in.Items != nil {
in, out := &in.Items, &out.Items
*out = make([]OCIRepository, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OCIRepositoryList.
func (in *OCIRepositoryList) DeepCopy() *OCIRepositoryList {
if in == nil {
return nil
}
out := new(OCIRepositoryList)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *OCIRepositoryList) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *OCIRepositoryRef) DeepCopyInto(out *OCIRepositoryRef) {
*out = *in
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OCIRepositoryRef.
func (in *OCIRepositoryRef) DeepCopy() *OCIRepositoryRef {
if in == nil {
return nil
}
out := new(OCIRepositoryRef)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *OCIRepositorySpec) DeepCopyInto(out *OCIRepositorySpec) {
*out = *in
if in.Reference != nil {
in, out := &in.Reference, &out.Reference
*out = new(OCIRepositoryRef)
**out = **in
}
if in.LayerSelector != nil {
in, out := &in.LayerSelector, &out.LayerSelector
*out = new(OCILayerSelector)
**out = **in
}
if in.SecretRef != nil {
in, out := &in.SecretRef, &out.SecretRef
*out = new(meta.LocalObjectReference)
**out = **in
}
if in.Verify != nil {
in, out := &in.Verify, &out.Verify
*out = new(apiv1.OCIRepositoryVerification)
(*in).DeepCopyInto(*out)
}
if in.CertSecretRef != nil {
in, out := &in.CertSecretRef, &out.CertSecretRef
*out = new(meta.LocalObjectReference)
**out = **in
}
if in.ProxySecretRef != nil {
in, out := &in.ProxySecretRef, &out.ProxySecretRef
*out = new(meta.LocalObjectReference)
**out = **in
}
out.Interval = in.Interval
if in.Timeout != nil {
in, out := &in.Timeout, &out.Timeout
*out = new(v1.Duration)
**out = **in
}
if in.Ignore != nil {
in, out := &in.Ignore, &out.Ignore
*out = new(string)
**out = **in
}
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OCIRepositorySpec.
func (in *OCIRepositorySpec) DeepCopy() *OCIRepositorySpec {
if in == nil {
return nil
}
out := new(OCIRepositorySpec)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *OCIRepositoryStatus) DeepCopyInto(out *OCIRepositoryStatus) {
*out = *in
if in.Conditions != nil {
in, out := &in.Conditions, &out.Conditions
*out = make([]v1.Condition, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
if in.Artifact != nil {
in, out := &in.Artifact, &out.Artifact
*out = new(apiv1.Artifact)
(*in).DeepCopyInto(*out)
}
if in.ObservedIgnore != nil {
in, out := &in.ObservedIgnore, &out.ObservedIgnore
*out = new(string)
**out = **in
}
if in.ObservedLayerSelector != nil {
in, out := &in.ObservedLayerSelector, &out.ObservedLayerSelector
*out = new(OCILayerSelector)
**out = **in
}
out.ReconcileRequestStatus = in.ReconcileRequestStatus
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OCIRepositoryStatus.
func (in *OCIRepositoryStatus) DeepCopy() *OCIRepositoryStatus {
if in == nil {
return nil
}
out := new(OCIRepositoryStatus)
in.DeepCopyInto(out)
return out
}

View File

@ -1,9 +1,11 @@
---
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
annotations:
controller-gen.kubebuilder.io/version: v0.16.1
controller-gen.kubebuilder.io/version: v0.7.0
creationTimestamp: null
name: buckets.source.toolkit.fluxcd.io
spec:
group: source.toolkit.fluxcd.io
@ -14,359 +16,6 @@ spec:
singular: bucket
scope: Namespaced
versions:
- additionalPrinterColumns:
- jsonPath: .spec.endpoint
name: Endpoint
type: string
- jsonPath: .metadata.creationTimestamp
name: Age
type: date
- jsonPath: .status.conditions[?(@.type=="Ready")].status
name: Ready
type: string
- jsonPath: .status.conditions[?(@.type=="Ready")].message
name: Status
type: string
name: v1
schema:
openAPIV3Schema:
description: Bucket is the Schema for the buckets API.
properties:
apiVersion:
description: |-
APIVersion defines the versioned schema of this representation of an object.
Servers should convert recognized schemas to the latest internal value, and
may reject unrecognized values.
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources
type: string
kind:
description: |-
Kind is a string value representing the REST resource this object represents.
Servers may infer this from the endpoint the client submits requests to.
Cannot be updated.
In CamelCase.
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
type: string
metadata:
type: object
spec:
description: |-
BucketSpec specifies the required configuration to produce an Artifact for
an object storage bucket.
properties:
bucketName:
description: BucketName is the name of the object storage bucket.
type: string
certSecretRef:
description: |-
CertSecretRef can be given the name of a Secret containing
either or both of
- a PEM-encoded client certificate (`tls.crt`) and private
key (`tls.key`);
- a PEM-encoded CA certificate (`ca.crt`)
and whichever are supplied, will be used for connecting to the
bucket. The client cert and key are useful if you are
authenticating with a certificate; the CA cert is useful if
you are using a self-signed server certificate. The Secret must
be of type `Opaque` or `kubernetes.io/tls`.
This field is only supported for the `generic` provider.
properties:
name:
description: Name of the referent.
type: string
required:
- name
type: object
endpoint:
description: Endpoint is the object storage address the BucketName
is located at.
type: string
ignore:
description: |-
Ignore overrides the set of excluded patterns in the .sourceignore format
(which is the same as .gitignore). If not provided, a default will be used,
consult the documentation for your version to find out what those are.
type: string
insecure:
description: Insecure allows connecting to a non-TLS HTTP Endpoint.
type: boolean
interval:
description: |-
Interval at which the Bucket Endpoint is checked for updates.
This interval is approximate and may be subject to jitter to ensure
efficient use of resources.
pattern: ^([0-9]+(\.[0-9]+)?(ms|s|m|h))+$
type: string
prefix:
description: Prefix to use for server-side filtering of files in the
Bucket.
type: string
provider:
default: generic
description: |-
Provider of the object storage bucket.
Defaults to 'generic', which expects an S3 (API) compatible object
storage.
enum:
- generic
- aws
- gcp
- azure
type: string
proxySecretRef:
description: |-
ProxySecretRef specifies the Secret containing the proxy configuration
to use while communicating with the Bucket server.
properties:
name:
description: Name of the referent.
type: string
required:
- name
type: object
region:
description: Region of the Endpoint where the BucketName is located
in.
type: string
secretRef:
description: |-
SecretRef specifies the Secret containing authentication credentials
for the Bucket.
properties:
name:
description: Name of the referent.
type: string
required:
- name
type: object
sts:
description: |-
STS specifies the required configuration to use a Security Token
Service for fetching temporary credentials to authenticate in a
Bucket provider.
This field is only supported for the `aws` and `generic` providers.
properties:
certSecretRef:
description: |-
CertSecretRef can be given the name of a Secret containing
either or both of
- a PEM-encoded client certificate (`tls.crt`) and private
key (`tls.key`);
- a PEM-encoded CA certificate (`ca.crt`)
and whichever are supplied, will be used for connecting to the
STS endpoint. The client cert and key are useful if you are
authenticating with a certificate; the CA cert is useful if
you are using a self-signed server certificate. The Secret must
be of type `Opaque` or `kubernetes.io/tls`.
This field is only supported for the `ldap` provider.
properties:
name:
description: Name of the referent.
type: string
required:
- name
type: object
endpoint:
description: |-
Endpoint is the HTTP/S endpoint of the Security Token Service from
where temporary credentials will be fetched.
pattern: ^(http|https)://.*$
type: string
provider:
description: Provider of the Security Token Service.
enum:
- aws
- ldap
type: string
secretRef:
description: |-
SecretRef specifies the Secret containing authentication credentials
for the STS endpoint. This Secret must contain the fields `username`
and `password` and is supported only for the `ldap` provider.
properties:
name:
description: Name of the referent.
type: string
required:
- name
type: object
required:
- endpoint
- provider
type: object
suspend:
description: |-
Suspend tells the controller to suspend the reconciliation of this
Bucket.
type: boolean
timeout:
default: 60s
description: Timeout for fetch operations, defaults to 60s.
pattern: ^([0-9]+(\.[0-9]+)?(ms|s|m))+$
type: string
required:
- bucketName
- endpoint
- interval
type: object
x-kubernetes-validations:
- message: STS configuration is only supported for the 'aws' and 'generic'
Bucket providers
rule: self.provider == 'aws' || self.provider == 'generic' || !has(self.sts)
- message: '''aws'' is the only supported STS provider for the ''aws''
Bucket provider'
rule: self.provider != 'aws' || !has(self.sts) || self.sts.provider
== 'aws'
- message: '''ldap'' is the only supported STS provider for the ''generic''
Bucket provider'
rule: self.provider != 'generic' || !has(self.sts) || self.sts.provider
== 'ldap'
- message: spec.sts.secretRef is not required for the 'aws' STS provider
rule: '!has(self.sts) || self.sts.provider != ''aws'' || !has(self.sts.secretRef)'
- message: spec.sts.certSecretRef is not required for the 'aws' STS provider
rule: '!has(self.sts) || self.sts.provider != ''aws'' || !has(self.sts.certSecretRef)'
status:
default:
observedGeneration: -1
description: BucketStatus records the observed state of a Bucket.
properties:
artifact:
description: Artifact represents the last successful Bucket reconciliation.
properties:
digest:
description: Digest is the digest of the file in the form of '<algorithm>:<checksum>'.
pattern: ^[a-z0-9]+(?:[.+_-][a-z0-9]+)*:[a-zA-Z0-9=_-]+$
type: string
lastUpdateTime:
description: |-
LastUpdateTime is the timestamp corresponding to the last update of the
Artifact.
format: date-time
type: string
metadata:
additionalProperties:
type: string
description: Metadata holds upstream information such as OCI annotations.
type: object
path:
description: |-
Path is the relative file path of the Artifact. It can be used to locate
the file in the root of the Artifact storage on the local file system of
the controller managing the Source.
type: string
revision:
description: |-
Revision is a human-readable identifier traceable in the origin source
system. It can be a Git commit SHA, Git tag, a Helm chart version, etc.
type: string
size:
description: Size is the number of bytes in the file.
format: int64
type: integer
url:
description: |-
URL is the HTTP address of the Artifact as exposed by the controller
managing the Source. It can be used to retrieve the Artifact for
consumption, e.g. by another controller applying the Artifact contents.
type: string
required:
- lastUpdateTime
- path
- revision
- url
type: object
conditions:
description: Conditions holds the conditions for the Bucket.
items:
description: Condition contains details for one aspect of the current
state of this API Resource.
properties:
lastTransitionTime:
description: |-
lastTransitionTime is the last time the condition transitioned from one status to another.
This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable.
format: date-time
type: string
message:
description: |-
message is a human readable message indicating details about the transition.
This may be an empty string.
maxLength: 32768
type: string
observedGeneration:
description: |-
observedGeneration represents the .metadata.generation that the condition was set based upon.
For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date
with respect to the current state of the instance.
format: int64
minimum: 0
type: integer
reason:
description: |-
reason contains a programmatic identifier indicating the reason for the condition's last transition.
Producers of specific condition types may define expected values and meanings for this field,
and whether the values are considered a guaranteed API.
The value should be a CamelCase string.
This field may not be empty.
maxLength: 1024
minLength: 1
pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$
type: string
status:
description: status of the condition, one of True, False, Unknown.
enum:
- "True"
- "False"
- Unknown
type: string
type:
description: type of condition in CamelCase or in foo.example.com/CamelCase.
maxLength: 316
pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$
type: string
required:
- lastTransitionTime
- message
- reason
- status
- type
type: object
type: array
lastHandledReconcileAt:
description: |-
LastHandledReconcileAt holds the value of the most recent
reconcile request value, so a change of the annotation value
can be detected.
type: string
observedGeneration:
description: ObservedGeneration is the last observed generation of
the Bucket object.
format: int64
type: integer
observedIgnore:
description: |-
ObservedIgnore is the observed exclusion patterns used for constructing
the source artifact.
type: string
url:
description: |-
URL is the dynamic fetch link for the latest Artifact.
It is provided on a "best effort" basis, and using the precise
BucketStatus.Artifact data is recommended.
type: string
type: object
type: object
served: true
storage: true
subresources:
status: {}
- additionalPrinterColumns:
- jsonPath: .spec.endpoint
name: Endpoint
@ -380,27 +29,20 @@ spec:
- jsonPath: .metadata.creationTimestamp
name: Age
type: date
deprecated: true
deprecationWarning: v1beta1 Bucket is deprecated, upgrade to v1
name: v1beta1
schema:
openAPIV3Schema:
description: Bucket is the Schema for the buckets API
properties:
apiVersion:
description: |-
APIVersion defines the versioned schema of this representation of an object.
Servers should convert recognized schemas to the latest internal value, and
may reject unrecognized values.
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources
description: 'APIVersion defines the versioned schema of this representation
of an object. Servers should convert recognized schemas to the latest
internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
type: string
kind:
description: |-
Kind is a string value representing the REST resource this object represents.
Servers may infer this from the endpoint the client submits requests to.
Cannot be updated.
In CamelCase.
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
description: 'Kind is a string value representing the REST resource this
object represents. Servers may infer this from the endpoint the client
submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
type: string
metadata:
type: object
@ -413,21 +55,22 @@ spec:
cross-namespace references to this object.
properties:
namespaceSelectors:
description: |-
NamespaceSelectors is the list of namespace selectors to which this ACL applies.
Items in this list are evaluated using a logical OR operation.
description: NamespaceSelectors is the list of namespace selectors
to which this ACL applies. Items in this list are evaluated
using a logical OR operation.
items:
description: |-
NamespaceSelector selects the namespaces to which this ACL applies.
An empty map of MatchLabels matches all namespaces in a cluster.
description: NamespaceSelector selects the namespaces to which
this ACL applies. An empty map of MatchLabels matches all
namespaces in a cluster.
properties:
matchLabels:
additionalProperties:
type: string
description: |-
MatchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels
map is equivalent to an element of matchExpressions, whose key field is "key", the
operator is "In", and the values array contains only "value". The requirements are ANDed.
description: MatchLabels is a map of {key,value} pairs.
A single {key,value} in the matchLabels map is equivalent
to an element of matchExpressions, whose key field is
"key", the operator is "In", and the values array contains
only "value". The requirements are ANDed.
type: object
type: object
type: array
@ -441,10 +84,10 @@ spec:
description: The bucket endpoint address.
type: string
ignore:
description: |-
Ignore overrides the set of excluded patterns in the .sourceignore format
(which is the same as .gitignore). If not provided, a default will be used,
consult the documentation for your version to find out what those are.
description: Ignore overrides the set of excluded patterns in the
.sourceignore format (which is the same as .gitignore). If not provided,
a default will be used, consult the documentation for your version
to find out what those are.
type: string
insecure:
description: Insecure allows connecting to a non-TLS S3 HTTP endpoint.
@ -464,12 +107,11 @@ spec:
description: The bucket region.
type: string
secretRef:
description: |-
The name of the secret containing authentication credentials
description: The name of the secret containing authentication credentials
for the Bucket.
properties:
name:
description: Name of the referent.
description: Name of the referent
type: string
required:
- name
@ -500,60 +142,66 @@ spec:
description: Checksum is the SHA256 checksum of the artifact.
type: string
lastUpdateTime:
description: |-
LastUpdateTime is the timestamp corresponding to the last update of this
artifact.
description: LastUpdateTime is the timestamp corresponding to
the last update of this artifact.
format: date-time
type: string
path:
description: Path is the relative file path of this artifact.
type: string
revision:
description: |-
Revision is a human readable identifier traceable in the origin source
system. It can be a Git commit SHA, Git tag, a Helm index timestamp, a Helm
chart version, etc.
description: Revision is a human readable identifier traceable
in the origin source system. It can be a Git commit SHA, Git
tag, a Helm index timestamp, a Helm chart version, etc.
type: string
url:
description: URL is the HTTP address of this artifact.
type: string
required:
- lastUpdateTime
- path
- url
type: object
conditions:
description: Conditions holds the conditions for the Bucket.
items:
description: Condition contains details for one aspect of the current
state of this API Resource.
description: "Condition contains details for one aspect of the current
state of this API Resource. --- This struct is intended for direct
use as an array at the field path .status.conditions. For example,
type FooStatus struct{ // Represents the observations of a
foo's current state. // Known .status.conditions.type are:
\"Available\", \"Progressing\", and \"Degraded\" // +patchMergeKey=type
\ // +patchStrategy=merge // +listType=map // +listMapKey=type
\ Conditions []metav1.Condition `json:\"conditions,omitempty\"
patchStrategy:\"merge\" patchMergeKey:\"type\" protobuf:\"bytes,1,rep,name=conditions\"`
\n // other fields }"
properties:
lastTransitionTime:
description: |-
lastTransitionTime is the last time the condition transitioned from one status to another.
This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable.
description: lastTransitionTime is the last time the condition
transitioned from one status to another. This should be when
the underlying condition changed. If that is not known, then
using the time when the API field changed is acceptable.
format: date-time
type: string
message:
description: |-
message is a human readable message indicating details about the transition.
This may be an empty string.
description: message is a human readable message indicating
details about the transition. This may be an empty string.
maxLength: 32768
type: string
observedGeneration:
description: |-
observedGeneration represents the .metadata.generation that the condition was set based upon.
For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date
with respect to the current state of the instance.
description: observedGeneration represents the .metadata.generation
that the condition was set based upon. For instance, if .metadata.generation
is currently 12, but the .status.conditions[x].observedGeneration
is 9, the condition is out of date with respect to the current
state of the instance.
format: int64
minimum: 0
type: integer
reason:
description: |-
reason contains a programmatic identifier indicating the reason for the condition's last transition.
Producers of specific condition types may define expected values and meanings for this field,
and whether the values are considered a guaranteed API.
The value should be a CamelCase string.
description: reason contains a programmatic identifier indicating
the reason for the condition's last transition. Producers
of specific condition types may define expected values and
meanings for this field, and whether the values are considered
a guaranteed API. The value should be a CamelCase string.
This field may not be empty.
maxLength: 1024
minLength: 1
@ -568,6 +216,10 @@ spec:
type: string
type:
description: type of condition in CamelCase or in foo.example.com/CamelCase.
--- Many .condition.type values are consistent across resources
like Available, but because arbitrary conditions can be useful
(see .node.status.conditions), the ability to deconflict is
important. The regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt)
maxLength: 316
pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$
type: string
@ -580,10 +232,8 @@ spec:
type: object
type: array
lastHandledReconcileAt:
description: |-
LastHandledReconcileAt holds the value of the most recent
reconcile request value, so a change of the annotation value
can be detected.
description: LastHandledReconcileAt holds the value of the most recent
reconcile request value, so a change can be detected.
type: string
observedGeneration:
description: ObservedGeneration is the last observed generation.
@ -596,389 +246,12 @@ spec:
type: object
type: object
served: true
storage: false
storage: true
subresources:
status: {}
- additionalPrinterColumns:
- jsonPath: .spec.endpoint
name: Endpoint
type: string
- jsonPath: .metadata.creationTimestamp
name: Age
type: date
- jsonPath: .status.conditions[?(@.type=="Ready")].status
name: Ready
type: string
- jsonPath: .status.conditions[?(@.type=="Ready")].message
name: Status
type: string
deprecated: true
deprecationWarning: v1beta2 Bucket is deprecated, upgrade to v1
name: v1beta2
schema:
openAPIV3Schema:
description: Bucket is the Schema for the buckets API.
properties:
apiVersion:
description: |-
APIVersion defines the versioned schema of this representation of an object.
Servers should convert recognized schemas to the latest internal value, and
may reject unrecognized values.
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources
type: string
kind:
description: |-
Kind is a string value representing the REST resource this object represents.
Servers may infer this from the endpoint the client submits requests to.
Cannot be updated.
In CamelCase.
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
type: string
metadata:
type: object
spec:
description: |-
BucketSpec specifies the required configuration to produce an Artifact for
an object storage bucket.
properties:
accessFrom:
description: |-
AccessFrom specifies an Access Control List for allowing cross-namespace
references to this object.
NOTE: Not implemented, provisional as of https://github.com/fluxcd/flux2/pull/2092
properties:
namespaceSelectors:
description: |-
NamespaceSelectors is the list of namespace selectors to which this ACL applies.
Items in this list are evaluated using a logical OR operation.
items:
description: |-
NamespaceSelector selects the namespaces to which this ACL applies.
An empty map of MatchLabels matches all namespaces in a cluster.
properties:
matchLabels:
additionalProperties:
type: string
description: |-
MatchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels
map is equivalent to an element of matchExpressions, whose key field is "key", the
operator is "In", and the values array contains only "value". The requirements are ANDed.
type: object
type: object
type: array
required:
- namespaceSelectors
type: object
bucketName:
description: BucketName is the name of the object storage bucket.
type: string
certSecretRef:
description: |-
CertSecretRef can be given the name of a Secret containing
either or both of
- a PEM-encoded client certificate (`tls.crt`) and private
key (`tls.key`);
- a PEM-encoded CA certificate (`ca.crt`)
and whichever are supplied, will be used for connecting to the
bucket. The client cert and key are useful if you are
authenticating with a certificate; the CA cert is useful if
you are using a self-signed server certificate. The Secret must
be of type `Opaque` or `kubernetes.io/tls`.
This field is only supported for the `generic` provider.
properties:
name:
description: Name of the referent.
type: string
required:
- name
type: object
endpoint:
description: Endpoint is the object storage address the BucketName
is located at.
type: string
ignore:
description: |-
Ignore overrides the set of excluded patterns in the .sourceignore format
(which is the same as .gitignore). If not provided, a default will be used,
consult the documentation for your version to find out what those are.
type: string
insecure:
description: Insecure allows connecting to a non-TLS HTTP Endpoint.
type: boolean
interval:
description: |-
Interval at which the Bucket Endpoint is checked for updates.
This interval is approximate and may be subject to jitter to ensure
efficient use of resources.
pattern: ^([0-9]+(\.[0-9]+)?(ms|s|m|h))+$
type: string
prefix:
description: Prefix to use for server-side filtering of files in the
Bucket.
type: string
provider:
default: generic
description: |-
Provider of the object storage bucket.
Defaults to 'generic', which expects an S3 (API) compatible object
storage.
enum:
- generic
- aws
- gcp
- azure
type: string
proxySecretRef:
description: |-
ProxySecretRef specifies the Secret containing the proxy configuration
to use while communicating with the Bucket server.
properties:
name:
description: Name of the referent.
type: string
required:
- name
type: object
region:
description: Region of the Endpoint where the BucketName is located
in.
type: string
secretRef:
description: |-
SecretRef specifies the Secret containing authentication credentials
for the Bucket.
properties:
name:
description: Name of the referent.
type: string
required:
- name
type: object
sts:
description: |-
STS specifies the required configuration to use a Security Token
Service for fetching temporary credentials to authenticate in a
Bucket provider.
This field is only supported for the `aws` and `generic` providers.
properties:
certSecretRef:
description: |-
CertSecretRef can be given the name of a Secret containing
either or both of
- a PEM-encoded client certificate (`tls.crt`) and private
key (`tls.key`);
- a PEM-encoded CA certificate (`ca.crt`)
and whichever are supplied, will be used for connecting to the
STS endpoint. The client cert and key are useful if you are
authenticating with a certificate; the CA cert is useful if
you are using a self-signed server certificate. The Secret must
be of type `Opaque` or `kubernetes.io/tls`.
This field is only supported for the `ldap` provider.
properties:
name:
description: Name of the referent.
type: string
required:
- name
type: object
endpoint:
description: |-
Endpoint is the HTTP/S endpoint of the Security Token Service from
where temporary credentials will be fetched.
pattern: ^(http|https)://.*$
type: string
provider:
description: Provider of the Security Token Service.
enum:
- aws
- ldap
type: string
secretRef:
description: |-
SecretRef specifies the Secret containing authentication credentials
for the STS endpoint. This Secret must contain the fields `username`
and `password` and is supported only for the `ldap` provider.
properties:
name:
description: Name of the referent.
type: string
required:
- name
type: object
required:
- endpoint
- provider
type: object
suspend:
description: |-
Suspend tells the controller to suspend the reconciliation of this
Bucket.
type: boolean
timeout:
default: 60s
description: Timeout for fetch operations, defaults to 60s.
pattern: ^([0-9]+(\.[0-9]+)?(ms|s|m))+$
type: string
required:
- bucketName
- endpoint
- interval
type: object
x-kubernetes-validations:
- message: STS configuration is only supported for the 'aws' and 'generic'
Bucket providers
rule: self.provider == 'aws' || self.provider == 'generic' || !has(self.sts)
- message: '''aws'' is the only supported STS provider for the ''aws''
Bucket provider'
rule: self.provider != 'aws' || !has(self.sts) || self.sts.provider
== 'aws'
- message: '''ldap'' is the only supported STS provider for the ''generic''
Bucket provider'
rule: self.provider != 'generic' || !has(self.sts) || self.sts.provider
== 'ldap'
- message: spec.sts.secretRef is not required for the 'aws' STS provider
rule: '!has(self.sts) || self.sts.provider != ''aws'' || !has(self.sts.secretRef)'
- message: spec.sts.certSecretRef is not required for the 'aws' STS provider
rule: '!has(self.sts) || self.sts.provider != ''aws'' || !has(self.sts.certSecretRef)'
status:
default:
observedGeneration: -1
description: BucketStatus records the observed state of a Bucket.
properties:
artifact:
description: Artifact represents the last successful Bucket reconciliation.
properties:
digest:
description: Digest is the digest of the file in the form of '<algorithm>:<checksum>'.
pattern: ^[a-z0-9]+(?:[.+_-][a-z0-9]+)*:[a-zA-Z0-9=_-]+$
type: string
lastUpdateTime:
description: |-
LastUpdateTime is the timestamp corresponding to the last update of the
Artifact.
format: date-time
type: string
metadata:
additionalProperties:
type: string
description: Metadata holds upstream information such as OCI annotations.
type: object
path:
description: |-
Path is the relative file path of the Artifact. It can be used to locate
the file in the root of the Artifact storage on the local file system of
the controller managing the Source.
type: string
revision:
description: |-
Revision is a human-readable identifier traceable in the origin source
system. It can be a Git commit SHA, Git tag, a Helm chart version, etc.
type: string
size:
description: Size is the number of bytes in the file.
format: int64
type: integer
url:
description: |-
URL is the HTTP address of the Artifact as exposed by the controller
managing the Source. It can be used to retrieve the Artifact for
consumption, e.g. by another controller applying the Artifact contents.
type: string
required:
- lastUpdateTime
- path
- revision
- url
type: object
conditions:
description: Conditions holds the conditions for the Bucket.
items:
description: Condition contains details for one aspect of the current
state of this API Resource.
properties:
lastTransitionTime:
description: |-
lastTransitionTime is the last time the condition transitioned from one status to another.
This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable.
format: date-time
type: string
message:
description: |-
message is a human readable message indicating details about the transition.
This may be an empty string.
maxLength: 32768
type: string
observedGeneration:
description: |-
observedGeneration represents the .metadata.generation that the condition was set based upon.
For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date
with respect to the current state of the instance.
format: int64
minimum: 0
type: integer
reason:
description: |-
reason contains a programmatic identifier indicating the reason for the condition's last transition.
Producers of specific condition types may define expected values and meanings for this field,
and whether the values are considered a guaranteed API.
The value should be a CamelCase string.
This field may not be empty.
maxLength: 1024
minLength: 1
pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$
type: string
status:
description: status of the condition, one of True, False, Unknown.
enum:
- "True"
- "False"
- Unknown
type: string
type:
description: type of condition in CamelCase or in foo.example.com/CamelCase.
maxLength: 316
pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$
type: string
required:
- lastTransitionTime
- message
- reason
- status
- type
type: object
type: array
lastHandledReconcileAt:
description: |-
LastHandledReconcileAt holds the value of the most recent
reconcile request value, so a change of the annotation value
can be detected.
type: string
observedGeneration:
description: ObservedGeneration is the last observed generation of
the Bucket object.
format: int64
type: integer
observedIgnore:
description: |-
ObservedIgnore is the observed exclusion patterns used for constructing
the source artifact.
type: string
url:
description: |-
URL is the dynamic fetch link for the latest Artifact.
It is provided on a "best effort" basis, and using the precise
BucketStatus.Artifact data is recommended.
type: string
type: object
type: object
served: true
storage: false
subresources:
status: {}
acceptedNames:
kind: ""
plural: ""
conditions: []
storedVersions: []
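
A minimal sketch, assuming placeholder names and a hypothetical MinIO-style endpoint, of how the v1 Bucket schema and the STS validation rules above compose: a 'generic' provider may only be paired with the 'ldap' STS provider, which in turn accepts a basic-auth secretRef. None of the values below come from this repository.

apiVersion: source.toolkit.fluxcd.io/v1
kind: Bucket
metadata:
  name: example-bucket             # hypothetical name
  namespace: flux-system
spec:
  provider: generic                # STS is only valid for the 'aws' and 'generic' providers
  bucketName: example-artifacts    # placeholder bucket name
  endpoint: minio.example.com      # placeholder S3-compatible endpoint
  interval: 5m
  timeout: 60s
  sts:
    provider: ldap                 # 'generic' buckets only accept the 'ldap' STS provider
    endpoint: https://sts.example.com
    secretRef:
      name: ldap-sts-credentials   # assumed Secret holding 'username' and 'password'

Switching spec.provider to 'aws' would, per the CEL rules above, require sts.provider to be 'aws' and reject both sts.secretRef and sts.certSecretRef.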

File diff suppressed because it is too large

View File

@ -1,9 +1,11 @@
---
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
annotations:
controller-gen.kubebuilder.io/version: v0.16.1
controller-gen.kubebuilder.io/version: v0.7.0
creationTimestamp: null
name: helmcharts.source.toolkit.fluxcd.io
spec:
group: source.toolkit.fluxcd.io
@ -16,338 +18,6 @@ spec:
singular: helmchart
scope: Namespaced
versions:
- additionalPrinterColumns:
- jsonPath: .spec.chart
name: Chart
type: string
- jsonPath: .spec.version
name: Version
type: string
- jsonPath: .spec.sourceRef.kind
name: Source Kind
type: string
- jsonPath: .spec.sourceRef.name
name: Source Name
type: string
- jsonPath: .metadata.creationTimestamp
name: Age
type: date
- jsonPath: .status.conditions[?(@.type=="Ready")].status
name: Ready
type: string
- jsonPath: .status.conditions[?(@.type=="Ready")].message
name: Status
type: string
name: v1
schema:
openAPIV3Schema:
description: HelmChart is the Schema for the helmcharts API.
properties:
apiVersion:
description: |-
APIVersion defines the versioned schema of this representation of an object.
Servers should convert recognized schemas to the latest internal value, and
may reject unrecognized values.
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources
type: string
kind:
description: |-
Kind is a string value representing the REST resource this object represents.
Servers may infer this from the endpoint the client submits requests to.
Cannot be updated.
In CamelCase.
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
type: string
metadata:
type: object
spec:
description: HelmChartSpec specifies the desired state of a Helm chart.
properties:
chart:
description: |-
Chart is the name or path the Helm chart is available at in the
SourceRef.
type: string
ignoreMissingValuesFiles:
description: |-
IgnoreMissingValuesFiles controls whether to silently ignore missing values
files rather than failing.
type: boolean
interval:
description: |-
Interval at which the HelmChart SourceRef is checked for updates.
This interval is approximate and may be subject to jitter to ensure
efficient use of resources.
pattern: ^([0-9]+(\.[0-9]+)?(ms|s|m|h))+$
type: string
reconcileStrategy:
default: ChartVersion
description: |-
ReconcileStrategy determines what enables the creation of a new artifact.
Valid values are ('ChartVersion', 'Revision').
See the documentation of the values for an explanation on their behavior.
Defaults to ChartVersion when omitted.
enum:
- ChartVersion
- Revision
type: string
sourceRef:
description: SourceRef is the reference to the Source the chart is
available at.
properties:
apiVersion:
description: APIVersion of the referent.
type: string
kind:
description: |-
Kind of the referent, valid values are ('HelmRepository', 'GitRepository',
'Bucket').
enum:
- HelmRepository
- GitRepository
- Bucket
type: string
name:
description: Name of the referent.
type: string
required:
- kind
- name
type: object
suspend:
description: |-
Suspend tells the controller to suspend the reconciliation of this
source.
type: boolean
valuesFiles:
description: |-
ValuesFiles is an alternative list of values files to use as the chart
values (values.yaml is not included by default), expected to be a
relative path in the SourceRef.
Values files are merged in the order of this list with the last file
overriding the first. Ignored when omitted.
items:
type: string
type: array
verify:
description: |-
Verify contains the secret name containing the trusted public keys
used to verify the signature and specifies which provider to use to check
whether OCI image is authentic.
This field is only supported when using HelmRepository source with spec.type 'oci'.
Chart dependencies, which are not bundled in the umbrella chart artifact, are not verified.
properties:
matchOIDCIdentity:
description: |-
MatchOIDCIdentity specifies the identity matching criteria to use
while verifying an OCI artifact which was signed using Cosign keyless
signing. The artifact's identity is deemed to be verified if any of the
specified matchers match against the identity.
items:
description: |-
OIDCIdentityMatch specifies options for verifying the certificate identity,
i.e. the issuer and the subject of the certificate.
properties:
issuer:
description: |-
Issuer specifies the regex pattern to match against to verify
the OIDC issuer in the Fulcio certificate. The pattern must be a
valid Go regular expression.
type: string
subject:
description: |-
Subject specifies the regex pattern to match against to verify
the identity subject in the Fulcio certificate. The pattern must
be a valid Go regular expression.
type: string
required:
- issuer
- subject
type: object
type: array
provider:
default: cosign
description: Provider specifies the technology used to sign the
OCI Artifact.
enum:
- cosign
- notation
type: string
secretRef:
description: |-
SecretRef specifies the Kubernetes Secret containing the
trusted public keys.
properties:
name:
description: Name of the referent.
type: string
required:
- name
type: object
required:
- provider
type: object
version:
default: '*'
description: |-
Version is the chart version semver expression, ignored for charts from
GitRepository and Bucket sources. Defaults to latest when omitted.
type: string
required:
- chart
- interval
- sourceRef
type: object
status:
default:
observedGeneration: -1
description: HelmChartStatus records the observed state of the HelmChart.
properties:
artifact:
description: Artifact represents the output of the last successful
reconciliation.
properties:
digest:
description: Digest is the digest of the file in the form of '<algorithm>:<checksum>'.
pattern: ^[a-z0-9]+(?:[.+_-][a-z0-9]+)*:[a-zA-Z0-9=_-]+$
type: string
lastUpdateTime:
description: |-
LastUpdateTime is the timestamp corresponding to the last update of the
Artifact.
format: date-time
type: string
metadata:
additionalProperties:
type: string
description: Metadata holds upstream information such as OCI annotations.
type: object
path:
description: |-
Path is the relative file path of the Artifact. It can be used to locate
the file in the root of the Artifact storage on the local file system of
the controller managing the Source.
type: string
revision:
description: |-
Revision is a human-readable identifier traceable in the origin source
system. It can be a Git commit SHA, Git tag, a Helm chart version, etc.
type: string
size:
description: Size is the number of bytes in the file.
format: int64
type: integer
url:
description: |-
URL is the HTTP address of the Artifact as exposed by the controller
managing the Source. It can be used to retrieve the Artifact for
consumption, e.g. by another controller applying the Artifact contents.
type: string
required:
- lastUpdateTime
- path
- revision
- url
type: object
conditions:
description: Conditions holds the conditions for the HelmChart.
items:
description: Condition contains details for one aspect of the current
state of this API Resource.
properties:
lastTransitionTime:
description: |-
lastTransitionTime is the last time the condition transitioned from one status to another.
This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable.
format: date-time
type: string
message:
description: |-
message is a human readable message indicating details about the transition.
This may be an empty string.
maxLength: 32768
type: string
observedGeneration:
description: |-
observedGeneration represents the .metadata.generation that the condition was set based upon.
For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date
with respect to the current state of the instance.
format: int64
minimum: 0
type: integer
reason:
description: |-
reason contains a programmatic identifier indicating the reason for the condition's last transition.
Producers of specific condition types may define expected values and meanings for this field,
and whether the values are considered a guaranteed API.
The value should be a CamelCase string.
This field may not be empty.
maxLength: 1024
minLength: 1
pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$
type: string
status:
description: status of the condition, one of True, False, Unknown.
enum:
- "True"
- "False"
- Unknown
type: string
type:
description: type of condition in CamelCase or in foo.example.com/CamelCase.
maxLength: 316
pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$
type: string
required:
- lastTransitionTime
- message
- reason
- status
- type
type: object
type: array
lastHandledReconcileAt:
description: |-
LastHandledReconcileAt holds the value of the most recent
reconcile request value, so a change of the annotation value
can be detected.
type: string
observedChartName:
description: |-
ObservedChartName is the last observed chart name as specified by the
resolved chart reference.
type: string
observedGeneration:
description: |-
ObservedGeneration is the last observed generation of the HelmChart
object.
format: int64
type: integer
observedSourceArtifactRevision:
description: |-
ObservedSourceArtifactRevision is the last observed Artifact.Revision
of the HelmChartSpec.SourceRef.
type: string
observedValuesFiles:
description: |-
ObservedValuesFiles are the observed value files of the last successful
reconciliation.
It matches the chart in the last successfully reconciled artifact.
items:
type: string
type: array
url:
description: |-
URL is the dynamic fetch link for the latest Artifact.
It is provided on a "best effort" basis, and using the precise
BucketStatus.Artifact data is recommended.
type: string
type: object
type: object
served: true
storage: true
subresources:
status: {}
- additionalPrinterColumns:
- jsonPath: .spec.chart
name: Chart
@ -370,27 +40,20 @@ spec:
- jsonPath: .metadata.creationTimestamp
name: Age
type: date
deprecated: true
deprecationWarning: v1beta1 HelmChart is deprecated, upgrade to v1
name: v1beta1
schema:
openAPIV3Schema:
description: HelmChart is the Schema for the helmcharts API
properties:
apiVersion:
description: |-
APIVersion defines the versioned schema of this representation of an object.
Servers should convert recognized schemas to the latest internal value, and
may reject unrecognized values.
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources
description: 'APIVersion defines the versioned schema of this representation
of an object. Servers should convert recognized schemas to the latest
internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
type: string
kind:
description: |-
Kind is a string value representing the REST resource this object represents.
Servers may infer this from the endpoint the client submits requests to.
Cannot be updated.
In CamelCase.
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
description: 'Kind is a string value representing the REST resource this
object represents. Servers may infer this from the endpoint the client
submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
type: string
metadata:
type: object
@ -402,21 +65,22 @@ spec:
cross-namespace references to this object.
properties:
namespaceSelectors:
description: |-
NamespaceSelectors is the list of namespace selectors to which this ACL applies.
Items in this list are evaluated using a logical OR operation.
description: NamespaceSelectors is the list of namespace selectors
to which this ACL applies. Items in this list are evaluated
using a logical OR operation.
items:
description: |-
NamespaceSelector selects the namespaces to which this ACL applies.
An empty map of MatchLabels matches all namespaces in a cluster.
description: NamespaceSelector selects the namespaces to which
this ACL applies. An empty map of MatchLabels matches all
namespaces in a cluster.
properties:
matchLabels:
additionalProperties:
type: string
description: |-
MatchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels
map is equivalent to an element of matchExpressions, whose key field is "key", the
operator is "In", and the values array contains only "value". The requirements are ANDed.
description: MatchLabels is a map of {key,value} pairs.
A single {key,value} in the matchLabels map is equivalent
to an element of matchExpressions, whose key field is
"key", the operator is "In", and the values array contains
only "value". The requirements are ANDed.
type: object
type: object
type: array
@ -432,11 +96,10 @@ spec:
type: string
reconcileStrategy:
default: ChartVersion
description: |-
Determines what enables the creation of a new artifact. Valid values are
('ChartVersion', 'Revision').
See the documentation of the values for an explanation on their behavior.
Defaults to ChartVersion when omitted.
description: Determines what enables the creation of a new artifact.
Valid values are ('ChartVersion', 'Revision'). See the documentation
of the values for an explanation on their behavior. Defaults to
ChartVersion when omitted.
enum:
- ChartVersion
- Revision
@ -448,9 +111,8 @@ spec:
description: APIVersion of the referent.
type: string
kind:
description: |-
Kind of the referent, valid values are ('HelmRepository', 'GitRepository',
'Bucket').
description: Kind of the referent, valid values are ('HelmRepository',
'GitRepository', 'Bucket').
enum:
- HelmRepository
- GitRepository
@ -468,26 +130,24 @@ spec:
of this source.
type: boolean
valuesFile:
description: |-
Alternative values file to use as the default chart values, expected to
be a relative path in the SourceRef. Deprecated in favor of ValuesFiles,
for backwards compatibility the file defined here is merged before the
ValuesFiles items. Ignored when omitted.
description: Alternative values file to use as the default chart values,
expected to be a relative path in the SourceRef. Deprecated in favor
of ValuesFiles, for backwards compatibility the file defined here
is merged before the ValuesFiles items. Ignored when omitted.
type: string
valuesFiles:
description: |-
Alternative list of values files to use as the chart values (values.yaml
is not included by default), expected to be a relative path in the SourceRef.
Values files are merged in the order of this list with the last file overriding
the first. Ignored when omitted.
description: Alternative list of values files to use as the chart
values (values.yaml is not included by default), expected to be
a relative path in the SourceRef. Values files are merged in the
order of this list with the last file overriding the first. Ignored
when omitted.
items:
type: string
type: array
version:
default: '*'
description: |-
The chart version semver expression, ignored for charts from GitRepository
and Bucket sources. Defaults to latest when omitted.
description: The chart version semver expression, ignored for charts
from GitRepository and Bucket sources. Defaults to latest when omitted.
type: string
required:
- chart
@ -507,60 +167,66 @@ spec:
description: Checksum is the SHA256 checksum of the artifact.
type: string
lastUpdateTime:
description: |-
LastUpdateTime is the timestamp corresponding to the last update of this
artifact.
description: LastUpdateTime is the timestamp corresponding to
the last update of this artifact.
format: date-time
type: string
path:
description: Path is the relative file path of this artifact.
type: string
revision:
description: |-
Revision is a human readable identifier traceable in the origin source
system. It can be a Git commit SHA, Git tag, a Helm index timestamp, a Helm
chart version, etc.
description: Revision is a human readable identifier traceable
in the origin source system. It can be a Git commit SHA, Git
tag, a Helm index timestamp, a Helm chart version, etc.
type: string
url:
description: URL is the HTTP address of this artifact.
type: string
required:
- lastUpdateTime
- path
- url
type: object
conditions:
description: Conditions holds the conditions for the HelmChart.
items:
description: Condition contains details for one aspect of the current
state of this API Resource.
description: "Condition contains details for one aspect of the current
state of this API Resource. --- This struct is intended for direct
use as an array at the field path .status.conditions. For example,
type FooStatus struct{ // Represents the observations of a
foo's current state. // Known .status.conditions.type are:
\"Available\", \"Progressing\", and \"Degraded\" // +patchMergeKey=type
\ // +patchStrategy=merge // +listType=map // +listMapKey=type
\ Conditions []metav1.Condition `json:\"conditions,omitempty\"
patchStrategy:\"merge\" patchMergeKey:\"type\" protobuf:\"bytes,1,rep,name=conditions\"`
\n // other fields }"
properties:
lastTransitionTime:
description: |-
lastTransitionTime is the last time the condition transitioned from one status to another.
This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable.
description: lastTransitionTime is the last time the condition
transitioned from one status to another. This should be when
the underlying condition changed. If that is not known, then
using the time when the API field changed is acceptable.
format: date-time
type: string
message:
description: |-
message is a human readable message indicating details about the transition.
This may be an empty string.
description: message is a human readable message indicating
details about the transition. This may be an empty string.
maxLength: 32768
type: string
observedGeneration:
description: |-
observedGeneration represents the .metadata.generation that the condition was set based upon.
For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date
with respect to the current state of the instance.
description: observedGeneration represents the .metadata.generation
that the condition was set based upon. For instance, if .metadata.generation
is currently 12, but the .status.conditions[x].observedGeneration
is 9, the condition is out of date with respect to the current
state of the instance.
format: int64
minimum: 0
type: integer
reason:
description: |-
reason contains a programmatic identifier indicating the reason for the condition's last transition.
Producers of specific condition types may define expected values and meanings for this field,
and whether the values are considered a guaranteed API.
The value should be a CamelCase string.
description: reason contains a programmatic identifier indicating
the reason for the condition's last transition. Producers
of specific condition types may define expected values and
meanings for this field, and whether the values are considered
a guaranteed API. The value should be a CamelCase string.
This field may not be empty.
maxLength: 1024
minLength: 1
@ -575,6 +241,10 @@ spec:
type: string
type:
description: type of condition in CamelCase or in foo.example.com/CamelCase.
--- Many .condition.type values are consistent across resources
like Available, but because arbitrary conditions can be useful
(see .node.status.conditions), the ability to deconflict is
important. The regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt)
maxLength: 316
pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$
type: string
@ -587,10 +257,8 @@ spec:
type: object
type: array
lastHandledReconcileAt:
description: |-
LastHandledReconcileAt holds the value of the most recent
reconcile request value, so a change of the annotation value
can be detected.
description: LastHandledReconcileAt holds the value of the most recent
reconcile request value, so a change can be detected.
type: string
observedGeneration:
description: ObservedGeneration is the last observed generation.
@ -602,375 +270,12 @@ spec:
type: object
type: object
served: true
storage: false
storage: true
subresources:
status: {}
- additionalPrinterColumns:
- jsonPath: .spec.chart
name: Chart
type: string
- jsonPath: .spec.version
name: Version
type: string
- jsonPath: .spec.sourceRef.kind
name: Source Kind
type: string
- jsonPath: .spec.sourceRef.name
name: Source Name
type: string
- jsonPath: .metadata.creationTimestamp
name: Age
type: date
- jsonPath: .status.conditions[?(@.type=="Ready")].status
name: Ready
type: string
- jsonPath: .status.conditions[?(@.type=="Ready")].message
name: Status
type: string
deprecated: true
deprecationWarning: v1beta2 HelmChart is deprecated, upgrade to v1
name: v1beta2
schema:
openAPIV3Schema:
description: HelmChart is the Schema for the helmcharts API.
properties:
apiVersion:
description: |-
APIVersion defines the versioned schema of this representation of an object.
Servers should convert recognized schemas to the latest internal value, and
may reject unrecognized values.
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources
type: string
kind:
description: |-
Kind is a string value representing the REST resource this object represents.
Servers may infer this from the endpoint the client submits requests to.
Cannot be updated.
In CamelCase.
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
type: string
metadata:
type: object
spec:
description: HelmChartSpec specifies the desired state of a Helm chart.
properties:
accessFrom:
description: |-
AccessFrom specifies an Access Control List for allowing cross-namespace
references to this object.
NOTE: Not implemented, provisional as of https://github.com/fluxcd/flux2/pull/2092
properties:
namespaceSelectors:
description: |-
NamespaceSelectors is the list of namespace selectors to which this ACL applies.
Items in this list are evaluated using a logical OR operation.
items:
description: |-
NamespaceSelector selects the namespaces to which this ACL applies.
An empty map of MatchLabels matches all namespaces in a cluster.
properties:
matchLabels:
additionalProperties:
type: string
description: |-
MatchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels
map is equivalent to an element of matchExpressions, whose key field is "key", the
operator is "In", and the values array contains only "value". The requirements are ANDed.
type: object
type: object
type: array
required:
- namespaceSelectors
type: object
chart:
description: |-
Chart is the name or path the Helm chart is available at in the
SourceRef.
type: string
ignoreMissingValuesFiles:
description: |-
IgnoreMissingValuesFiles controls whether to silently ignore missing values
files rather than failing.
type: boolean
interval:
description: |-
Interval at which the HelmChart SourceRef is checked for updates.
This interval is approximate and may be subject to jitter to ensure
efficient use of resources.
pattern: ^([0-9]+(\.[0-9]+)?(ms|s|m|h))+$
type: string
reconcileStrategy:
default: ChartVersion
description: |-
ReconcileStrategy determines what enables the creation of a new artifact.
Valid values are ('ChartVersion', 'Revision').
See the documentation of the values for an explanation on their behavior.
Defaults to ChartVersion when omitted.
enum:
- ChartVersion
- Revision
type: string
sourceRef:
description: SourceRef is the reference to the Source the chart is
available at.
properties:
apiVersion:
description: APIVersion of the referent.
type: string
kind:
description: |-
Kind of the referent, valid values are ('HelmRepository', 'GitRepository',
'Bucket').
enum:
- HelmRepository
- GitRepository
- Bucket
type: string
name:
description: Name of the referent.
type: string
required:
- kind
- name
type: object
suspend:
description: |-
Suspend tells the controller to suspend the reconciliation of this
source.
type: boolean
valuesFile:
description: |-
ValuesFile is an alternative values file to use as the default chart
values, expected to be a relative path in the SourceRef. Deprecated in
favor of ValuesFiles, for backwards compatibility the file specified here
is merged before the ValuesFiles items. Ignored when omitted.
type: string
valuesFiles:
description: |-
ValuesFiles is an alternative list of values files to use as the chart
values (values.yaml is not included by default), expected to be a
relative path in the SourceRef.
Values files are merged in the order of this list with the last file
overriding the first. Ignored when omitted.
items:
type: string
type: array
verify:
description: |-
Verify contains the secret name containing the trusted public keys
used to verify the signature and specifies which provider to use to check
whether OCI image is authentic.
This field is only supported when using HelmRepository source with spec.type 'oci'.
Chart dependencies, which are not bundled in the umbrella chart artifact, are not verified.
properties:
matchOIDCIdentity:
description: |-
MatchOIDCIdentity specifies the identity matching criteria to use
while verifying an OCI artifact which was signed using Cosign keyless
signing. The artifact's identity is deemed to be verified if any of the
specified matchers match against the identity.
items:
description: |-
OIDCIdentityMatch specifies options for verifying the certificate identity,
i.e. the issuer and the subject of the certificate.
properties:
issuer:
description: |-
Issuer specifies the regex pattern to match against to verify
the OIDC issuer in the Fulcio certificate. The pattern must be a
valid Go regular expression.
type: string
subject:
description: |-
Subject specifies the regex pattern to match against to verify
the identity subject in the Fulcio certificate. The pattern must
be a valid Go regular expression.
type: string
required:
- issuer
- subject
type: object
type: array
provider:
default: cosign
description: Provider specifies the technology used to sign the
OCI Artifact.
enum:
- cosign
- notation
type: string
secretRef:
description: |-
SecretRef specifies the Kubernetes Secret containing the
trusted public keys.
properties:
name:
description: Name of the referent.
type: string
required:
- name
type: object
required:
- provider
type: object
version:
default: '*'
description: |-
Version is the chart version semver expression, ignored for charts from
GitRepository and Bucket sources. Defaults to latest when omitted.
type: string
required:
- chart
- interval
- sourceRef
type: object
status:
default:
observedGeneration: -1
description: HelmChartStatus records the observed state of the HelmChart.
properties:
artifact:
description: Artifact represents the output of the last successful
reconciliation.
properties:
digest:
description: Digest is the digest of the file in the form of '<algorithm>:<checksum>'.
pattern: ^[a-z0-9]+(?:[.+_-][a-z0-9]+)*:[a-zA-Z0-9=_-]+$
type: string
lastUpdateTime:
description: |-
LastUpdateTime is the timestamp corresponding to the last update of the
Artifact.
format: date-time
type: string
metadata:
additionalProperties:
type: string
description: Metadata holds upstream information such as OCI annotations.
type: object
path:
description: |-
Path is the relative file path of the Artifact. It can be used to locate
the file in the root of the Artifact storage on the local file system of
the controller managing the Source.
type: string
revision:
description: |-
Revision is a human-readable identifier traceable in the origin source
system. It can be a Git commit SHA, Git tag, a Helm chart version, etc.
type: string
size:
description: Size is the number of bytes in the file.
format: int64
type: integer
url:
description: |-
URL is the HTTP address of the Artifact as exposed by the controller
managing the Source. It can be used to retrieve the Artifact for
consumption, e.g. by another controller applying the Artifact contents.
type: string
required:
- lastUpdateTime
- path
- revision
- url
type: object
conditions:
description: Conditions holds the conditions for the HelmChart.
items:
description: Condition contains details for one aspect of the current
state of this API Resource.
properties:
lastTransitionTime:
description: |-
lastTransitionTime is the last time the condition transitioned from one status to another.
This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable.
format: date-time
type: string
message:
description: |-
message is a human readable message indicating details about the transition.
This may be an empty string.
maxLength: 32768
type: string
observedGeneration:
description: |-
observedGeneration represents the .metadata.generation that the condition was set based upon.
For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date
with respect to the current state of the instance.
format: int64
minimum: 0
type: integer
reason:
description: |-
reason contains a programmatic identifier indicating the reason for the condition's last transition.
Producers of specific condition types may define expected values and meanings for this field,
and whether the values are considered a guaranteed API.
The value should be a CamelCase string.
This field may not be empty.
maxLength: 1024
minLength: 1
pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$
type: string
status:
description: status of the condition, one of True, False, Unknown.
enum:
- "True"
- "False"
- Unknown
type: string
type:
description: type of condition in CamelCase or in foo.example.com/CamelCase.
maxLength: 316
pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$
type: string
required:
- lastTransitionTime
- message
- reason
- status
- type
type: object
type: array
lastHandledReconcileAt:
description: |-
LastHandledReconcileAt holds the value of the most recent
reconcile request value, so a change of the annotation value
can be detected.
type: string
observedChartName:
description: |-
ObservedChartName is the last observed chart name as specified by the
resolved chart reference.
type: string
observedGeneration:
description: |-
ObservedGeneration is the last observed generation of the HelmChart
object.
format: int64
type: integer
observedSourceArtifactRevision:
description: |-
ObservedSourceArtifactRevision is the last observed Artifact.Revision
of the HelmChartSpec.SourceRef.
type: string
observedValuesFiles:
description: |-
ObservedValuesFiles are the observed value files of the last successful
reconciliation.
It matches the chart in the last successfully reconciled artifact.
items:
type: string
type: array
url:
description: |-
URL is the dynamic fetch link for the latest Artifact.
It is provided on a "best effort" basis, and using the precise
BucketStatus.Artifact data is recommended.
type: string
type: object
type: object
served: true
storage: false
subresources:
status: {}
acceptedNames:
kind: ""
plural: ""
conditions: []
storedVersions: []
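
A minimal sketch of a v1 HelmChart wired to an assumed OCI HelmRepository named 'example-oci-repo', with cosign keyless verification constrained by matchOIDCIdentity as described in the verify fields above; the chart name, version range, and identity regexes are illustrative placeholders.

apiVersion: source.toolkit.fluxcd.io/v1
kind: HelmChart
metadata:
  name: example-podinfo            # hypothetical name
  namespace: flux-system
spec:
  chart: podinfo                   # chart name as published in the referenced repository
  version: '>=6.0.0 <7.0.0'        # semver expression; defaults to '*' (latest) when omitted
  interval: 10m
  reconcileStrategy: ChartVersion
  sourceRef:
    kind: HelmRepository
    name: example-oci-repo         # assumed HelmRepository with spec.type 'oci'
  verify:
    provider: cosign               # verification is only supported for OCI HelmRepository sources
    matchOIDCIdentity:
      - issuer: '^https://token\.actions\.githubusercontent\.com$'
        subject: '^https://github\.com/example/podinfo.*$'

Per the schema above, adding a secretRef under verify would instead supply trusted public keys for key-based verification.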

View File

@ -1,9 +1,11 @@
---
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
annotations:
controller-gen.kubebuilder.io/version: v0.16.1
controller-gen.kubebuilder.io/version: v0.7.0
creationTimestamp: null
name: helmrepositories.source.toolkit.fluxcd.io
spec:
group: source.toolkit.fluxcd.io
@ -16,308 +18,6 @@ spec:
singular: helmrepository
scope: Namespaced
versions:
- additionalPrinterColumns:
- jsonPath: .spec.url
name: URL
type: string
- jsonPath: .metadata.creationTimestamp
name: Age
type: date
- jsonPath: .status.conditions[?(@.type=="Ready")].status
name: Ready
type: string
- jsonPath: .status.conditions[?(@.type=="Ready")].message
name: Status
type: string
name: v1
schema:
openAPIV3Schema:
description: HelmRepository is the Schema for the helmrepositories API.
properties:
apiVersion:
description: |-
APIVersion defines the versioned schema of this representation of an object.
Servers should convert recognized schemas to the latest internal value, and
may reject unrecognized values.
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources
type: string
kind:
description: |-
Kind is a string value representing the REST resource this object represents.
Servers may infer this from the endpoint the client submits requests to.
Cannot be updated.
In CamelCase.
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
type: string
metadata:
type: object
spec:
description: |-
HelmRepositorySpec specifies the required configuration to produce an
Artifact for a Helm repository index YAML.
properties:
accessFrom:
description: |-
AccessFrom specifies an Access Control List for allowing cross-namespace
references to this object.
NOTE: Not implemented, provisional as of https://github.com/fluxcd/flux2/pull/2092
properties:
namespaceSelectors:
description: |-
NamespaceSelectors is the list of namespace selectors to which this ACL applies.
Items in this list are evaluated using a logical OR operation.
items:
description: |-
NamespaceSelector selects the namespaces to which this ACL applies.
An empty map of MatchLabels matches all namespaces in a cluster.
properties:
matchLabels:
additionalProperties:
type: string
description: |-
MatchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels
map is equivalent to an element of matchExpressions, whose key field is "key", the
operator is "In", and the values array contains only "value". The requirements are ANDed.
type: object
type: object
type: array
required:
- namespaceSelectors
type: object
certSecretRef:
description: |-
CertSecretRef can be given the name of a Secret containing
either or both of
- a PEM-encoded client certificate (`tls.crt`) and private
key (`tls.key`);
- a PEM-encoded CA certificate (`ca.crt`)
and whichever are supplied, will be used for connecting to the
registry. The client cert and key are useful if you are
authenticating with a certificate; the CA cert is useful if
you are using a self-signed server certificate. The Secret must
be of type `Opaque` or `kubernetes.io/tls`.
It takes precedence over the values specified in the Secret referred
to by `.spec.secretRef`.
properties:
name:
description: Name of the referent.
type: string
required:
- name
type: object
insecure:
description: |-
Insecure allows connecting to a non-TLS HTTP container registry.
This field is only taken into account if the .spec.type field is set to 'oci'.
type: boolean
interval:
description: |-
Interval at which the HelmRepository URL is checked for updates.
This interval is approximate and may be subject to jitter to ensure
efficient use of resources.
pattern: ^([0-9]+(\.[0-9]+)?(ms|s|m|h))+$
type: string
passCredentials:
description: |-
PassCredentials allows the credentials from the SecretRef to be passed
on to a host that does not match the host as defined in URL.
This may be required if the host of the advertised chart URLs in the
index differ from the defined URL.
Enabling this should be done with caution, as it can potentially result
in credentials getting stolen in a MITM-attack.
type: boolean
provider:
default: generic
description: |-
Provider used for authentication, can be 'aws', 'azure', 'gcp' or 'generic'.
This field is optional, and only taken into account if the .spec.type field is set to 'oci'.
When not specified, defaults to 'generic'.
enum:
- generic
- aws
- azure
- gcp
type: string
secretRef:
description: |-
SecretRef specifies the Secret containing authentication credentials
for the HelmRepository.
For HTTP/S basic auth the secret must contain 'username' and 'password'
fields.
Support for TLS auth using the 'certFile' and 'keyFile', and/or 'caFile'
keys is deprecated. Please use `.spec.certSecretRef` instead.
properties:
name:
description: Name of the referent.
type: string
required:
- name
type: object
suspend:
description: |-
Suspend tells the controller to suspend the reconciliation of this
HelmRepository.
type: boolean
timeout:
description: |-
Timeout is used for the index fetch operation for an HTTPS helm repository,
and for remote OCI Repository operations like pulling for an OCI helm
chart by the associated HelmChart.
Its default value is 60s.
pattern: ^([0-9]+(\.[0-9]+)?(ms|s|m))+$
type: string
type:
description: |-
Type of the HelmRepository.
When this field is set to "oci", the URL field value must be prefixed with "oci://".
enum:
- default
- oci
type: string
url:
description: |-
URL of the Helm repository, a valid URL contains at least a protocol and
host.
pattern: ^(http|https|oci)://.*$
type: string
required:
- url
type: object
status:
default:
observedGeneration: -1
description: HelmRepositoryStatus records the observed state of the HelmRepository.
properties:
artifact:
description: Artifact represents the last successful HelmRepository
reconciliation.
properties:
digest:
description: Digest is the digest of the file in the form of '<algorithm>:<checksum>'.
pattern: ^[a-z0-9]+(?:[.+_-][a-z0-9]+)*:[a-zA-Z0-9=_-]+$
type: string
lastUpdateTime:
description: |-
LastUpdateTime is the timestamp corresponding to the last update of the
Artifact.
format: date-time
type: string
metadata:
additionalProperties:
type: string
description: Metadata holds upstream information such as OCI annotations.
type: object
path:
description: |-
Path is the relative file path of the Artifact. It can be used to locate
the file in the root of the Artifact storage on the local file system of
the controller managing the Source.
type: string
revision:
description: |-
Revision is a human-readable identifier traceable in the origin source
system. It can be a Git commit SHA, Git tag, a Helm chart version, etc.
type: string
size:
description: Size is the number of bytes in the file.
format: int64
type: integer
url:
description: |-
URL is the HTTP address of the Artifact as exposed by the controller
managing the Source. It can be used to retrieve the Artifact for
consumption, e.g. by another controller applying the Artifact contents.
type: string
required:
- lastUpdateTime
- path
- revision
- url
type: object
conditions:
description: Conditions holds the conditions for the HelmRepository.
items:
description: Condition contains details for one aspect of the current
state of this API Resource.
properties:
lastTransitionTime:
description: |-
lastTransitionTime is the last time the condition transitioned from one status to another.
This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable.
format: date-time
type: string
message:
description: |-
message is a human readable message indicating details about the transition.
This may be an empty string.
maxLength: 32768
type: string
observedGeneration:
description: |-
observedGeneration represents the .metadata.generation that the condition was set based upon.
For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date
with respect to the current state of the instance.
format: int64
minimum: 0
type: integer
reason:
description: |-
reason contains a programmatic identifier indicating the reason for the condition's last transition.
Producers of specific condition types may define expected values and meanings for this field,
and whether the values are considered a guaranteed API.
The value should be a CamelCase string.
This field may not be empty.
maxLength: 1024
minLength: 1
pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$
type: string
status:
description: status of the condition, one of True, False, Unknown.
enum:
- "True"
- "False"
- Unknown
type: string
type:
description: type of condition in CamelCase or in foo.example.com/CamelCase.
maxLength: 316
pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$
type: string
required:
- lastTransitionTime
- message
- reason
- status
- type
type: object
type: array
lastHandledReconcileAt:
description: |-
LastHandledReconcileAt holds the value of the most recent
reconcile request value, so a change of the annotation value
can be detected.
type: string
observedGeneration:
description: |-
ObservedGeneration is the last observed generation of the HelmRepository
object.
format: int64
type: integer
url:
description: |-
URL is the dynamic fetch link for the latest Artifact.
It is provided on a "best effort" basis, and using the precise
HelmRepositoryStatus.Artifact data is recommended.
type: string
type: object
type: object
served: true
storage: true
subresources:
status: {}
- additionalPrinterColumns:
- jsonPath: .spec.url
name: URL
@ -331,27 +31,20 @@ spec:
- jsonPath: .metadata.creationTimestamp
name: Age
type: date
deprecated: true
deprecationWarning: v1beta1 HelmRepository is deprecated, upgrade to v1
name: v1beta1
schema:
openAPIV3Schema:
description: HelmRepository is the Schema for the helmrepositories API
properties:
apiVersion:
description: |-
APIVersion defines the versioned schema of this representation of an object.
Servers should convert recognized schemas to the latest internal value, and
may reject unrecognized values.
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources
description: 'APIVersion defines the versioned schema of this representation
of an object. Servers should convert recognized schemas to the latest
internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
type: string
kind:
description: |-
Kind is a string value representing the REST resource this object represents.
Servers may infer this from the endpoint the client submits requests to.
Cannot be updated.
In CamelCase.
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
description: 'Kind is a string value representing the REST resource this
object represents. Servers may infer this from the endpoint the client
submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
type: string
metadata:
type: object
@ -363,21 +56,22 @@ spec:
cross-namespace references to this object.
properties:
namespaceSelectors:
description: |-
NamespaceSelectors is the list of namespace selectors to which this ACL applies.
Items in this list are evaluated using a logical OR operation.
description: NamespaceSelectors is the list of namespace selectors
to which this ACL applies. Items in this list are evaluated
using a logical OR operation.
items:
description: |-
NamespaceSelector selects the namespaces to which this ACL applies.
An empty map of MatchLabels matches all namespaces in a cluster.
description: NamespaceSelector selects the namespaces to which
this ACL applies. An empty map of MatchLabels matches all
namespaces in a cluster.
properties:
matchLabels:
additionalProperties:
type: string
description: |-
MatchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels
map is equivalent to an element of matchExpressions, whose key field is "key", the
operator is "In", and the values array contains only "value". The requirements are ANDed.
description: MatchLabels is a map of {key,value} pairs.
A single {key,value} in the matchLabels map is equivalent
to an element of matchExpressions, whose key field is
"key", the operator is "In", and the values array contains
only "value". The requirements are ANDed.
type: object
type: object
type: array
@ -388,25 +82,21 @@ spec:
description: The interval at which to check the upstream for updates.
type: string
passCredentials:
description: |-
PassCredentials allows the credentials from the SecretRef to be passed on to
a host that does not match the host as defined in URL.
This may be required if the host of the advertised chart URLs in the index
differ from the defined URL.
Enabling this should be done with caution, as it can potentially result in
credentials getting stolen in a MITM-attack.
description: PassCredentials allows the credentials from the SecretRef
to be passed on to a host that does not match the host as defined
in URL. This may be required if the host of the advertised chart
URLs in the index differ from the defined URL. Enabling this should
be done with caution, as it can potentially result in credentials
getting stolen in a MITM-attack.
type: boolean
secretRef:
description: |-
The name of the secret containing authentication credentials for the Helm
repository.
For HTTP/S basic auth the secret must contain username and
password fields.
For TLS the secret must contain a certFile and keyFile, and/or
caFile fields.
description: The name of the secret containing authentication credentials
for the Helm repository. For HTTP/S basic auth the secret must contain
username and password fields. For TLS the secret must contain a
certFile and keyFile, and/or caCert fields.
properties:
name:
description: Name of the referent.
description: Name of the referent
type: string
required:
- name
@ -440,60 +130,66 @@ spec:
description: Checksum is the SHA256 checksum of the artifact.
type: string
lastUpdateTime:
description: |-
LastUpdateTime is the timestamp corresponding to the last update of this
artifact.
description: LastUpdateTime is the timestamp corresponding to
the last update of this artifact.
format: date-time
type: string
path:
description: Path is the relative file path of this artifact.
type: string
revision:
description: |-
Revision is a human readable identifier traceable in the origin source
system. It can be a Git commit SHA, Git tag, a Helm index timestamp, a Helm
chart version, etc.
description: Revision is a human readable identifier traceable
in the origin source system. It can be a Git commit SHA, Git
tag, a Helm index timestamp, a Helm chart version, etc.
type: string
url:
description: URL is the HTTP address of this artifact.
type: string
required:
- lastUpdateTime
- path
- url
type: object
conditions:
description: Conditions holds the conditions for the HelmRepository.
items:
description: Condition contains details for one aspect of the current
state of this API Resource.
description: "Condition contains details for one aspect of the current
state of this API Resource. --- This struct is intended for direct
use as an array at the field path .status.conditions. For example,
type FooStatus struct{ // Represents the observations of a
foo's current state. // Known .status.conditions.type are:
\"Available\", \"Progressing\", and \"Degraded\" // +patchMergeKey=type
\ // +patchStrategy=merge // +listType=map // +listMapKey=type
\ Conditions []metav1.Condition `json:\"conditions,omitempty\"
patchStrategy:\"merge\" patchMergeKey:\"type\" protobuf:\"bytes,1,rep,name=conditions\"`
\n // other fields }"
properties:
lastTransitionTime:
description: |-
lastTransitionTime is the last time the condition transitioned from one status to another.
This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable.
description: lastTransitionTime is the last time the condition
transitioned from one status to another. This should be when
the underlying condition changed. If that is not known, then
using the time when the API field changed is acceptable.
format: date-time
type: string
message:
description: |-
message is a human readable message indicating details about the transition.
This may be an empty string.
description: message is a human readable message indicating
details about the transition. This may be an empty string.
maxLength: 32768
type: string
observedGeneration:
description: |-
observedGeneration represents the .metadata.generation that the condition was set based upon.
For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date
with respect to the current state of the instance.
description: observedGeneration represents the .metadata.generation
that the condition was set based upon. For instance, if .metadata.generation
is currently 12, but the .status.conditions[x].observedGeneration
is 9, the condition is out of date with respect to the current
state of the instance.
format: int64
minimum: 0
type: integer
reason:
description: |-
reason contains a programmatic identifier indicating the reason for the condition's last transition.
Producers of specific condition types may define expected values and meanings for this field,
and whether the values are considered a guaranteed API.
The value should be a CamelCase string.
description: reason contains a programmatic identifier indicating
the reason for the condition's last transition. Producers
of specific condition types may define expected values and
meanings for this field, and whether the values are considered
a guaranteed API. The value should be a CamelCase string.
This field may not be empty.
maxLength: 1024
minLength: 1
@ -508,6 +204,10 @@ spec:
type: string
type:
description: type of condition in CamelCase or in foo.example.com/CamelCase.
--- Many .condition.type values are consistent across resources
like Available, but because arbitrary conditions can be useful
(see .node.status.conditions), the ability to deconflict is
important. The regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt)
maxLength: 316
pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$
type: string
@ -520,10 +220,8 @@ spec:
type: object
type: array
lastHandledReconcileAt:
description: |-
LastHandledReconcileAt holds the value of the most recent
reconcile request value, so a change of the annotation value
can be detected.
description: LastHandledReconcileAt holds the value of the most recent
reconcile request value, so a change can be detected.
type: string
observedGeneration:
description: ObservedGeneration is the last observed generation.
@ -535,310 +233,12 @@ spec:
type: object
type: object
served: true
storage: false
storage: true
subresources:
status: {}
- additionalPrinterColumns:
- jsonPath: .spec.url
name: URL
type: string
- jsonPath: .metadata.creationTimestamp
name: Age
type: date
- jsonPath: .status.conditions[?(@.type=="Ready")].status
name: Ready
type: string
- jsonPath: .status.conditions[?(@.type=="Ready")].message
name: Status
type: string
deprecated: true
deprecationWarning: v1beta2 HelmRepository is deprecated, upgrade to v1
name: v1beta2
schema:
openAPIV3Schema:
description: HelmRepository is the Schema for the helmrepositories API.
properties:
apiVersion:
description: |-
APIVersion defines the versioned schema of this representation of an object.
Servers should convert recognized schemas to the latest internal value, and
may reject unrecognized values.
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources
type: string
kind:
description: |-
Kind is a string value representing the REST resource this object represents.
Servers may infer this from the endpoint the client submits requests to.
Cannot be updated.
In CamelCase.
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
type: string
metadata:
type: object
spec:
description: |-
HelmRepositorySpec specifies the required configuration to produce an
Artifact for a Helm repository index YAML.
properties:
accessFrom:
description: |-
AccessFrom specifies an Access Control List for allowing cross-namespace
references to this object.
NOTE: Not implemented, provisional as of https://github.com/fluxcd/flux2/pull/2092
properties:
namespaceSelectors:
description: |-
NamespaceSelectors is the list of namespace selectors to which this ACL applies.
Items in this list are evaluated using a logical OR operation.
items:
description: |-
NamespaceSelector selects the namespaces to which this ACL applies.
An empty map of MatchLabels matches all namespaces in a cluster.
properties:
matchLabels:
additionalProperties:
type: string
description: |-
MatchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels
map is equivalent to an element of matchExpressions, whose key field is "key", the
operator is "In", and the values array contains only "value". The requirements are ANDed.
type: object
type: object
type: array
required:
- namespaceSelectors
type: object
certSecretRef:
description: |-
CertSecretRef can be given the name of a Secret containing
either or both of
- a PEM-encoded client certificate (`tls.crt`) and private
key (`tls.key`);
- a PEM-encoded CA certificate (`ca.crt`)
and whichever are supplied, will be used for connecting to the
registry. The client cert and key are useful if you are
authenticating with a certificate; the CA cert is useful if
you are using a self-signed server certificate. The Secret must
be of type `Opaque` or `kubernetes.io/tls`.
It takes precedence over the values specified in the Secret referred
to by `.spec.secretRef`.
properties:
name:
description: Name of the referent.
type: string
required:
- name
type: object
insecure:
description: |-
Insecure allows connecting to a non-TLS HTTP container registry.
This field is only taken into account if the .spec.type field is set to 'oci'.
type: boolean
interval:
description: |-
Interval at which the HelmRepository URL is checked for updates.
This interval is approximate and may be subject to jitter to ensure
efficient use of resources.
pattern: ^([0-9]+(\.[0-9]+)?(ms|s|m|h))+$
type: string
passCredentials:
description: |-
PassCredentials allows the credentials from the SecretRef to be passed
on to a host that does not match the host as defined in URL.
This may be required if the host of the advertised chart URLs in the
index differ from the defined URL.
Enabling this should be done with caution, as it can potentially result
in credentials getting stolen in a MITM-attack.
type: boolean
provider:
default: generic
description: |-
Provider used for authentication, can be 'aws', 'azure', 'gcp' or 'generic'.
This field is optional, and only taken into account if the .spec.type field is set to 'oci'.
When not specified, defaults to 'generic'.
enum:
- generic
- aws
- azure
- gcp
type: string
secretRef:
description: |-
SecretRef specifies the Secret containing authentication credentials
for the HelmRepository.
For HTTP/S basic auth the secret must contain 'username' and 'password'
fields.
Support for TLS auth using the 'certFile' and 'keyFile', and/or 'caFile'
keys is deprecated. Please use `.spec.certSecretRef` instead.
properties:
name:
description: Name of the referent.
type: string
required:
- name
type: object
suspend:
description: |-
Suspend tells the controller to suspend the reconciliation of this
HelmRepository.
type: boolean
timeout:
description: |-
Timeout is used for the index fetch operation for an HTTPS helm repository,
and for remote OCI Repository operations like pulling for an OCI helm
chart by the associated HelmChart.
Its default value is 60s.
pattern: ^([0-9]+(\.[0-9]+)?(ms|s|m))+$
type: string
type:
description: |-
Type of the HelmRepository.
When this field is set to "oci", the URL field value must be prefixed with "oci://".
enum:
- default
- oci
type: string
url:
description: |-
URL of the Helm repository, a valid URL contains at least a protocol and
host.
pattern: ^(http|https|oci)://.*$
type: string
required:
- url
type: object
status:
default:
observedGeneration: -1
description: HelmRepositoryStatus records the observed state of the HelmRepository.
properties:
artifact:
description: Artifact represents the last successful HelmRepository
reconciliation.
properties:
digest:
description: Digest is the digest of the file in the form of '<algorithm>:<checksum>'.
pattern: ^[a-z0-9]+(?:[.+_-][a-z0-9]+)*:[a-zA-Z0-9=_-]+$
type: string
lastUpdateTime:
description: |-
LastUpdateTime is the timestamp corresponding to the last update of the
Artifact.
format: date-time
type: string
metadata:
additionalProperties:
type: string
description: Metadata holds upstream information such as OCI annotations.
type: object
path:
description: |-
Path is the relative file path of the Artifact. It can be used to locate
the file in the root of the Artifact storage on the local file system of
the controller managing the Source.
type: string
revision:
description: |-
Revision is a human-readable identifier traceable in the origin source
system. It can be a Git commit SHA, Git tag, a Helm chart version, etc.
type: string
size:
description: Size is the number of bytes in the file.
format: int64
type: integer
url:
description: |-
URL is the HTTP address of the Artifact as exposed by the controller
managing the Source. It can be used to retrieve the Artifact for
consumption, e.g. by another controller applying the Artifact contents.
type: string
required:
- lastUpdateTime
- path
- revision
- url
type: object
conditions:
description: Conditions holds the conditions for the HelmRepository.
items:
description: Condition contains details for one aspect of the current
state of this API Resource.
properties:
lastTransitionTime:
description: |-
lastTransitionTime is the last time the condition transitioned from one status to another.
This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable.
format: date-time
type: string
message:
description: |-
message is a human readable message indicating details about the transition.
This may be an empty string.
maxLength: 32768
type: string
observedGeneration:
description: |-
observedGeneration represents the .metadata.generation that the condition was set based upon.
For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date
with respect to the current state of the instance.
format: int64
minimum: 0
type: integer
reason:
description: |-
reason contains a programmatic identifier indicating the reason for the condition's last transition.
Producers of specific condition types may define expected values and meanings for this field,
and whether the values are considered a guaranteed API.
The value should be a CamelCase string.
This field may not be empty.
maxLength: 1024
minLength: 1
pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$
type: string
status:
description: status of the condition, one of True, False, Unknown.
enum:
- "True"
- "False"
- Unknown
type: string
type:
description: type of condition in CamelCase or in foo.example.com/CamelCase.
maxLength: 316
pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$
type: string
required:
- lastTransitionTime
- message
- reason
- status
- type
type: object
type: array
lastHandledReconcileAt:
description: |-
LastHandledReconcileAt holds the value of the most recent
reconcile request value, so a change of the annotation value
can be detected.
type: string
observedGeneration:
description: |-
ObservedGeneration is the last observed generation of the HelmRepository
object.
format: int64
type: integer
url:
description: |-
URL is the dynamic fetch link for the latest Artifact.
It is provided on a "best effort" basis, and using the precise
HelmRepositoryStatus.Artifact data is recommended.
type: string
type: object
type: object
served: true
storage: false
subresources:
status: {}
acceptedNames:
kind: ""
plural: ""
conditions: []
storedVersions: []

View File

@ -1,821 +0,0 @@
---
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
annotations:
controller-gen.kubebuilder.io/version: v0.16.1
name: ocirepositories.source.toolkit.fluxcd.io
spec:
group: source.toolkit.fluxcd.io
names:
kind: OCIRepository
listKind: OCIRepositoryList
plural: ocirepositories
shortNames:
- ocirepo
singular: ocirepository
scope: Namespaced
versions:
- additionalPrinterColumns:
- jsonPath: .spec.url
name: URL
type: string
- jsonPath: .status.conditions[?(@.type=="Ready")].status
name: Ready
type: string
- jsonPath: .status.conditions[?(@.type=="Ready")].message
name: Status
type: string
- jsonPath: .metadata.creationTimestamp
name: Age
type: date
name: v1
schema:
openAPIV3Schema:
description: OCIRepository is the Schema for the ocirepositories API
properties:
apiVersion:
description: |-
APIVersion defines the versioned schema of this representation of an object.
Servers should convert recognized schemas to the latest internal value, and
may reject unrecognized values.
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources
type: string
kind:
description: |-
Kind is a string value representing the REST resource this object represents.
Servers may infer this from the endpoint the client submits requests to.
Cannot be updated.
In CamelCase.
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
type: string
metadata:
type: object
spec:
description: OCIRepositorySpec defines the desired state of OCIRepository
properties:
certSecretRef:
description: |-
CertSecretRef can be given the name of a Secret containing
either or both of
- a PEM-encoded client certificate (`tls.crt`) and private
key (`tls.key`);
- a PEM-encoded CA certificate (`ca.crt`)
and whichever are supplied, will be used for connecting to the
registry. The client cert and key are useful if you are
authenticating with a certificate; the CA cert is useful if
you are using a self-signed server certificate. The Secret must
be of type `Opaque` or `kubernetes.io/tls`.
properties:
name:
description: Name of the referent.
type: string
required:
- name
type: object
ignore:
description: |-
Ignore overrides the set of excluded patterns in the .sourceignore format
(which is the same as .gitignore). If not provided, a default will be used,
consult the documentation for your version to find out what those are.
type: string
insecure:
description: Insecure allows connecting to a non-TLS HTTP container
registry.
type: boolean
interval:
description: |-
Interval at which the OCIRepository URL is checked for updates.
This interval is approximate and may be subject to jitter to ensure
efficient use of resources.
pattern: ^([0-9]+(\.[0-9]+)?(ms|s|m|h))+$
type: string
layerSelector:
description: |-
LayerSelector specifies which layer should be extracted from the OCI artifact.
When not specified, the first layer found in the artifact is selected.
properties:
mediaType:
description: |-
MediaType specifies the OCI media type of the layer
which should be extracted from the OCI Artifact. The
first layer matching this type is selected.
type: string
operation:
description: |-
Operation specifies how the selected layer should be processed.
By default, the layer compressed content is extracted to storage.
When the operation is set to 'copy', the layer compressed content
is persisted to storage as it is.
enum:
- extract
- copy
type: string
type: object
provider:
default: generic
description: |-
The provider used for authentication, can be 'aws', 'azure', 'gcp' or 'generic'.
When not specified, defaults to 'generic'.
enum:
- generic
- aws
- azure
- gcp
type: string
proxySecretRef:
description: |-
ProxySecretRef specifies the Secret containing the proxy configuration
to use while communicating with the container registry.
properties:
name:
description: Name of the referent.
type: string
required:
- name
type: object
ref:
description: |-
The OCI reference to pull and monitor for changes,
defaults to the latest tag.
properties:
digest:
description: |-
Digest is the image digest to pull, takes precedence over SemVer.
The value should be in the format 'sha256:<HASH>'.
type: string
semver:
description: |-
SemVer is the range of tags to pull selecting the latest within
the range, takes precedence over Tag.
type: string
semverFilter:
description: SemverFilter is a regex pattern to filter the tags
within the SemVer range.
type: string
tag:
description: Tag is the image tag to pull, defaults to latest.
type: string
type: object
secretRef:
description: |-
SecretRef contains the secret name containing the registry login
credentials to resolve image metadata.
The secret must be of type kubernetes.io/dockerconfigjson.
properties:
name:
description: Name of the referent.
type: string
required:
- name
type: object
serviceAccountName:
description: |-
ServiceAccountName is the name of the Kubernetes ServiceAccount used to authenticate
the image pull if the service account has attached pull secrets. For more information:
https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/#add-imagepullsecrets-to-a-service-account
type: string
suspend:
description: This flag tells the controller to suspend the reconciliation
of this source.
type: boolean
timeout:
default: 60s
description: The timeout for remote OCI Repository operations like
pulling, defaults to 60s.
pattern: ^([0-9]+(\.[0-9]+)?(ms|s|m))+$
type: string
url:
description: |-
URL is a reference to an OCI artifact repository hosted
on a remote container registry.
pattern: ^oci://.*$
type: string
verify:
description: |-
Verify contains the secret name containing the trusted public keys
used to verify the signature and specifies which provider to use to check
whether OCI image is authentic.
properties:
matchOIDCIdentity:
description: |-
MatchOIDCIdentity specifies the identity matching criteria to use
while verifying an OCI artifact which was signed using Cosign keyless
signing. The artifact's identity is deemed to be verified if any of the
specified matchers match against the identity.
items:
description: |-
OIDCIdentityMatch specifies options for verifying the certificate identity,
i.e. the issuer and the subject of the certificate.
properties:
issuer:
description: |-
Issuer specifies the regex pattern to match against to verify
the OIDC issuer in the Fulcio certificate. The pattern must be a
valid Go regular expression.
type: string
subject:
description: |-
Subject specifies the regex pattern to match against to verify
the identity subject in the Fulcio certificate. The pattern must
be a valid Go regular expression.
type: string
required:
- issuer
- subject
type: object
type: array
provider:
default: cosign
description: Provider specifies the technology used to sign the
OCI Artifact.
enum:
- cosign
- notation
type: string
secretRef:
description: |-
SecretRef specifies the Kubernetes Secret containing the
trusted public keys.
properties:
name:
description: Name of the referent.
type: string
required:
- name
type: object
required:
- provider
type: object
required:
- interval
- url
type: object
status:
default:
observedGeneration: -1
description: OCIRepositoryStatus defines the observed state of OCIRepository
properties:
artifact:
description: Artifact represents the output of the last successful
OCI Repository sync.
properties:
digest:
description: Digest is the digest of the file in the form of '<algorithm>:<checksum>'.
pattern: ^[a-z0-9]+(?:[.+_-][a-z0-9]+)*:[a-zA-Z0-9=_-]+$
type: string
lastUpdateTime:
description: |-
LastUpdateTime is the timestamp corresponding to the last update of the
Artifact.
format: date-time
type: string
metadata:
additionalProperties:
type: string
description: Metadata holds upstream information such as OCI annotations.
type: object
path:
description: |-
Path is the relative file path of the Artifact. It can be used to locate
the file in the root of the Artifact storage on the local file system of
the controller managing the Source.
type: string
revision:
description: |-
Revision is a human-readable identifier traceable in the origin source
system. It can be a Git commit SHA, Git tag, a Helm chart version, etc.
type: string
size:
description: Size is the number of bytes in the file.
format: int64
type: integer
url:
description: |-
URL is the HTTP address of the Artifact as exposed by the controller
managing the Source. It can be used to retrieve the Artifact for
consumption, e.g. by another controller applying the Artifact contents.
type: string
required:
- lastUpdateTime
- path
- revision
- url
type: object
conditions:
description: Conditions holds the conditions for the OCIRepository.
items:
description: Condition contains details for one aspect of the current
state of this API Resource.
properties:
lastTransitionTime:
description: |-
lastTransitionTime is the last time the condition transitioned from one status to another.
This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable.
format: date-time
type: string
message:
description: |-
message is a human readable message indicating details about the transition.
This may be an empty string.
maxLength: 32768
type: string
observedGeneration:
description: |-
observedGeneration represents the .metadata.generation that the condition was set based upon.
For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date
with respect to the current state of the instance.
format: int64
minimum: 0
type: integer
reason:
description: |-
reason contains a programmatic identifier indicating the reason for the condition's last transition.
Producers of specific condition types may define expected values and meanings for this field,
and whether the values are considered a guaranteed API.
The value should be a CamelCase string.
This field may not be empty.
maxLength: 1024
minLength: 1
pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$
type: string
status:
description: status of the condition, one of True, False, Unknown.
enum:
- "True"
- "False"
- Unknown
type: string
type:
description: type of condition in CamelCase or in foo.example.com/CamelCase.
maxLength: 316
pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$
type: string
required:
- lastTransitionTime
- message
- reason
- status
- type
type: object
type: array
lastHandledReconcileAt:
description: |-
LastHandledReconcileAt holds the value of the most recent
reconcile request value, so a change of the annotation value
can be detected.
type: string
observedGeneration:
description: ObservedGeneration is the last observed generation.
format: int64
type: integer
observedIgnore:
description: |-
ObservedIgnore is the observed exclusion patterns used for constructing
the source artifact.
type: string
observedLayerSelector:
description: |-
ObservedLayerSelector is the observed layer selector used for constructing
the source artifact.
properties:
mediaType:
description: |-
MediaType specifies the OCI media type of the layer
which should be extracted from the OCI Artifact. The
first layer matching this type is selected.
type: string
operation:
description: |-
Operation specifies how the selected layer should be processed.
By default, the layer compressed content is extracted to storage.
When the operation is set to 'copy', the layer compressed content
is persisted to storage as it is.
enum:
- extract
- copy
type: string
type: object
url:
description: URL is the download link for the artifact output of the
last OCI Repository sync.
type: string
type: object
type: object
served: true
storage: true
subresources:
status: {}
- additionalPrinterColumns:
- jsonPath: .spec.url
name: URL
type: string
- jsonPath: .status.conditions[?(@.type=="Ready")].status
name: Ready
type: string
- jsonPath: .status.conditions[?(@.type=="Ready")].message
name: Status
type: string
- jsonPath: .metadata.creationTimestamp
name: Age
type: date
deprecated: true
deprecationWarning: v1beta2 OCIRepository is deprecated, upgrade to v1
name: v1beta2
schema:
openAPIV3Schema:
description: OCIRepository is the Schema for the ocirepositories API
properties:
apiVersion:
description: |-
APIVersion defines the versioned schema of this representation of an object.
Servers should convert recognized schemas to the latest internal value, and
may reject unrecognized values.
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources
type: string
kind:
description: |-
Kind is a string value representing the REST resource this object represents.
Servers may infer this from the endpoint the client submits requests to.
Cannot be updated.
In CamelCase.
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
type: string
metadata:
type: object
spec:
description: OCIRepositorySpec defines the desired state of OCIRepository
properties:
certSecretRef:
description: |-
CertSecretRef can be given the name of a Secret containing
either or both of
- a PEM-encoded client certificate (`tls.crt`) and private
key (`tls.key`);
- a PEM-encoded CA certificate (`ca.crt`)
and whichever are supplied, will be used for connecting to the
registry. The client cert and key are useful if you are
authenticating with a certificate; the CA cert is useful if
you are using a self-signed server certificate. The Secret must
be of type `Opaque` or `kubernetes.io/tls`.
Note: Support for the `caFile`, `certFile` and `keyFile` keys have
been deprecated.
properties:
name:
description: Name of the referent.
type: string
required:
- name
type: object
ignore:
description: |-
Ignore overrides the set of excluded patterns in the .sourceignore format
(which is the same as .gitignore). If not provided, a default will be used,
consult the documentation for your version to find out what those are.
type: string
insecure:
description: Insecure allows connecting to a non-TLS HTTP container
registry.
type: boolean
interval:
description: |-
Interval at which the OCIRepository URL is checked for updates.
This interval is approximate and may be subject to jitter to ensure
efficient use of resources.
pattern: ^([0-9]+(\.[0-9]+)?(ms|s|m|h))+$
type: string
layerSelector:
description: |-
LayerSelector specifies which layer should be extracted from the OCI artifact.
When not specified, the first layer found in the artifact is selected.
properties:
mediaType:
description: |-
MediaType specifies the OCI media type of the layer
which should be extracted from the OCI Artifact. The
first layer matching this type is selected.
type: string
operation:
description: |-
Operation specifies how the selected layer should be processed.
By default, the layer compressed content is extracted to storage.
When the operation is set to 'copy', the layer compressed content
is persisted to storage as it is.
enum:
- extract
- copy
type: string
type: object
provider:
default: generic
description: |-
The provider used for authentication, can be 'aws', 'azure', 'gcp' or 'generic'.
When not specified, defaults to 'generic'.
enum:
- generic
- aws
- azure
- gcp
type: string
proxySecretRef:
description: |-
ProxySecretRef specifies the Secret containing the proxy configuration
to use while communicating with the container registry.
properties:
name:
description: Name of the referent.
type: string
required:
- name
type: object
ref:
description: |-
The OCI reference to pull and monitor for changes,
defaults to the latest tag.
properties:
digest:
description: |-
Digest is the image digest to pull, takes precedence over SemVer.
The value should be in the format 'sha256:<HASH>'.
type: string
semver:
description: |-
SemVer is the range of tags to pull selecting the latest within
the range, takes precedence over Tag.
type: string
semverFilter:
description: SemverFilter is a regex pattern to filter the tags
within the SemVer range.
type: string
tag:
description: Tag is the image tag to pull, defaults to latest.
type: string
type: object
secretRef:
description: |-
SecretRef contains the secret name containing the registry login
credentials to resolve image metadata.
The secret must be of type kubernetes.io/dockerconfigjson.
properties:
name:
description: Name of the referent.
type: string
required:
- name
type: object
serviceAccountName:
description: |-
ServiceAccountName is the name of the Kubernetes ServiceAccount used to authenticate
the image pull if the service account has attached pull secrets. For more information:
https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/#add-imagepullsecrets-to-a-service-account
type: string
suspend:
description: This flag tells the controller to suspend the reconciliation
of this source.
type: boolean
timeout:
default: 60s
description: The timeout for remote OCI Repository operations like
pulling, defaults to 60s.
pattern: ^([0-9]+(\.[0-9]+)?(ms|s|m))+$
type: string
url:
description: |-
URL is a reference to an OCI artifact repository hosted
on a remote container registry.
pattern: ^oci://.*$
type: string
verify:
description: |-
Verify contains the secret name containing the trusted public keys
used to verify the signature and specifies which provider to use to check
whether OCI image is authentic.
properties:
matchOIDCIdentity:
description: |-
MatchOIDCIdentity specifies the identity matching criteria to use
while verifying an OCI artifact which was signed using Cosign keyless
signing. The artifact's identity is deemed to be verified if any of the
specified matchers match against the identity.
items:
description: |-
OIDCIdentityMatch specifies options for verifying the certificate identity,
i.e. the issuer and the subject of the certificate.
properties:
issuer:
description: |-
Issuer specifies the regex pattern to match against to verify
the OIDC issuer in the Fulcio certificate. The pattern must be a
valid Go regular expression.
type: string
subject:
description: |-
Subject specifies the regex pattern to match against to verify
the identity subject in the Fulcio certificate. The pattern must
be a valid Go regular expression.
type: string
required:
- issuer
- subject
type: object
type: array
provider:
default: cosign
description: Provider specifies the technology used to sign the
OCI Artifact.
enum:
- cosign
- notation
type: string
secretRef:
description: |-
SecretRef specifies the Kubernetes Secret containing the
trusted public keys.
properties:
name:
description: Name of the referent.
type: string
required:
- name
type: object
required:
- provider
type: object
required:
- interval
- url
type: object
status:
default:
observedGeneration: -1
description: OCIRepositoryStatus defines the observed state of OCIRepository
properties:
artifact:
description: Artifact represents the output of the last successful
OCI Repository sync.
properties:
digest:
description: Digest is the digest of the file in the form of '<algorithm>:<checksum>'.
pattern: ^[a-z0-9]+(?:[.+_-][a-z0-9]+)*:[a-zA-Z0-9=_-]+$
type: string
lastUpdateTime:
description: |-
LastUpdateTime is the timestamp corresponding to the last update of the
Artifact.
format: date-time
type: string
metadata:
additionalProperties:
type: string
description: Metadata holds upstream information such as OCI annotations.
type: object
path:
description: |-
Path is the relative file path of the Artifact. It can be used to locate
the file in the root of the Artifact storage on the local file system of
the controller managing the Source.
type: string
revision:
description: |-
Revision is a human-readable identifier traceable in the origin source
system. It can be a Git commit SHA, Git tag, a Helm chart version, etc.
type: string
size:
description: Size is the number of bytes in the file.
format: int64
type: integer
url:
description: |-
URL is the HTTP address of the Artifact as exposed by the controller
managing the Source. It can be used to retrieve the Artifact for
consumption, e.g. by another controller applying the Artifact contents.
type: string
required:
- lastUpdateTime
- path
- revision
- url
type: object
conditions:
description: Conditions holds the conditions for the OCIRepository.
items:
description: Condition contains details for one aspect of the current
state of this API Resource.
properties:
lastTransitionTime:
description: |-
lastTransitionTime is the last time the condition transitioned from one status to another.
This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable.
format: date-time
type: string
message:
description: |-
message is a human readable message indicating details about the transition.
This may be an empty string.
maxLength: 32768
type: string
observedGeneration:
description: |-
observedGeneration represents the .metadata.generation that the condition was set based upon.
For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date
with respect to the current state of the instance.
format: int64
minimum: 0
type: integer
reason:
description: |-
reason contains a programmatic identifier indicating the reason for the condition's last transition.
Producers of specific condition types may define expected values and meanings for this field,
and whether the values are considered a guaranteed API.
The value should be a CamelCase string.
This field may not be empty.
maxLength: 1024
minLength: 1
pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$
type: string
status:
description: status of the condition, one of True, False, Unknown.
enum:
- "True"
- "False"
- Unknown
type: string
type:
description: type of condition in CamelCase or in foo.example.com/CamelCase.
maxLength: 316
pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$
type: string
required:
- lastTransitionTime
- message
- reason
- status
- type
type: object
type: array
contentConfigChecksum:
description: |-
ContentConfigChecksum is a checksum of all the configurations related to
the content of the source artifact:
- .spec.ignore
- .spec.layerSelector
observed in .status.observedGeneration version of the object. This can
be used to determine if the content configuration has changed and the
artifact needs to be rebuilt.
It has the format of `<algo>:<checksum>`, for example: `sha256:<checksum>`.
Deprecated: Replaced with explicit fields for observed artifact content
config in the status.
type: string
lastHandledReconcileAt:
description: |-
LastHandledReconcileAt holds the value of the most recent
reconcile request value, so a change of the annotation value
can be detected.
type: string
observedGeneration:
description: ObservedGeneration is the last observed generation.
format: int64
type: integer
observedIgnore:
description: |-
ObservedIgnore is the observed exclusion patterns used for constructing
the source artifact.
type: string
observedLayerSelector:
description: |-
ObservedLayerSelector is the observed layer selector used for constructing
the source artifact.
properties:
mediaType:
description: |-
MediaType specifies the OCI media type of the layer
which should be extracted from the OCI Artifact. The
first layer matching this type is selected.
type: string
operation:
description: |-
Operation specifies how the selected layer should be processed.
By default, the layer compressed content is extracted to storage.
When the operation is set to 'copy', the layer compressed content
is persisted to storage as it is.
enum:
- extract
- copy
type: string
type: object
url:
description: URL is the download link for the artifact output of the
last OCI Repository sync.
type: string
type: object
type: object
served: true
storage: false
subresources:
status: {}

View File

@ -5,5 +5,4 @@ resources:
- bases/source.toolkit.fluxcd.io_helmrepositories.yaml
- bases/source.toolkit.fluxcd.io_helmcharts.yaml
- bases/source.toolkit.fluxcd.io_buckets.yaml
- bases/source.toolkit.fluxcd.io_ocirepositories.yaml
# +kubebuilder:scaffold:crdkustomizeresource

View File

@ -51,8 +51,6 @@ spec:
valueFrom:
fieldRef:
fieldPath: metadata.namespace
- name: TUF_ROOT # store the Fulcio root CA file in tmp
value: "/tmp/.sigstore"
args:
- --watch-all-namespaces
- --log-level=info

View File

@ -6,4 +6,4 @@ resources:
images:
- name: fluxcd/source-controller
newName: fluxcd/source-controller
newTag: v1.6.0
newTag: v0.21.2

View File

@ -1,24 +0,0 @@
# permissions for end users to edit ocirepositories.
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: ocirepository-editor-role
rules:
- apiGroups:
- source.toolkit.fluxcd.io
resources:
- ocirepositories
verbs:
- create
- delete
- get
- list
- patch
- update
- watch
- apiGroups:
- source.toolkit.fluxcd.io
resources:
- ocirepositories/status
verbs:
- get

View File

@ -1,20 +0,0 @@
# permissions for end users to view ocirepositories.
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: ocirepository-viewer-role
rules:
- apiGroups:
- source.toolkit.fluxcd.io
resources:
- ocirepositories
verbs:
- get
- list
- watch
- apiGroups:
- source.toolkit.fluxcd.io
resources:
- ocirepositories/status
verbs:
- get

View File

@ -1,7 +1,9 @@
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
creationTimestamp: null
name: manager-role
rules:
- apiGroups:
@ -19,20 +21,10 @@ rules:
- get
- list
- watch
- apiGroups:
- ""
resources:
- serviceaccounts/token
verbs:
- create
- apiGroups:
- source.toolkit.fluxcd.io
resources:
- buckets
- gitrepositories
- helmcharts
- helmrepositories
- ocirepositories
verbs:
- create
- delete
@ -45,10 +37,6 @@ rules:
- source.toolkit.fluxcd.io
resources:
- buckets/finalizers
- gitrepositories/finalizers
- helmcharts/finalizers
- helmrepositories/finalizers
- ocirepositories/finalizers
verbs:
- create
- delete
@ -59,10 +47,96 @@ rules:
- source.toolkit.fluxcd.io
resources:
- buckets/status
- gitrepositories/status
- helmcharts/status
- helmrepositories/status
- ocirepositories/status
verbs:
- get
- patch
- update
- apiGroups:
- source.toolkit.fluxcd.io
resources:
- gitrepositories
verbs:
- create
- delete
- get
- list
- patch
- update
- watch
- apiGroups:
- source.toolkit.fluxcd.io
resources:
- gitrepositories/finalizers
verbs:
- create
- delete
- get
- patch
- update
- apiGroups:
- source.toolkit.fluxcd.io
resources:
- gitrepositories/status
verbs:
- get
- patch
- update
- apiGroups:
- source.toolkit.fluxcd.io
resources:
- helmcharts
verbs:
- create
- delete
- get
- list
- patch
- update
- watch
- apiGroups:
- source.toolkit.fluxcd.io
resources:
- helmcharts/finalizers
verbs:
- create
- delete
- get
- patch
- update
- apiGroups:
- source.toolkit.fluxcd.io
resources:
- helmcharts/status
verbs:
- get
- patch
- update
- apiGroups:
- source.toolkit.fluxcd.io
resources:
- helmrepositories
verbs:
- create
- delete
- get
- list
- patch
- update
- watch
- apiGroups:
- source.toolkit.fluxcd.io
resources:
- helmrepositories/finalizers
verbs:
- create
- delete
- get
- patch
- update
- apiGroups:
- source.toolkit.fluxcd.io
resources:
- helmrepositories/status
verbs:
- get
- patch

View File

@ -1,11 +0,0 @@
apiVersion: source.toolkit.fluxcd.io/v1
kind: HelmChart
metadata:
name: helmchart-sample-oci
spec:
chart: stefanprodan/charts/podinfo
version: '>=6.0.0 <7.0.0'
sourceRef:
kind: HelmRepository
name: helmrepository-sample-oci
interval: 1m

View File

@ -1,8 +0,0 @@
apiVersion: source.toolkit.fluxcd.io/v1
kind: HelmRepository
metadata:
name: helmrepository-sample-oci
spec:
interval: 1m
type: oci
url: oci://ghcr.io/

View File

@ -1,9 +0,0 @@
apiVersion: source.toolkit.fluxcd.io/v1
kind: OCIRepository
metadata:
name: ocirepository-sample
spec:
interval: 1m
url: oci://ghcr.io/stefanprodan/manifests/podinfo
ref:
tag: 6.1.6

View File

@ -1,4 +1,4 @@
apiVersion: source.toolkit.fluxcd.io/v1
apiVersion: source.toolkit.fluxcd.io/v1beta1
kind: Bucket
metadata:
name: bucket-sample

View File

@ -1,4 +1,4 @@
apiVersion: source.toolkit.fluxcd.io/v1
apiVersion: source.toolkit.fluxcd.io/v1beta1
kind: GitRepository
metadata:
name: gitrepository-sample

View File

@ -1,4 +1,4 @@
apiVersion: source.toolkit.fluxcd.io/v1
apiVersion: source.toolkit.fluxcd.io/v1beta1
kind: HelmChart
metadata:
name: helmchart-git-sample

View File

@ -1,12 +1,11 @@
apiVersion: source.toolkit.fluxcd.io/v1
apiVersion: source.toolkit.fluxcd.io/v1beta1
kind: HelmChart
metadata:
name: helmchart-sample
spec:
chart: podinfo
version: '6.x'
version: '>=2.0.0 <3.0.0'
sourceRef:
kind: HelmRepository
name: helmrepository-sample
interval: 1m
ignoreMissingValuesFiles: true

View File

@ -1,4 +1,4 @@
apiVersion: source.toolkit.fluxcd.io/v1
apiVersion: source.toolkit.fluxcd.io/v1beta1
kind: HelmRepository
metadata:
name: helmrepository-sample

View File

@ -1,5 +1,5 @@
---
apiVersion: source.toolkit.fluxcd.io/v1
apiVersion: source.toolkit.fluxcd.io/v1beta1
kind: Bucket
metadata:
name: podinfo

View File

@ -1,10 +1,29 @@
apiVersion: source.toolkit.fluxcd.io/v1
apiVersion: source.toolkit.fluxcd.io/v1beta1
kind: GitRepository
metadata:
name: large-repo
name: large-repo-go-git
spec:
gitImplementation: go-git
interval: 10m
timeout: 2m
url: https://github.com/nodejs/node.git
url: https://github.com/hashgraph/hedera-mirror-node.git
ref:
branch: main
ignore: |
/*
!/charts
---
apiVersion: source.toolkit.fluxcd.io/v1beta1
kind: GitRepository
metadata:
name: large-repo-libgit2
spec:
gitImplementation: libgit2
interval: 10m
timeout: 2m
url: https://github.com/hashgraph/hedera-mirror-node.git
ref:
branch: main
ignore: |
/*
!/charts

View File

@ -1,5 +1,5 @@
---
apiVersion: source.toolkit.fluxcd.io/v1
apiVersion: source.toolkit.fluxcd.io/v1beta1
kind: Bucket
metadata:
name: charts
@ -13,7 +13,7 @@ spec:
secretRef:
name: minio-credentials
---
apiVersion: source.toolkit.fluxcd.io/v1
apiVersion: source.toolkit.fluxcd.io/v1beta1
kind: HelmChart
metadata:
name: helmchart-bucket

View File

@ -1,25 +0,0 @@
---
apiVersion: source.toolkit.fluxcd.io/v1
kind: HelmRepository
metadata:
name: podinfo-notation
spec:
url: oci://ghcr.io/stefanprodan/charts
type: "oci"
interval: 1m
---
apiVersion: source.toolkit.fluxcd.io/v1
kind: HelmChart
metadata:
name: podinfo-notation
spec:
chart: podinfo
sourceRef:
kind: HelmRepository
name: podinfo-notation
version: '6.6.0'
interval: 1m
verify:
provider: notation
secretRef:
name: notation-config

View File

@ -1,35 +0,0 @@
---
apiVersion: source.toolkit.fluxcd.io/v1
kind: HelmRepository
metadata:
name: podinfo
spec:
url: oci://ghcr.io/stefanprodan/charts
type: "oci"
interval: 1m
---
apiVersion: source.toolkit.fluxcd.io/v1
kind: HelmChart
metadata:
name: podinfo
spec:
chart: podinfo
sourceRef:
kind: HelmRepository
name: podinfo
version: '6.1.*'
interval: 1m
---
apiVersion: source.toolkit.fluxcd.io/v1
kind: HelmChart
metadata:
name: podinfo-keyless
spec:
chart: podinfo
sourceRef:
kind: HelmRepository
name: podinfo
version: '6.2.1'
interval: 1m
verify:
provider: cosign

View File

@ -1,14 +0,0 @@
---
apiVersion: source.toolkit.fluxcd.io/v1
kind: OCIRepository
metadata:
name: podinfo-deploy-signed-with-key
spec:
interval: 5m
url: oci://ghcr.io/stefanprodan/podinfo-deploy
ref:
semver: "6.2.x"
verify:
provider: cosign
secretRef:
name: cosign-key

View File

@ -1,12 +0,0 @@
---
apiVersion: source.toolkit.fluxcd.io/v1
kind: OCIRepository
metadata:
name: podinfo-deploy-signed-with-keyless
spec:
interval: 5m
url: oci://ghcr.io/stefanprodan/manifests/podinfo
ref:
semver: "6.2.x"
verify:
provider: cosign

View File

@ -1,14 +0,0 @@
---
apiVersion: source.toolkit.fluxcd.io/v1
kind: OCIRepository
metadata:
name: podinfo-deploy-signed-with-notation
spec:
interval: 5m
url: oci://ghcr.io/stefanprodan/podinfo-deploy
ref:
semver: "6.6.x"
verify:
provider: notation
secretRef:
name: notation-config

23
controllers/artifact.go Normal file
View File

@ -0,0 +1,23 @@
package controllers
import sourcev1 "github.com/fluxcd/source-controller/api/v1beta1"
// hasArtifactUpdated returns true if the number of current and updated artifacts
// differs, or if any revision in the current artifacts is not present in the updated artifacts
func hasArtifactUpdated(current []*sourcev1.Artifact, updated []*sourcev1.Artifact) bool {
if len(current) != len(updated) {
return true
}
OUTER:
for _, c := range current {
for _, u := range updated {
if u.HasRevision(c.Revision) {
continue OUTER
}
}
return true
}
return false
}
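A minimal usage sketch (hypothetical, not part of the repository's files) of hasArtifactUpdated, assuming the sourcev1.Artifact type with the Revision field and HasRevision method used above and an imported "fmt" package; two slices holding the same set of revisions count as unchanged, while a differing count or a missing revision marks the source as updated:

// Example_hasArtifactUpdated is an illustrative example within the controllers
// package; the revision values are placeholders.
func Example_hasArtifactUpdated() {
	current := []*sourcev1.Artifact{{Revision: "main/abc123"}, {Revision: "main/def456"}}
	updated := []*sourcev1.Artifact{{Revision: "main/def456"}, {Revision: "main/abc123"}}
	// Same revisions in a different order: no update.
	fmt.Println(hasArtifactUpdated(current, updated))
	// An extra artifact changes the count: update detected.
	updated = append(updated, &sourcev1.Artifact{Revision: "main/0a1b2c"})
	fmt.Println(hasArtifactUpdated(current, updated))
	// Output:
	// false
	// true
}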

View File

@ -0,0 +1,110 @@
package controllers
import (
"testing"
sourcev1 "github.com/fluxcd/source-controller/api/v1beta1"
)
func TestHasUpdated(t *testing.T) {
tests := []struct {
name string
current []*sourcev1.Artifact
updated []*sourcev1.Artifact
expected bool
}{
{
name: "not updated single",
current: []*sourcev1.Artifact{
{
Revision: "foo",
},
},
updated: []*sourcev1.Artifact{
{
Revision: "foo",
},
},
expected: false,
},
{
name: "updated single",
current: []*sourcev1.Artifact{
{
Revision: "foo",
},
},
updated: []*sourcev1.Artifact{
{
Revision: "bar",
},
},
expected: true,
},
{
name: "not updated multiple",
current: []*sourcev1.Artifact{
{
Revision: "foo",
},
{
Revision: "bar",
},
},
updated: []*sourcev1.Artifact{
{
Revision: "foo",
},
{
Revision: "bar",
},
},
expected: false,
},
{
name: "updated multiple",
current: []*sourcev1.Artifact{
{
Revision: "foo",
},
{
Revision: "bar",
},
},
updated: []*sourcev1.Artifact{
{
Revision: "foo",
},
{
Revision: "baz",
},
},
expected: true,
},
{
name: "updated different artifact count",
current: []*sourcev1.Artifact{
{
Revision: "foo",
},
{
Revision: "bar",
},
},
updated: []*sourcev1.Artifact{
{
Revision: "foo",
},
},
expected: true,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
result := hasArtifactUpdated(tt.current, tt.updated)
if result != tt.expected {
t.Errorf("Archive() result = %v, wantResult %v", result, tt.expected)
}
})
}
}

View File

@ -0,0 +1,611 @@
/*
Copyright 2020 The Flux authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package controllers
import (
"context"
"crypto/sha1"
"fmt"
"os"
"path/filepath"
"strings"
"time"
"github.com/minio/minio-go/v7"
"github.com/minio/minio-go/v7/pkg/credentials"
"github.com/minio/minio-go/v7/pkg/s3utils"
"google.golang.org/api/option"
corev1 "k8s.io/api/core/v1"
apimeta "k8s.io/apimachinery/pkg/api/meta"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/types"
kuberecorder "k8s.io/client-go/tools/record"
"k8s.io/client-go/tools/reference"
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/controller"
"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
"sigs.k8s.io/controller-runtime/pkg/predicate"
"github.com/fluxcd/pkg/apis/meta"
"github.com/fluxcd/pkg/runtime/events"
"github.com/fluxcd/pkg/runtime/metrics"
"github.com/fluxcd/pkg/runtime/predicates"
"github.com/fluxcd/source-controller/pkg/gcp"
sourcev1 "github.com/fluxcd/source-controller/api/v1beta1"
"github.com/fluxcd/source-controller/pkg/sourceignore"
)
// +kubebuilder:rbac:groups=source.toolkit.fluxcd.io,resources=buckets,verbs=get;list;watch;create;update;patch;delete
// +kubebuilder:rbac:groups=source.toolkit.fluxcd.io,resources=buckets/status,verbs=get;update;patch
// +kubebuilder:rbac:groups=source.toolkit.fluxcd.io,resources=buckets/finalizers,verbs=get;create;update;patch;delete
// +kubebuilder:rbac:groups="",resources=secrets,verbs=get;list;watch
// BucketReconciler reconciles a Bucket object
type BucketReconciler struct {
client.Client
Scheme *runtime.Scheme
Storage *Storage
EventRecorder kuberecorder.EventRecorder
ExternalEventRecorder *events.Recorder
MetricsRecorder *metrics.Recorder
}
type BucketReconcilerOptions struct {
MaxConcurrentReconciles int
}
func (r *BucketReconciler) SetupWithManager(mgr ctrl.Manager) error {
return r.SetupWithManagerAndOptions(mgr, BucketReconcilerOptions{})
}
func (r *BucketReconciler) SetupWithManagerAndOptions(mgr ctrl.Manager, opts BucketReconcilerOptions) error {
return ctrl.NewControllerManagedBy(mgr).
For(&sourcev1.Bucket{}).
WithEventFilter(predicate.Or(predicate.GenerationChangedPredicate{}, predicates.ReconcileRequestedPredicate{})).
WithOptions(controller.Options{MaxConcurrentReconciles: opts.MaxConcurrentReconciles}).
Complete(r)
}
func (r *BucketReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {
start := time.Now()
log := ctrl.LoggerFrom(ctx)
var bucket sourcev1.Bucket
if err := r.Get(ctx, req.NamespacedName, &bucket); err != nil {
return ctrl.Result{}, client.IgnoreNotFound(err)
}
// Record suspended status metric
defer r.recordSuspension(ctx, bucket)
// Add our finalizer if it does not exist
if !controllerutil.ContainsFinalizer(&bucket, sourcev1.SourceFinalizer) {
patch := client.MergeFrom(bucket.DeepCopy())
controllerutil.AddFinalizer(&bucket, sourcev1.SourceFinalizer)
if err := r.Patch(ctx, &bucket, patch); err != nil {
log.Error(err, "unable to register finalizer")
return ctrl.Result{}, err
}
}
// Examine if the object is under deletion
if !bucket.ObjectMeta.DeletionTimestamp.IsZero() {
return r.reconcileDelete(ctx, bucket)
}
// Return early if the object is suspended.
if bucket.Spec.Suspend {
log.Info("Reconciliation is suspended for this object")
return ctrl.Result{}, nil
}
// record reconciliation duration
if r.MetricsRecorder != nil {
objRef, err := reference.GetReference(r.Scheme, &bucket)
if err != nil {
return ctrl.Result{}, err
}
defer r.MetricsRecorder.RecordDuration(*objRef, start)
}
// set initial status
if resetBucket, ok := r.resetStatus(bucket); ok {
bucket = resetBucket
if err := r.updateStatus(ctx, req, bucket.Status); err != nil {
log.Error(err, "unable to update status")
return ctrl.Result{Requeue: true}, err
}
r.recordReadiness(ctx, bucket)
}
// record the value of the reconciliation request, if any
// TODO(hidde): would be better to defer this in combination with
// always patching the status sub-resource after a reconciliation.
if v, ok := meta.ReconcileAnnotationValue(bucket.GetAnnotations()); ok {
bucket.Status.SetLastHandledReconcileRequest(v)
}
// purge old artifacts from storage
if err := r.gc(bucket); err != nil {
log.Error(err, "unable to purge old artifacts")
}
// reconcile bucket by downloading its content
reconciledBucket, reconcileErr := r.reconcile(ctx, *bucket.DeepCopy())
// update status with the reconciliation result
if err := r.updateStatus(ctx, req, reconciledBucket.Status); err != nil {
log.Error(err, "unable to update status")
return ctrl.Result{Requeue: true}, err
}
// if reconciliation failed, record the failure and requeue immediately
if reconcileErr != nil {
r.event(ctx, reconciledBucket, events.EventSeverityError, reconcileErr.Error())
r.recordReadiness(ctx, reconciledBucket)
return ctrl.Result{Requeue: true}, reconcileErr
}
// emit revision change event
if bucket.Status.Artifact == nil || reconciledBucket.Status.Artifact.Revision != bucket.Status.Artifact.Revision {
r.event(ctx, reconciledBucket, events.EventSeverityInfo, sourcev1.BucketReadyMessage(reconciledBucket))
}
r.recordReadiness(ctx, reconciledBucket)
log.Info(fmt.Sprintf("Reconciliation finished in %s, next run in %s",
time.Since(start).String(),
bucket.GetInterval().Duration.String(),
))
return ctrl.Result{RequeueAfter: bucket.GetInterval().Duration}, nil
}
func (r *BucketReconciler) reconcile(ctx context.Context, bucket sourcev1.Bucket) (sourcev1.Bucket, error) {
log := ctrl.LoggerFrom(ctx)
var err error
var sourceBucket sourcev1.Bucket
tempDir, err := os.MkdirTemp("", bucket.Name)
if err != nil {
err = fmt.Errorf("tmp dir error: %w", err)
return sourcev1.BucketNotReady(bucket, sourcev1.StorageOperationFailedReason, err.Error()), err
}
defer func() {
if err := os.RemoveAll(tempDir); err != nil {
log.Error(err, "failed to remove working directory", "path", tempDir)
}
}()
if bucket.Spec.Provider == sourcev1.GoogleBucketProvider {
sourceBucket, err = r.reconcileWithGCP(ctx, bucket, tempDir)
if err != nil {
return sourceBucket, err
}
} else {
sourceBucket, err = r.reconcileWithMinio(ctx, bucket, tempDir)
if err != nil {
return sourceBucket, err
}
}
revision, err := r.checksum(tempDir)
if err != nil {
return sourcev1.BucketNotReady(bucket, sourcev1.StorageOperationFailedReason, err.Error()), err
}
// return early on unchanged revision
artifact := r.Storage.NewArtifactFor(bucket.Kind, bucket.GetObjectMeta(), revision, fmt.Sprintf("%s.tar.gz", revision))
if apimeta.IsStatusConditionTrue(bucket.Status.Conditions, meta.ReadyCondition) && bucket.GetArtifact().HasRevision(artifact.Revision) {
if artifact.URL != bucket.GetArtifact().URL {
r.Storage.SetArtifactURL(bucket.GetArtifact())
bucket.Status.URL = r.Storage.SetHostname(bucket.Status.URL)
}
return bucket, nil
}
// create artifact dir
err = r.Storage.MkdirAll(artifact)
if err != nil {
err = fmt.Errorf("mkdir dir error: %w", err)
return sourcev1.BucketNotReady(bucket, sourcev1.StorageOperationFailedReason, err.Error()), err
}
// acquire lock
unlock, err := r.Storage.Lock(artifact)
if err != nil {
err = fmt.Errorf("unable to acquire lock: %w", err)
return sourcev1.BucketNotReady(bucket, sourcev1.StorageOperationFailedReason, err.Error()), err
}
defer unlock()
// archive artifact and check integrity
if err := r.Storage.Archive(&artifact, tempDir, nil); err != nil {
err = fmt.Errorf("storage archive error: %w", err)
return sourcev1.BucketNotReady(bucket, sourcev1.StorageOperationFailedReason, err.Error()), err
}
// update latest symlink
url, err := r.Storage.Symlink(artifact, "latest.tar.gz")
if err != nil {
err = fmt.Errorf("storage symlink error: %w", err)
return sourcev1.BucketNotReady(bucket, sourcev1.StorageOperationFailedReason, err.Error()), err
}
message := fmt.Sprintf("Fetched revision: %s", artifact.Revision)
return sourcev1.BucketReady(bucket, artifact, url, sourcev1.BucketOperationSucceedReason, message), nil
}
func (r *BucketReconciler) reconcileDelete(ctx context.Context, bucket sourcev1.Bucket) (ctrl.Result, error) {
if err := r.gc(bucket); err != nil {
r.event(ctx, bucket, events.EventSeverityError,
fmt.Sprintf("garbage collection for deleted resource failed: %s", err.Error()))
// Return the error so we retry the failed garbage collection
return ctrl.Result{}, err
}
// Record deleted status
r.recordReadiness(ctx, bucket)
// Remove our finalizer from the list and update it
controllerutil.RemoveFinalizer(&bucket, sourcev1.SourceFinalizer)
if err := r.Update(ctx, &bucket); err != nil {
return ctrl.Result{}, err
}
// Stop reconciliation as the object is being deleted
return ctrl.Result{}, nil
}
// reconcileWithGCP handles getting objects from a Google Cloud Platform bucket
// using a gcp client
func (r *BucketReconciler) reconcileWithGCP(ctx context.Context, bucket sourcev1.Bucket, tempDir string) (sourcev1.Bucket, error) {
log := ctrl.LoggerFrom(ctx)
gcpClient, err := r.authGCP(ctx, bucket)
if err != nil {
err = fmt.Errorf("auth error: %w", err)
return sourcev1.BucketNotReady(bucket, sourcev1.AuthenticationFailedReason, err.Error()), err
}
defer gcpClient.Close(log)
ctxTimeout, cancel := context.WithTimeout(ctx, bucket.Spec.Timeout.Duration)
defer cancel()
exists, err := gcpClient.BucketExists(ctxTimeout, bucket.Spec.BucketName)
if err != nil {
return sourcev1.BucketNotReady(bucket, sourcev1.BucketOperationFailedReason, err.Error()), err
}
if !exists {
err = fmt.Errorf("bucket '%s' not found", bucket.Spec.BucketName)
return sourcev1.BucketNotReady(bucket, sourcev1.BucketOperationFailedReason, err.Error()), err
}
// Look for file with ignore rules first.
path := filepath.Join(tempDir, sourceignore.IgnoreFile)
if err := gcpClient.FGetObject(ctxTimeout, bucket.Spec.BucketName, sourceignore.IgnoreFile, path); err != nil {
if err == gcp.ErrorObjectDoesNotExist && sourceignore.IgnoreFile != ".sourceignore" {
return sourcev1.BucketNotReady(bucket, sourcev1.BucketOperationFailedReason, err.Error()), err
}
}
ps, err := sourceignore.ReadIgnoreFile(path, nil)
if err != nil {
return sourcev1.BucketNotReady(bucket, sourcev1.BucketOperationFailedReason, err.Error()), err
}
// In-spec patterns take precedence
if bucket.Spec.Ignore != nil {
ps = append(ps, sourceignore.ReadPatterns(strings.NewReader(*bucket.Spec.Ignore), nil)...)
}
matcher := sourceignore.NewMatcher(ps)
objects := gcpClient.ListObjects(ctxTimeout, bucket.Spec.BucketName, nil)
// download bucket content
for {
object, err := objects.Next()
if err == gcp.IteratorDone {
break
}
if err != nil {
err = fmt.Errorf("listing objects from bucket '%s' failed: %w", bucket.Spec.BucketName, err)
return sourcev1.BucketNotReady(bucket, sourcev1.BucketOperationFailedReason, err.Error()), err
}
if strings.HasSuffix(object.Name, "/") || object.Name == sourceignore.IgnoreFile {
continue
}
if matcher.Match(strings.Split(object.Name, "/"), false) {
continue
}
localPath := filepath.Join(tempDir, object.Name)
if err = gcpClient.FGetObject(ctxTimeout, bucket.Spec.BucketName, object.Name, localPath); err != nil {
err = fmt.Errorf("downloading object from bucket '%s' failed: %w", bucket.Spec.BucketName, err)
return sourcev1.BucketNotReady(bucket, sourcev1.BucketOperationFailedReason, err.Error()), err
}
}
return sourcev1.Bucket{}, nil
}
// reconcileWithMinio handles getting objects from an S3 compatible bucket
// using a minio client
func (r *BucketReconciler) reconcileWithMinio(ctx context.Context, bucket sourcev1.Bucket, tempDir string) (sourcev1.Bucket, error) {
s3Client, err := r.authMinio(ctx, bucket)
if err != nil {
err = fmt.Errorf("auth error: %w", err)
return sourcev1.BucketNotReady(bucket, sourcev1.AuthenticationFailedReason, err.Error()), err
}
ctxTimeout, cancel := context.WithTimeout(ctx, bucket.Spec.Timeout.Duration)
defer cancel()
exists, err := s3Client.BucketExists(ctxTimeout, bucket.Spec.BucketName)
if err != nil {
return sourcev1.BucketNotReady(bucket, sourcev1.BucketOperationFailedReason, err.Error()), err
}
if !exists {
err = fmt.Errorf("bucket '%s' not found", bucket.Spec.BucketName)
return sourcev1.BucketNotReady(bucket, sourcev1.BucketOperationFailedReason, err.Error()), err
}
// Look for file with ignore rules first
// NB: S3 has flat filepath keys making it impossible to look
// for files in "subdirectories" without building up a tree first.
path := filepath.Join(tempDir, sourceignore.IgnoreFile)
if err := s3Client.FGetObject(ctxTimeout, bucket.Spec.BucketName, sourceignore.IgnoreFile, path, minio.GetObjectOptions{}); err != nil {
if resp, ok := err.(minio.ErrorResponse); ok && resp.Code != "NoSuchKey" {
return sourcev1.BucketNotReady(bucket, sourcev1.BucketOperationFailedReason, err.Error()), err
}
}
ps, err := sourceignore.ReadIgnoreFile(path, nil)
if err != nil {
return sourcev1.BucketNotReady(bucket, sourcev1.BucketOperationFailedReason, err.Error()), err
}
// In-spec patterns take precedence
if bucket.Spec.Ignore != nil {
ps = append(ps, sourceignore.ReadPatterns(strings.NewReader(*bucket.Spec.Ignore), nil)...)
}
matcher := sourceignore.NewMatcher(ps)
// download bucket content
for object := range s3Client.ListObjects(ctxTimeout, bucket.Spec.BucketName, minio.ListObjectsOptions{
Recursive: true,
UseV1: s3utils.IsGoogleEndpoint(*s3Client.EndpointURL()),
}) {
if object.Err != nil {
err = fmt.Errorf("listing objects from bucket '%s' failed: %w", bucket.Spec.BucketName, object.Err)
return sourcev1.BucketNotReady(bucket, sourcev1.BucketOperationFailedReason, err.Error()), err
}
if strings.HasSuffix(object.Key, "/") || object.Key == sourceignore.IgnoreFile {
continue
}
if matcher.Match(strings.Split(object.Key, "/"), false) {
continue
}
localPath := filepath.Join(tempDir, object.Key)
err := s3Client.FGetObject(ctxTimeout, bucket.Spec.BucketName, object.Key, localPath, minio.GetObjectOptions{})
if err != nil {
err = fmt.Errorf("downloading object from bucket '%s' failed: %w", bucket.Spec.BucketName, err)
return sourcev1.BucketNotReady(bucket, sourcev1.BucketOperationFailedReason, err.Error()), err
}
}
return sourcev1.Bucket{}, nil
}
// authGCP creates a new Google Cloud Platform storage client
// to interact with the storage service.
func (r *BucketReconciler) authGCP(ctx context.Context, bucket sourcev1.Bucket) (*gcp.GCPClient, error) {
var client *gcp.GCPClient
var err error
if bucket.Spec.SecretRef != nil {
secretName := types.NamespacedName{
Namespace: bucket.GetNamespace(),
Name: bucket.Spec.SecretRef.Name,
}
var secret corev1.Secret
if err := r.Get(ctx, secretName, &secret); err != nil {
return nil, fmt.Errorf("credentials secret error: %w", err)
}
if err := gcp.ValidateSecret(secret.Data, secret.Name); err != nil {
return nil, err
}
client, err = gcp.NewClient(ctx, option.WithCredentialsJSON(secret.Data["serviceaccount"]))
if err != nil {
return nil, err
}
} else {
client, err = gcp.NewClient(ctx)
if err != nil {
return nil, err
}
}
return client, nil
}
// authMinio creates a new Minio client to interact with S3
// compatible storage services.
func (r *BucketReconciler) authMinio(ctx context.Context, bucket sourcev1.Bucket) (*minio.Client, error) {
opt := minio.Options{
Region: bucket.Spec.Region,
Secure: !bucket.Spec.Insecure,
}
if bucket.Spec.SecretRef != nil {
secretName := types.NamespacedName{
Namespace: bucket.GetNamespace(),
Name: bucket.Spec.SecretRef.Name,
}
var secret corev1.Secret
if err := r.Get(ctx, secretName, &secret); err != nil {
return nil, fmt.Errorf("credentials secret error: %w", err)
}
accesskey := ""
secretkey := ""
if k, ok := secret.Data["accesskey"]; ok {
accesskey = string(k)
}
if k, ok := secret.Data["secretkey"]; ok {
secretkey = string(k)
}
if accesskey == "" || secretkey == "" {
return nil, fmt.Errorf("invalid '%s' secret data: required fields 'accesskey' and 'secretkey'", secret.Name)
}
opt.Creds = credentials.NewStaticV4(accesskey, secretkey, "")
} else if bucket.Spec.Provider == sourcev1.AmazonBucketProvider {
opt.Creds = credentials.NewIAM("")
}
if opt.Creds == nil {
return nil, fmt.Errorf("no bucket credentials found")
}
return minio.New(bucket.Spec.Endpoint, &opt)
}
// checksum calculates the SHA1 checksum of the given root directory.
// It traverses the given root directory and calculates the checksum for any found file, and returns the SHA1 sum of the
// list with relative file paths and their checksums.
func (r *BucketReconciler) checksum(root string) (string, error) {
sum := sha1.New()
if err := filepath.Walk(root, func(path string, info os.FileInfo, err error) error {
if err != nil {
return err
}
if !info.Mode().IsRegular() {
return nil
}
data, err := os.ReadFile(path)
if err != nil {
return err
}
relPath, err := filepath.Rel(root, path)
if err != nil {
return err
}
sum.Write([]byte(fmt.Sprintf("%x %s\n", sha1.Sum(data), relPath)))
return nil
}); err != nil {
return "", err
}
return fmt.Sprintf("%x", sum.Sum(nil)), nil
}
// resetStatus returns a modified v1beta1.Bucket and a boolean indicating
// if the status field has been reset.
func (r *BucketReconciler) resetStatus(bucket sourcev1.Bucket) (sourcev1.Bucket, bool) {
// We do not have an artifact, or it no longer exists
if bucket.GetArtifact() == nil || !r.Storage.ArtifactExist(*bucket.GetArtifact()) {
bucket = sourcev1.BucketProgressing(bucket)
bucket.Status.Artifact = nil
return bucket, true
}
if bucket.Generation != bucket.Status.ObservedGeneration {
return sourcev1.BucketProgressing(bucket), true
}
return bucket, false
}
// gc performs a garbage collection for the given v1beta1.Bucket.
// It removes all but the current artifact except for when the
// deletion timestamp is set, which will result in the removal of
// all artifacts for the resource.
func (r *BucketReconciler) gc(bucket sourcev1.Bucket) error {
if !bucket.DeletionTimestamp.IsZero() {
return r.Storage.RemoveAll(r.Storage.NewArtifactFor(bucket.Kind, bucket.GetObjectMeta(), "", "*"))
}
if bucket.GetArtifact() != nil {
return r.Storage.RemoveAllButCurrent(*bucket.GetArtifact())
}
return nil
}
// event emits a Kubernetes event and forwards the event to notification controller if configured
func (r *BucketReconciler) event(ctx context.Context, bucket sourcev1.Bucket, severity, msg string) {
log := ctrl.LoggerFrom(ctx)
if r.EventRecorder != nil {
r.EventRecorder.Eventf(&bucket, "Normal", severity, msg)
}
if r.ExternalEventRecorder != nil {
objRef, err := reference.GetReference(r.Scheme, &bucket)
if err != nil {
log.Error(err, "unable to send event")
return
}
if err := r.ExternalEventRecorder.Eventf(*objRef, nil, severity, severity, msg); err != nil {
log.Error(err, "unable to send event")
return
}
}
}
func (r *BucketReconciler) recordReadiness(ctx context.Context, bucket sourcev1.Bucket) {
log := ctrl.LoggerFrom(ctx)
if r.MetricsRecorder == nil {
return
}
objRef, err := reference.GetReference(r.Scheme, &bucket)
if err != nil {
log.Error(err, "unable to record readiness metric")
return
}
if rc := apimeta.FindStatusCondition(bucket.Status.Conditions, meta.ReadyCondition); rc != nil {
r.MetricsRecorder.RecordCondition(*objRef, *rc, !bucket.DeletionTimestamp.IsZero())
} else {
r.MetricsRecorder.RecordCondition(*objRef, metav1.Condition{
Type: meta.ReadyCondition,
Status: metav1.ConditionUnknown,
}, !bucket.DeletionTimestamp.IsZero())
}
}
func (r *BucketReconciler) recordSuspension(ctx context.Context, bucket sourcev1.Bucket) {
if r.MetricsRecorder == nil {
return
}
log := ctrl.LoggerFrom(ctx)
objRef, err := reference.GetReference(r.Scheme, &bucket)
if err != nil {
log.Error(err, "unable to record suspended metric")
return
}
if !bucket.DeletionTimestamp.IsZero() {
r.MetricsRecorder.RecordSuspend(*objRef, false)
} else {
r.MetricsRecorder.RecordSuspend(*objRef, bucket.Spec.Suspend)
}
}
func (r *BucketReconciler) updateStatus(ctx context.Context, req ctrl.Request, newStatus sourcev1.BucketStatus) error {
var bucket sourcev1.Bucket
if err := r.Get(ctx, req.NamespacedName, &bucket); err != nil {
return err
}
patch := client.MergeFrom(bucket.DeepCopy())
bucket.Status = newStatus
return r.Status().Patch(ctx, &bucket, patch)
}
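As a side note on the revision format: checksum above hashes a newline-separated listing of per-file SHA-1 sums and relative paths, and the resulting Bucket revision is the SHA-1 of that listing. A standalone sketch (hypothetical helper, not part of the controller) of the same construction over an in-memory map of paths to contents:

// bucketRevision mirrors the digest construction of checksum, but over an
// in-memory file map instead of a directory walk. Requires crypto/sha1, fmt
// and sort.
func bucketRevision(files map[string]string) string {
	paths := make([]string, 0, len(files))
	for p := range files {
		paths = append(paths, p)
	}
	sort.Strings(paths) // filepath.Walk also visits entries in lexical order
	sum := sha1.New()
	for _, p := range paths {
		sum.Write([]byte(fmt.Sprintf("%x %s\n", sha1.Sum([]byte(files[p])), p)))
	}
	return fmt.Sprintf("%x", sum.Sum(nil))
}

With no files at all this reduces to the SHA-1 of the empty string (da39a3ee5e6b4b0d3255bfef95601890afd80709), which is the "empty root" value asserted in the checksum test further down.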

View File

@ -0,0 +1,82 @@
/*
Copyright 2021 The Flux authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package controllers
import (
"os"
"path/filepath"
"testing"
)
func TestBucketReconciler_checksum(t *testing.T) {
tests := []struct {
name string
beforeFunc func(root string)
want string
wantErr bool
}{
{
name: "empty root",
want: "da39a3ee5e6b4b0d3255bfef95601890afd80709",
},
{
name: "with file",
beforeFunc: func(root string) {
mockFile(root, "a/b/c.txt", "a dummy string")
},
want: "309a5e6e96b4a7eea0d1cfaabf1be8ec1c063fa0",
},
{
name: "with file in different path",
beforeFunc: func(root string) {
mockFile(root, "a/b.txt", "a dummy string")
},
want: "e28c62b5cc488849950c4355dddc5523712616d4",
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
root, err := os.MkdirTemp("", "bucket-checksum-")
if err != nil {
t.Fatal(err)
}
defer os.RemoveAll(root)
if tt.beforeFunc != nil {
tt.beforeFunc(root)
}
got, err := (&BucketReconciler{}).checksum(root)
if (err != nil) != tt.wantErr {
t.Errorf("checksum() error = %v, wantErr %v", err, tt.wantErr)
return
}
if got != tt.want {
t.Errorf("checksum() got = %v, want %v", got, tt.want)
}
})
}
}
func mockFile(root, path, content string) error {
filePath := filepath.Join(root, path)
if err := os.MkdirAll(filepath.Dir(filePath), os.ModePerm); err != nil {
panic(err)
}
if err := os.WriteFile(filePath, []byte(content), 0644); err != nil {
panic(err)
}
return nil
}

View File

@ -0,0 +1,494 @@
/*
Copyright 2020 The Flux authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package controllers
import (
"context"
"fmt"
"os"
"path/filepath"
"strings"
"time"
securejoin "github.com/cyphar/filepath-securejoin"
corev1 "k8s.io/api/core/v1"
apimeta "k8s.io/apimachinery/pkg/api/meta"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/types"
kuberecorder "k8s.io/client-go/tools/record"
"k8s.io/client-go/tools/reference"
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/builder"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/controller"
"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
"sigs.k8s.io/controller-runtime/pkg/predicate"
"github.com/fluxcd/pkg/apis/meta"
"github.com/fluxcd/pkg/runtime/events"
"github.com/fluxcd/pkg/runtime/metrics"
"github.com/fluxcd/pkg/runtime/predicates"
sourcev1 "github.com/fluxcd/source-controller/api/v1beta1"
"github.com/fluxcd/source-controller/pkg/git"
"github.com/fluxcd/source-controller/pkg/git/strategy"
"github.com/fluxcd/source-controller/pkg/sourceignore"
)
// +kubebuilder:rbac:groups=source.toolkit.fluxcd.io,resources=gitrepositories,verbs=get;list;watch;create;update;patch;delete
// +kubebuilder:rbac:groups=source.toolkit.fluxcd.io,resources=gitrepositories/status,verbs=get;update;patch
// +kubebuilder:rbac:groups=source.toolkit.fluxcd.io,resources=gitrepositories/finalizers,verbs=get;create;update;patch;delete
// +kubebuilder:rbac:groups="",resources=events,verbs=create;patch
// GitRepositoryReconciler reconciles a GitRepository object
type GitRepositoryReconciler struct {
client.Client
requeueDependency time.Duration
Scheme *runtime.Scheme
Storage *Storage
EventRecorder kuberecorder.EventRecorder
ExternalEventRecorder *events.Recorder
MetricsRecorder *metrics.Recorder
}
type GitRepositoryReconcilerOptions struct {
MaxConcurrentReconciles int
DependencyRequeueInterval time.Duration
}
func (r *GitRepositoryReconciler) SetupWithManager(mgr ctrl.Manager) error {
return r.SetupWithManagerAndOptions(mgr, GitRepositoryReconcilerOptions{})
}
func (r *GitRepositoryReconciler) SetupWithManagerAndOptions(mgr ctrl.Manager, opts GitRepositoryReconcilerOptions) error {
r.requeueDependency = opts.DependencyRequeueInterval
return ctrl.NewControllerManagedBy(mgr).
For(&sourcev1.GitRepository{}, builder.WithPredicates(
predicate.Or(predicate.GenerationChangedPredicate{}, predicates.ReconcileRequestedPredicate{}),
)).
WithOptions(controller.Options{MaxConcurrentReconciles: opts.MaxConcurrentReconciles}).
Complete(r)
}
func (r *GitRepositoryReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {
start := time.Now()
log := ctrl.LoggerFrom(ctx)
var repository sourcev1.GitRepository
if err := r.Get(ctx, req.NamespacedName, &repository); err != nil {
return ctrl.Result{}, client.IgnoreNotFound(err)
}
// Record suspended status metric
defer r.recordSuspension(ctx, repository)
// Add our finalizer if it does not exist
if !controllerutil.ContainsFinalizer(&repository, sourcev1.SourceFinalizer) {
patch := client.MergeFrom(repository.DeepCopy())
controllerutil.AddFinalizer(&repository, sourcev1.SourceFinalizer)
if err := r.Patch(ctx, &repository, patch); err != nil {
log.Error(err, "unable to register finalizer")
return ctrl.Result{}, err
}
}
// Examine if the object is under deletion
if !repository.ObjectMeta.DeletionTimestamp.IsZero() {
return r.reconcileDelete(ctx, repository)
}
// Return early if the object is suspended.
if repository.Spec.Suspend {
log.Info("Reconciliation is suspended for this object")
return ctrl.Result{}, nil
}
// check dependencies
if len(repository.Spec.Include) > 0 {
if err := r.checkDependencies(repository); err != nil {
repository = sourcev1.GitRepositoryNotReady(repository, meta.DependencyNotReadyReason, err.Error())
if err := r.updateStatus(ctx, req, repository.Status); err != nil {
log.Error(err, "unable to update status for dependency not ready")
return ctrl.Result{Requeue: true}, err
}
// we can't rely on exponential backoff because it will prolong the execution too much,
// instead we requeue on a fixed interval.
msg := fmt.Sprintf("Dependencies do not meet ready condition, retrying in %s", r.requeueDependency.String())
log.Info(msg)
r.event(ctx, repository, events.EventSeverityInfo, msg)
r.recordReadiness(ctx, repository)
return ctrl.Result{RequeueAfter: r.requeueDependency}, nil
}
log.Info("All dependencies area ready, proceeding with reconciliation")
}
// record reconciliation duration
if r.MetricsRecorder != nil {
objRef, err := reference.GetReference(r.Scheme, &repository)
if err != nil {
return ctrl.Result{}, err
}
defer r.MetricsRecorder.RecordDuration(*objRef, start)
}
// set initial status
if resetRepository, ok := r.resetStatus(repository); ok {
repository = resetRepository
if err := r.updateStatus(ctx, req, repository.Status); err != nil {
log.Error(err, "unable to update status")
return ctrl.Result{Requeue: true}, err
}
r.recordReadiness(ctx, repository)
}
// record the value of the reconciliation request, if any
// TODO(hidde): would be better to defer this in combination with
// always patching the status sub-resource after a reconciliation.
if v, ok := meta.ReconcileAnnotationValue(repository.GetAnnotations()); ok {
repository.Status.SetLastHandledReconcileRequest(v)
}
// purge old artifacts from storage
if err := r.gc(repository); err != nil {
log.Error(err, "unable to purge old artifacts")
}
// reconcile repository by pulling the latest Git commit
reconciledRepository, reconcileErr := r.reconcile(ctx, *repository.DeepCopy())
// update status with the reconciliation result
if err := r.updateStatus(ctx, req, reconciledRepository.Status); err != nil {
log.Error(err, "unable to update status")
return ctrl.Result{Requeue: true}, err
}
// if reconciliation failed, record the failure and requeue immediately
if reconcileErr != nil {
r.event(ctx, reconciledRepository, events.EventSeverityError, reconcileErr.Error())
r.recordReadiness(ctx, reconciledRepository)
return ctrl.Result{Requeue: true}, reconcileErr
}
// emit revision change event
if repository.Status.Artifact == nil || reconciledRepository.Status.Artifact.Revision != repository.Status.Artifact.Revision {
r.event(ctx, reconciledRepository, events.EventSeverityInfo, sourcev1.GitRepositoryReadyMessage(reconciledRepository))
}
r.recordReadiness(ctx, reconciledRepository)
log.Info(fmt.Sprintf("Reconciliation finished in %s, next run in %s",
time.Since(start).String(),
repository.GetInterval().Duration.String(),
))
return ctrl.Result{RequeueAfter: repository.GetInterval().Duration}, nil
}
func (r *GitRepositoryReconciler) checkDependencies(repository sourcev1.GitRepository) error {
for _, d := range repository.Spec.Include {
dName := types.NamespacedName{Name: d.GitRepositoryRef.Name, Namespace: repository.Namespace}
var gr sourcev1.GitRepository
err := r.Get(context.Background(), dName, &gr)
if err != nil {
return fmt.Errorf("unable to get '%s' dependency: %w", dName, err)
}
if len(gr.Status.Conditions) == 0 || gr.Generation != gr.Status.ObservedGeneration {
return fmt.Errorf("dependency '%s' is not ready", dName)
}
if !apimeta.IsStatusConditionTrue(gr.Status.Conditions, meta.ReadyCondition) {
return fmt.Errorf("dependency '%s' is not ready", dName)
}
}
return nil
}
func (r *GitRepositoryReconciler) reconcile(ctx context.Context, repository sourcev1.GitRepository) (sourcev1.GitRepository, error) {
log := ctrl.LoggerFrom(ctx)
// create tmp dir for the Git clone
tmpGit, err := os.MkdirTemp("", repository.Name)
if err != nil {
err = fmt.Errorf("tmp dir error: %w", err)
return sourcev1.GitRepositoryNotReady(repository, sourcev1.StorageOperationFailedReason, err.Error()), err
}
defer func() {
if err := os.RemoveAll(tmpGit); err != nil {
log.Error(err, "failed to remove working directory", "path", tmpGit)
}
}()
// Configure auth options using secret
var authOpts *git.AuthOptions
if repository.Spec.SecretRef != nil {
name := types.NamespacedName{
Namespace: repository.GetNamespace(),
Name: repository.Spec.SecretRef.Name,
}
secret := &corev1.Secret{}
err = r.Client.Get(ctx, name, secret)
if err != nil {
err = fmt.Errorf("auth secret error: %w", err)
return sourcev1.GitRepositoryNotReady(repository, sourcev1.AuthenticationFailedReason, err.Error()), err
}
authOpts, err = git.AuthOptionsFromSecret(repository.Spec.URL, secret)
if err != nil {
return sourcev1.GitRepositoryNotReady(repository, sourcev1.AuthenticationFailedReason, err.Error()), err
}
}
checkoutOpts := git.CheckoutOptions{RecurseSubmodules: repository.Spec.RecurseSubmodules}
if ref := repository.Spec.Reference; ref != nil {
checkoutOpts.Branch = ref.Branch
checkoutOpts.Commit = ref.Commit
checkoutOpts.Tag = ref.Tag
checkoutOpts.SemVer = ref.SemVer
}
checkoutStrategy, err := strategy.CheckoutStrategyForImplementation(ctx,
git.Implementation(repository.Spec.GitImplementation), checkoutOpts)
if err != nil {
return sourcev1.GitRepositoryNotReady(repository, sourcev1.GitOperationFailedReason, err.Error()), err
}
gitCtx, cancel := context.WithTimeout(ctx, repository.Spec.Timeout.Duration)
defer cancel()
commit, err := checkoutStrategy.Checkout(gitCtx, tmpGit, repository.Spec.URL, authOpts)
if err != nil {
return sourcev1.GitRepositoryNotReady(repository, sourcev1.GitOperationFailedReason, err.Error()), err
}
artifact := r.Storage.NewArtifactFor(repository.Kind, repository.GetObjectMeta(), commit.String(), fmt.Sprintf("%s.tar.gz", commit.Hash.String()))
// copy all included repositories into the artifact
includedArtifacts := []*sourcev1.Artifact{}
for _, incl := range repository.Spec.Include {
dName := types.NamespacedName{Name: incl.GitRepositoryRef.Name, Namespace: repository.Namespace}
var gr sourcev1.GitRepository
err := r.Get(context.Background(), dName, &gr)
if err != nil {
return sourcev1.GitRepositoryNotReady(repository, meta.DependencyNotReadyReason, err.Error()), err
}
includedArtifacts = append(includedArtifacts, gr.GetArtifact())
}
// return early on unchanged revision and unchanged included repositories
if apimeta.IsStatusConditionTrue(repository.Status.Conditions, meta.ReadyCondition) && repository.GetArtifact().HasRevision(artifact.Revision) && !hasArtifactUpdated(repository.Status.IncludedArtifacts, includedArtifacts) {
if artifact.URL != repository.GetArtifact().URL {
r.Storage.SetArtifactURL(repository.GetArtifact())
repository.Status.URL = r.Storage.SetHostname(repository.Status.URL)
}
return repository, nil
}
// verify PGP signature
if repository.Spec.Verification != nil {
publicKeySecret := types.NamespacedName{
Namespace: repository.Namespace,
Name: repository.Spec.Verification.SecretRef.Name,
}
secret := &corev1.Secret{}
if err := r.Client.Get(ctx, publicKeySecret, secret); err != nil {
err = fmt.Errorf("PGP public keys secret error: %w", err)
return sourcev1.GitRepositoryNotReady(repository, sourcev1.VerificationFailedReason, err.Error()), err
}
var keyRings []string
for _, v := range secret.Data {
keyRings = append(keyRings, string(v))
}
if _, err = commit.Verify(keyRings...); err != nil {
return sourcev1.GitRepositoryNotReady(repository, sourcev1.VerificationFailedReason, err.Error()), err
}
}
// create artifact dir
err = r.Storage.MkdirAll(artifact)
if err != nil {
err = fmt.Errorf("mkdir dir error: %w", err)
return sourcev1.GitRepositoryNotReady(repository, sourcev1.StorageOperationFailedReason, err.Error()), err
}
for i, incl := range repository.Spec.Include {
toPath, err := securejoin.SecureJoin(tmpGit, incl.GetToPath())
if err != nil {
return sourcev1.GitRepositoryNotReady(repository, meta.DependencyNotReadyReason, err.Error()), err
}
err = r.Storage.CopyToPath(includedArtifacts[i], incl.GetFromPath(), toPath)
if err != nil {
return sourcev1.GitRepositoryNotReady(repository, meta.DependencyNotReadyReason, err.Error()), err
}
}
// acquire lock
unlock, err := r.Storage.Lock(artifact)
if err != nil {
err = fmt.Errorf("unable to acquire lock: %w", err)
return sourcev1.GitRepositoryNotReady(repository, sourcev1.StorageOperationFailedReason, err.Error()), err
}
defer unlock()
// archive artifact and check integrity
ignoreDomain := strings.Split(tmpGit, string(filepath.Separator))
ps, err := sourceignore.LoadIgnorePatterns(tmpGit, ignoreDomain)
if err != nil {
err = fmt.Errorf(".sourceignore error: %w", err)
return sourcev1.GitRepositoryNotReady(repository, sourcev1.StorageOperationFailedReason, err.Error()), err
}
if repository.Spec.Ignore != nil {
ps = append(ps, sourceignore.ReadPatterns(strings.NewReader(*repository.Spec.Ignore), ignoreDomain)...)
}
if err := r.Storage.Archive(&artifact, tmpGit, SourceIgnoreFilter(ps, ignoreDomain)); err != nil {
err = fmt.Errorf("storage archive error: %w", err)
return sourcev1.GitRepositoryNotReady(repository, sourcev1.StorageOperationFailedReason, err.Error()), err
}
// update latest symlink
url, err := r.Storage.Symlink(artifact, "latest.tar.gz")
if err != nil {
err = fmt.Errorf("storage symlink error: %w", err)
return sourcev1.GitRepositoryNotReady(repository, sourcev1.StorageOperationFailedReason, err.Error()), err
}
message := fmt.Sprintf("Fetched revision: %s", artifact.Revision)
return sourcev1.GitRepositoryReady(repository, artifact, includedArtifacts, url, sourcev1.GitOperationSucceedReason, message), nil
}
func (r *GitRepositoryReconciler) reconcileDelete(ctx context.Context, repository sourcev1.GitRepository) (ctrl.Result, error) {
if err := r.gc(repository); err != nil {
r.event(ctx, repository, events.EventSeverityError,
fmt.Sprintf("garbage collection for deleted resource failed: %s", err.Error()))
// Return the error so we retry the failed garbage collection
return ctrl.Result{}, err
}
// Record deleted status
r.recordReadiness(ctx, repository)
// Remove our finalizer from the list and update it
controllerutil.RemoveFinalizer(&repository, sourcev1.SourceFinalizer)
if err := r.Update(ctx, &repository); err != nil {
return ctrl.Result{}, err
}
// Stop reconciliation as the object is being deleted
return ctrl.Result{}, nil
}
// resetStatus returns a modified v1beta1.GitRepository and a boolean indicating
// if the status field has been reset.
func (r *GitRepositoryReconciler) resetStatus(repository sourcev1.GitRepository) (sourcev1.GitRepository, bool) {
// We do not have an artifact, or it does no longer exist
if repository.GetArtifact() == nil || !r.Storage.ArtifactExist(*repository.GetArtifact()) {
repository = sourcev1.GitRepositoryProgressing(repository)
repository.Status.Artifact = nil
return repository, true
}
if repository.Generation != repository.Status.ObservedGeneration {
return sourcev1.GitRepositoryProgressing(repository), true
}
return repository, false
}
// gc performs a garbage collection for the given v1beta1.GitRepository.
// It removes all but the current artifact except for when the
// deletion timestamp is set, which will result in the removal of
// all artifacts for the resource.
func (r *GitRepositoryReconciler) gc(repository sourcev1.GitRepository) error {
if !repository.DeletionTimestamp.IsZero() {
return r.Storage.RemoveAll(r.Storage.NewArtifactFor(repository.Kind, repository.GetObjectMeta(), "", "*"))
}
if repository.GetArtifact() != nil {
return r.Storage.RemoveAllButCurrent(*repository.GetArtifact())
}
return nil
}
// event emits a Kubernetes event and forwards the event to notification controller if configured
func (r *GitRepositoryReconciler) event(ctx context.Context, repository sourcev1.GitRepository, severity, msg string) {
log := ctrl.LoggerFrom(ctx)
if r.EventRecorder != nil {
r.EventRecorder.Eventf(&repository, "Normal", severity, msg)
}
if r.ExternalEventRecorder != nil {
objRef, err := reference.GetReference(r.Scheme, &repository)
if err != nil {
log.Error(err, "unable to send event")
return
}
if err := r.ExternalEventRecorder.Eventf(*objRef, nil, severity, severity, msg); err != nil {
log.Error(err, "unable to send event")
return
}
}
}
func (r *GitRepositoryReconciler) recordReadiness(ctx context.Context, repository sourcev1.GitRepository) {
log := ctrl.LoggerFrom(ctx)
if r.MetricsRecorder == nil {
return
}
objRef, err := reference.GetReference(r.Scheme, &repository)
if err != nil {
log.Error(err, "unable to record readiness metric")
return
}
if rc := apimeta.FindStatusCondition(repository.Status.Conditions, meta.ReadyCondition); rc != nil {
r.MetricsRecorder.RecordCondition(*objRef, *rc, !repository.DeletionTimestamp.IsZero())
} else {
r.MetricsRecorder.RecordCondition(*objRef, metav1.Condition{
Type: meta.ReadyCondition,
Status: metav1.ConditionUnknown,
}, !repository.DeletionTimestamp.IsZero())
}
}
func (r *GitRepositoryReconciler) recordSuspension(ctx context.Context, gitrepository sourcev1.GitRepository) {
if r.MetricsRecorder == nil {
return
}
log := ctrl.LoggerFrom(ctx)
objRef, err := reference.GetReference(r.Scheme, &gitrepository)
if err != nil {
log.Error(err, "unable to record suspended metric")
return
}
if !gitrepository.DeletionTimestamp.IsZero() {
r.MetricsRecorder.RecordSuspend(*objRef, false)
} else {
r.MetricsRecorder.RecordSuspend(*objRef, gitrepository.Spec.Suspend)
}
}
func (r *GitRepositoryReconciler) updateStatus(ctx context.Context, req ctrl.Request, newStatus sourcev1.GitRepositoryStatus) error {
var repository sourcev1.GitRepository
if err := r.Get(ctx, req.NamespacedName, &repository); err != nil {
return err
}
patch := client.MergeFrom(repository.DeepCopy())
repository.Status = newStatus
return r.Status().Patch(ctx, &repository, patch)
}
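For context, a condensed sketch of how such a reconciler is registered with a controller-runtime manager via the SetupWithManagerAndOptions API shown above; the manager setup, the storage value and the option values are placeholders and not taken from the repository's main.go:

// Hypothetical wiring, shown only to illustrate SetupWithManagerAndOptions.
mgr, err := ctrl.NewManager(ctrl.GetConfigOrDie(), ctrl.Options{Scheme: scheme})
if err != nil {
	panic(err)
}
if err := (&GitRepositoryReconciler{
	Client:        mgr.GetClient(),
	Scheme:        mgr.GetScheme(),
	Storage:       storage, // assumed to have been constructed beforehand
	EventRecorder: mgr.GetEventRecorderFor("source-controller"),
}).SetupWithManagerAndOptions(mgr, GitRepositoryReconcilerOptions{
	MaxConcurrentReconciles:   2,
	DependencyRequeueInterval: 30 * time.Second,
}); err != nil {
	panic(err)
}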

View File

@ -0,0 +1,770 @@
/*
Copyright 2020 The Flux authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package controllers
import (
"context"
"crypto/tls"
"fmt"
"net/http"
"net/url"
"os"
"os/exec"
"path"
"path/filepath"
"strings"
"time"
"github.com/go-git/go-billy/v5/memfs"
"github.com/go-git/go-git/v5"
"github.com/go-git/go-git/v5/config"
"github.com/go-git/go-git/v5/plumbing"
"github.com/go-git/go-git/v5/plumbing/object"
"github.com/go-git/go-git/v5/plumbing/transport/client"
httptransport "github.com/go-git/go-git/v5/plumbing/transport/http"
"github.com/go-git/go-git/v5/storage/memory"
. "github.com/onsi/ginkgo"
. "github.com/onsi/ginkgo/extensions/table"
. "github.com/onsi/gomega"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
"github.com/fluxcd/pkg/apis/meta"
"github.com/fluxcd/pkg/gittestserver"
"github.com/fluxcd/pkg/untar"
sourcev1 "github.com/fluxcd/source-controller/api/v1beta1"
)
var _ = Describe("GitRepositoryReconciler", func() {
const (
timeout = time.Second * 30
interval = time.Second * 1
indexInterval = time.Second * 1
)
Context("GitRepository", func() {
var (
namespace *corev1.Namespace
gitServer *gittestserver.GitServer
err error
)
BeforeEach(func() {
namespace = &corev1.Namespace{
ObjectMeta: metav1.ObjectMeta{Name: "git-repository-test" + randStringRunes(5)},
}
err = k8sClient.Create(context.Background(), namespace)
Expect(err).NotTo(HaveOccurred(), "failed to create test namespace")
cert := corev1.Secret{
ObjectMeta: metav1.ObjectMeta{
Name: "cert",
Namespace: namespace.Name,
},
Data: map[string][]byte{
"caFile": exampleCA,
},
}
err = k8sClient.Create(context.Background(), &cert)
Expect(err).NotTo(HaveOccurred())
gitServer, err = gittestserver.NewTempGitServer()
Expect(err).NotTo(HaveOccurred())
gitServer.AutoCreate()
})
AfterEach(func() {
os.RemoveAll(gitServer.Root())
err = k8sClient.Delete(context.Background(), namespace)
Expect(err).NotTo(HaveOccurred(), "failed to delete test namespace")
})
type refTestCase struct {
reference *sourcev1.GitRepositoryRef
createRefs []string
waitForReason string
expectStatus metav1.ConditionStatus
expectMessage string
expectRevision string
secretRef *meta.LocalObjectReference
gitImplementation string
}
DescribeTable("Git references tests", func(t refTestCase) {
err = gitServer.StartHTTP()
defer gitServer.StopHTTP()
Expect(err).NotTo(HaveOccurred())
u, err := url.Parse(gitServer.HTTPAddress())
Expect(err).NotTo(HaveOccurred())
u.Path = path.Join(u.Path, fmt.Sprintf("repository-%s.git", randStringRunes(5)))
fs := memfs.New()
gitrepo, err := git.Init(memory.NewStorage(), fs)
Expect(err).NotTo(HaveOccurred())
wt, err := gitrepo.Worktree()
Expect(err).NotTo(HaveOccurred())
ff, _ := fs.Create("fixture")
_ = ff.Close()
_, err = wt.Add(fs.Join("fixture"))
Expect(err).NotTo(HaveOccurred())
commit, err := wt.Commit("Sample", &git.CommitOptions{Author: &object.Signature{
Name: "John Doe",
Email: "john@example.com",
When: time.Now(),
}})
Expect(err).NotTo(HaveOccurred())
for _, ref := range t.createRefs {
hRef := plumbing.NewHashReference(plumbing.ReferenceName(ref), commit)
err = gitrepo.Storer.SetReference(hRef)
Expect(err).NotTo(HaveOccurred())
}
remote, err := gitrepo.CreateRemote(&config.RemoteConfig{
Name: "origin",
URLs: []string{u.String()},
})
Expect(err).NotTo(HaveOccurred())
err = remote.Push(&git.PushOptions{
RefSpecs: []config.RefSpec{"refs/heads/*:refs/heads/*", "refs/tags/*:refs/tags/*"},
})
Expect(err).NotTo(HaveOccurred())
t.reference.Commit = strings.Replace(t.reference.Commit, "<commit>", commit.String(), 1)
key := types.NamespacedName{
Name: fmt.Sprintf("git-ref-test-%s", randStringRunes(5)),
Namespace: namespace.Name,
}
created := &sourcev1.GitRepository{
ObjectMeta: metav1.ObjectMeta{
Name: key.Name,
Namespace: key.Namespace,
},
Spec: sourcev1.GitRepositorySpec{
URL: u.String(),
Interval: metav1.Duration{Duration: indexInterval},
Reference: t.reference,
},
}
Expect(k8sClient.Create(context.Background(), created)).Should(Succeed())
defer k8sClient.Delete(context.Background(), created)
got := &sourcev1.GitRepository{}
var cond metav1.Condition
Eventually(func() bool {
_ = k8sClient.Get(context.Background(), key, got)
for _, c := range got.Status.Conditions {
if c.Reason == t.waitForReason {
cond = c
return true
}
}
return false
}, timeout, interval).Should(BeTrue())
Expect(cond.Status).To(Equal(t.expectStatus))
Expect(cond.Message).To(ContainSubstring(t.expectMessage))
Expect(got.Status.Artifact == nil).To(Equal(t.expectRevision == ""))
if t.expectRevision != "" {
Expect(got.Status.Artifact.Revision).To(Equal(t.expectRevision + "/" + commit.String()))
}
},
Entry("branch", refTestCase{
reference: &sourcev1.GitRepositoryRef{Branch: "some-branch"},
createRefs: []string{"refs/heads/some-branch"},
waitForReason: sourcev1.GitOperationSucceedReason,
expectStatus: metav1.ConditionTrue,
expectRevision: "some-branch",
}),
Entry("branch non existing", refTestCase{
reference: &sourcev1.GitRepositoryRef{Branch: "invalid-branch"},
waitForReason: sourcev1.GitOperationFailedReason,
expectStatus: metav1.ConditionFalse,
expectMessage: "couldn't find remote ref",
}),
Entry("tag", refTestCase{
reference: &sourcev1.GitRepositoryRef{Tag: "some-tag"},
createRefs: []string{"refs/tags/some-tag"},
waitForReason: sourcev1.GitOperationSucceedReason,
expectStatus: metav1.ConditionTrue,
expectRevision: "some-tag",
}),
Entry("tag non existing", refTestCase{
reference: &sourcev1.GitRepositoryRef{Tag: "invalid-tag"},
waitForReason: sourcev1.GitOperationFailedReason,
expectStatus: metav1.ConditionFalse,
expectMessage: "couldn't find remote ref",
}),
Entry("semver", refTestCase{
reference: &sourcev1.GitRepositoryRef{SemVer: "1.0.0"},
createRefs: []string{"refs/tags/v1.0.0"},
waitForReason: sourcev1.GitOperationSucceedReason,
expectStatus: metav1.ConditionTrue,
expectRevision: "v1.0.0",
}),
Entry("semver range", refTestCase{
reference: &sourcev1.GitRepositoryRef{SemVer: ">=0.1.0 <1.0.0"},
createRefs: []string{"refs/tags/0.1.0", "refs/tags/0.1.1", "refs/tags/0.2.0", "refs/tags/1.0.0"},
waitForReason: sourcev1.GitOperationSucceedReason,
expectStatus: metav1.ConditionTrue,
expectRevision: "0.2.0",
}),
Entry("mixed semver range", refTestCase{
reference: &sourcev1.GitRepositoryRef{SemVer: ">=0.1.0 <1.0.0"},
createRefs: []string{"refs/tags/0.1.0", "refs/tags/v0.1.1", "refs/tags/v0.2.0", "refs/tags/1.0.0"},
waitForReason: sourcev1.GitOperationSucceedReason,
expectStatus: metav1.ConditionTrue,
expectRevision: "v0.2.0",
}),
Entry("semver invalid", refTestCase{
reference: &sourcev1.GitRepositoryRef{SemVer: "1.2.3.4"},
waitForReason: sourcev1.GitOperationFailedReason,
expectStatus: metav1.ConditionFalse,
expectMessage: "semver parse error: improper constraint: 1.2.3.4",
}),
Entry("semver no match", refTestCase{
reference: &sourcev1.GitRepositoryRef{SemVer: "1.0.0"},
waitForReason: sourcev1.GitOperationFailedReason,
expectStatus: metav1.ConditionFalse,
expectMessage: "no match found for semver: 1.0.0",
}),
Entry("commit", refTestCase{
reference: &sourcev1.GitRepositoryRef{
Commit: "<commit>",
},
waitForReason: sourcev1.GitOperationSucceedReason,
expectStatus: metav1.ConditionTrue,
expectRevision: "HEAD",
}),
Entry("commit in branch", refTestCase{
reference: &sourcev1.GitRepositoryRef{
Branch: "some-branch",
Commit: "<commit>",
},
createRefs: []string{"refs/heads/some-branch"},
waitForReason: sourcev1.GitOperationSucceedReason,
expectStatus: metav1.ConditionTrue,
expectRevision: "some-branch",
}),
Entry("invalid commit", refTestCase{
reference: &sourcev1.GitRepositoryRef{
Branch: "master",
Commit: "invalid",
},
waitForReason: sourcev1.GitOperationFailedReason,
expectStatus: metav1.ConditionFalse,
expectMessage: "failed to resolve commit object for 'invalid': object not found",
}),
)
DescribeTable("Git self signed cert tests", func(t refTestCase) {
err = gitServer.StartHTTPS(examplePublicKey, examplePrivateKey, exampleCA, "example.com")
defer gitServer.StopHTTP()
Expect(err).NotTo(HaveOccurred())
u, err := url.Parse(gitServer.HTTPAddress())
Expect(err).NotTo(HaveOccurred())
u.Path = path.Join(u.Path, fmt.Sprintf("repository-%s.git", randStringRunes(5)))
var transport = httptransport.NewClient(&http.Client{
Transport: &http.Transport{
TLSClientConfig: &tls.Config{InsecureSkipVerify: true},
},
})
client.InstallProtocol("https", transport)
fs := memfs.New()
gitrepo, err := git.Init(memory.NewStorage(), fs)
Expect(err).NotTo(HaveOccurred())
wt, err := gitrepo.Worktree()
Expect(err).NotTo(HaveOccurred())
ff, _ := fs.Create("fixture")
_ = ff.Close()
_, err = wt.Add(fs.Join("fixture"))
Expect(err).NotTo(HaveOccurred())
commit, err := wt.Commit("Sample", &git.CommitOptions{Author: &object.Signature{
Name: "John Doe",
Email: "john@example.com",
When: time.Now(),
}})
Expect(err).NotTo(HaveOccurred())
for _, ref := range t.createRefs {
hRef := plumbing.NewHashReference(plumbing.ReferenceName(ref), commit)
err = gitrepo.Storer.SetReference(hRef)
Expect(err).NotTo(HaveOccurred())
}
remote, err := gitrepo.CreateRemote(&config.RemoteConfig{
Name: "origin",
URLs: []string{u.String()},
})
Expect(err).NotTo(HaveOccurred())
err = remote.Push(&git.PushOptions{
RefSpecs: []config.RefSpec{"refs/heads/*:refs/heads/*", "refs/tags/*:refs/tags/*"},
})
Expect(err).NotTo(HaveOccurred())
t.reference.Commit = strings.Replace(t.reference.Commit, "<commit>", commit.String(), 1)
client.InstallProtocol("https", httptransport.DefaultClient)
key := types.NamespacedName{
Name: fmt.Sprintf("git-ref-test-%s", randStringRunes(5)),
Namespace: namespace.Name,
}
created := &sourcev1.GitRepository{
ObjectMeta: metav1.ObjectMeta{
Name: key.Name,
Namespace: key.Namespace,
},
Spec: sourcev1.GitRepositorySpec{
URL: u.String(),
Interval: metav1.Duration{Duration: indexInterval},
Reference: t.reference,
GitImplementation: t.gitImplementation,
SecretRef: t.secretRef,
},
}
Expect(k8sClient.Create(context.Background(), created)).Should(Succeed())
defer k8sClient.Delete(context.Background(), created)
got := &sourcev1.GitRepository{}
var cond metav1.Condition
Eventually(func() bool {
_ = k8sClient.Get(context.Background(), key, got)
for _, c := range got.Status.Conditions {
if c.Reason == t.waitForReason {
cond = c
return true
}
}
return false
}, timeout, interval).Should(BeTrue())
Expect(cond.Status).To(Equal(t.expectStatus))
Expect(cond.Message).To(ContainSubstring(t.expectMessage))
Expect(got.Status.Artifact == nil).To(Equal(t.expectRevision == ""))
},
Entry("self signed libgit2 without CA", refTestCase{
reference: &sourcev1.GitRepositoryRef{Branch: "main"},
waitForReason: sourcev1.GitOperationFailedReason,
expectStatus: metav1.ConditionFalse,
expectMessage: "unable to clone: user rejected certificate",
gitImplementation: sourcev1.LibGit2Implementation,
}),
Entry("self signed libgit2 with CA", refTestCase{
reference: &sourcev1.GitRepositoryRef{Branch: "some-branch"},
createRefs: []string{"refs/heads/some-branch"},
waitForReason: sourcev1.GitOperationSucceedReason,
expectStatus: metav1.ConditionTrue,
expectRevision: "some-branch",
secretRef: &meta.LocalObjectReference{Name: "cert"},
gitImplementation: sourcev1.LibGit2Implementation,
}),
Entry("self signed go-git without CA", refTestCase{
reference: &sourcev1.GitRepositoryRef{Branch: "main"},
waitForReason: sourcev1.GitOperationFailedReason,
expectStatus: metav1.ConditionFalse,
expectMessage: "x509: certificate signed by unknown authority",
}),
Entry("self signed go-git with CA", refTestCase{
reference: &sourcev1.GitRepositoryRef{Branch: "some-branch"},
createRefs: []string{"refs/heads/some-branch"},
waitForReason: sourcev1.GitOperationSucceedReason,
expectStatus: metav1.ConditionTrue,
expectRevision: "some-branch",
secretRef: &meta.LocalObjectReference{Name: "cert"},
gitImplementation: sourcev1.GoGitImplementation,
}),
)
Context("recurse submodules", func() {
It("downloads submodules when asked", func() {
Expect(gitServer.StartHTTP()).To(Succeed())
defer gitServer.StopHTTP()
u, err := url.Parse(gitServer.HTTPAddress())
Expect(err).NotTo(HaveOccurred())
subRepoURL := *u
subRepoURL.Path = path.Join(u.Path, fmt.Sprintf("subrepository-%s.git", randStringRunes(5)))
// create the git repo to use as a submodule
fs := memfs.New()
subRepo, err := git.Init(memory.NewStorage(), fs)
Expect(err).NotTo(HaveOccurred())
wt, err := subRepo.Worktree()
Expect(err).NotTo(HaveOccurred())
ff, _ := fs.Create("fixture")
_ = ff.Close()
_, err = wt.Add(fs.Join("fixture"))
Expect(err).NotTo(HaveOccurred())
_, err = wt.Commit("Sample", &git.CommitOptions{Author: &object.Signature{
Name: "John Doe",
Email: "john@example.com",
When: time.Now(),
}})
Expect(err).NotTo(HaveOccurred())
remote, err := subRepo.CreateRemote(&config.RemoteConfig{
Name: "origin",
URLs: []string{subRepoURL.String()},
})
Expect(err).NotTo(HaveOccurred())
err = remote.Push(&git.PushOptions{
RefSpecs: []config.RefSpec{"refs/heads/*:refs/heads/*", "refs/tags/*:refs/tags/*"},
})
Expect(err).NotTo(HaveOccurred())
// this repository is backed by a real directory, so that
// `git submodule add` can be exec'd in it later
tmp, err := os.MkdirTemp("", "flux-test")
Expect(err).NotTo(HaveOccurred())
defer os.RemoveAll(tmp)
repoDir := filepath.Join(tmp, "git")
repo, err := git.PlainInit(repoDir, false)
Expect(err).NotTo(HaveOccurred())
wt, err = repo.Worktree()
Expect(err).NotTo(HaveOccurred())
_, err = wt.Commit("Initial revision", &git.CommitOptions{
Author: &object.Signature{
Name: "John Doe",
Email: "john@example.com",
When: time.Now(),
}})
Expect(err).NotTo(HaveOccurred())
submodAdd := exec.Command("git", "submodule", "add", "-b", "master", subRepoURL.String(), "sub")
submodAdd.Dir = repoDir
out, err := submodAdd.CombinedOutput()
os.Stdout.Write(out)
Expect(err).NotTo(HaveOccurred())
_, err = wt.Commit("Add submodule", &git.CommitOptions{
Author: &object.Signature{
Name: "John Doe",
Email: "john@example.com",
When: time.Now(),
}})
Expect(err).NotTo(HaveOccurred())
mainRepoURL := *u
mainRepoURL.Path = path.Join(u.Path, fmt.Sprintf("repository-%s.git", randStringRunes(5)))
remote, err = repo.CreateRemote(&config.RemoteConfig{
Name: "origin",
URLs: []string{mainRepoURL.String()},
})
Expect(err).NotTo(HaveOccurred())
err = remote.Push(&git.PushOptions{
RefSpecs: []config.RefSpec{"refs/heads/*:refs/heads/*", "refs/tags/*:refs/tags/*"},
})
Expect(err).NotTo(HaveOccurred())
key := types.NamespacedName{
Name: fmt.Sprintf("git-ref-test-%s", randStringRunes(5)),
Namespace: namespace.Name,
}
created := &sourcev1.GitRepository{
ObjectMeta: metav1.ObjectMeta{
Name: key.Name,
Namespace: key.Namespace,
},
Spec: sourcev1.GitRepositorySpec{
URL: mainRepoURL.String(),
Interval: metav1.Duration{Duration: indexInterval},
Reference: &sourcev1.GitRepositoryRef{Branch: "master"},
GitImplementation: sourcev1.GoGitImplementation, // only works with go-git
RecurseSubmodules: true,
},
}
Expect(k8sClient.Create(context.Background(), created)).Should(Succeed())
defer k8sClient.Delete(context.Background(), created)
got := &sourcev1.GitRepository{}
Eventually(func() bool {
_ = k8sClient.Get(context.Background(), key, got)
for _, c := range got.Status.Conditions {
if c.Reason == sourcev1.GitOperationSucceedReason {
return true
}
}
return false
}, timeout, interval).Should(BeTrue())
// check that the downloaded artifact includes the
// file from the submodule
res, err := http.Get(got.Status.URL)
Expect(err).NotTo(HaveOccurred())
Expect(res.StatusCode).To(Equal(http.StatusOK))
_, err = untar.Untar(res.Body, filepath.Join(tmp, "tar"))
Expect(err).NotTo(HaveOccurred())
Expect(filepath.Join(tmp, "tar", "sub", "fixture")).To(BeAnExistingFile())
})
})
type includeTestCase struct {
fromPath string
toPath string
createFiles []string
checkFiles []string
}
DescribeTable("Include git repositories", func(t includeTestCase) {
Expect(gitServer.StartHTTP()).To(Succeed())
defer gitServer.StopHTTP()
u, err := url.Parse(gitServer.HTTPAddress())
Expect(err).NotTo(HaveOccurred())
// create the main git repository
mainRepoURL := *u
mainRepoURL.Path = path.Join(u.Path, fmt.Sprintf("repository-%s.git", randStringRunes(5)))
mainFs := memfs.New()
mainRepo, err := git.Init(memory.NewStorage(), mainFs)
Expect(err).NotTo(HaveOccurred())
mainWt, err := mainRepo.Worktree()
Expect(err).NotTo(HaveOccurred())
ff, _ := mainFs.Create("fixture")
_ = ff.Close()
_, err = mainWt.Add(mainFs.Join("fixture"))
Expect(err).NotTo(HaveOccurred())
_, err = mainWt.Commit("Sample", &git.CommitOptions{Author: &object.Signature{
Name: "John Doe",
Email: "john@example.com",
When: time.Now(),
}})
Expect(err).NotTo(HaveOccurred())
mainRemote, err := mainRepo.CreateRemote(&config.RemoteConfig{
Name: "origin",
URLs: []string{mainRepoURL.String()},
})
Expect(err).NotTo(HaveOccurred())
err = mainRemote.Push(&git.PushOptions{
RefSpecs: []config.RefSpec{"refs/heads/*:refs/heads/*", "refs/tags/*:refs/tags/*"},
})
Expect(err).NotTo(HaveOccurred())
// create the sub git repository
subRepoURL := *u
subRepoURL.Path = path.Join(u.Path, fmt.Sprintf("subrepository-%s.git", randStringRunes(5)))
subFs := memfs.New()
subRepo, err := git.Init(memory.NewStorage(), subFs)
Expect(err).NotTo(HaveOccurred())
subWt, err := subRepo.Worktree()
Expect(err).NotTo(HaveOccurred())
for _, v := range t.createFiles {
if dir := filepath.Dir(v); dir != "." {
err := subFs.MkdirAll(dir, 0700)
Expect(err).NotTo(HaveOccurred())
}
ff, err := subFs.Create(v)
Expect(err).NotTo(HaveOccurred())
_ = ff.Close()
_, err = subWt.Add(subFs.Join(v))
Expect(err).NotTo(HaveOccurred())
}
_, err = subWt.Commit("Sample", &git.CommitOptions{Author: &object.Signature{
Name: "John Doe",
Email: "john@example.com",
When: time.Now(),
}})
Expect(err).NotTo(HaveOccurred())
subRemote, err := subRepo.CreateRemote(&config.RemoteConfig{
Name: "origin",
URLs: []string{subRepoURL.String()},
})
Expect(err).NotTo(HaveOccurred())
err = subRemote.Push(&git.PushOptions{
RefSpecs: []config.RefSpec{"refs/heads/*:refs/heads/*", "refs/tags/*:refs/tags/*"},
})
Expect(err).NotTo(HaveOccurred())
// create main and sub repositories
subKey := types.NamespacedName{
Name: fmt.Sprintf("git-ref-test-%s", randStringRunes(5)),
Namespace: namespace.Name,
}
subCreated := &sourcev1.GitRepository{
ObjectMeta: metav1.ObjectMeta{
Name: subKey.Name,
Namespace: subKey.Namespace,
},
Spec: sourcev1.GitRepositorySpec{
URL: subRepoURL.String(),
Interval: metav1.Duration{Duration: indexInterval},
Reference: &sourcev1.GitRepositoryRef{Branch: "master"},
},
}
Expect(k8sClient.Create(context.Background(), subCreated)).Should(Succeed())
defer k8sClient.Delete(context.Background(), subCreated)
mainKey := types.NamespacedName{
Name: fmt.Sprintf("git-ref-test-%s", randStringRunes(5)),
Namespace: namespace.Name,
}
mainCreated := &sourcev1.GitRepository{
ObjectMeta: metav1.ObjectMeta{
Name: mainKey.Name,
Namespace: mainKey.Namespace,
},
Spec: sourcev1.GitRepositorySpec{
URL: mainRepoURL.String(),
Interval: metav1.Duration{Duration: indexInterval},
Reference: &sourcev1.GitRepositoryRef{Branch: "master"},
Include: []sourcev1.GitRepositoryInclude{
{
GitRepositoryRef: meta.LocalObjectReference{
Name: subKey.Name,
},
FromPath: t.fromPath,
ToPath: t.toPath,
},
},
},
}
Expect(k8sClient.Create(context.Background(), mainCreated)).Should(Succeed())
defer k8sClient.Delete(context.Background(), mainCreated)
got := &sourcev1.GitRepository{}
Eventually(func() bool {
_ = k8sClient.Get(context.Background(), mainKey, got)
for _, c := range got.Status.Conditions {
if c.Reason == sourcev1.GitOperationSucceedReason {
return true
}
}
return false
}, timeout, interval).Should(BeTrue())
// check the contents of the repository
res, err := http.Get(got.Status.URL)
Expect(err).NotTo(HaveOccurred())
Expect(res.StatusCode).To(Equal(http.StatusOK))
tmp, err := os.MkdirTemp("", "flux-test")
Expect(err).NotTo(HaveOccurred())
defer os.RemoveAll(tmp)
_, err = untar.Untar(res.Body, filepath.Join(tmp, "tar"))
Expect(err).NotTo(HaveOccurred())
for _, v := range t.checkFiles {
Expect(filepath.Join(tmp, "tar", v)).To(BeAnExistingFile())
}
// add new file to check that the change is reconciled
ff, err = subFs.Create(subFs.Join(t.fromPath, "test"))
Expect(err).NotTo(HaveOccurred())
err = ff.Close()
Expect(err).NotTo(HaveOccurred())
_, err = subWt.Add(subFs.Join(t.fromPath, "test"))
Expect(err).NotTo(HaveOccurred())
hash, err := subWt.Commit("Sample", &git.CommitOptions{Author: &object.Signature{
Name: "John Doe",
Email: "john@example.com",
When: time.Now(),
}})
Expect(err).NotTo(HaveOccurred())
err = subRemote.Push(&git.PushOptions{
RefSpecs: []config.RefSpec{"refs/heads/*:refs/heads/*", "refs/tags/*:refs/tags/*"},
})
Expect(err).NotTo(HaveOccurred())
got = &sourcev1.GitRepository{}
Eventually(func() bool {
_ = k8sClient.Get(context.Background(), mainKey, got)
if got.Status.IncludedArtifacts[0].Revision == fmt.Sprintf("master/%s", hash.String()) {
for _, c := range got.Status.Conditions {
if c.Reason == sourcev1.GitOperationSucceedReason {
return true
}
}
}
return false
}, timeout, interval).Should(BeTrue())
// get the main repository artifact
res, err = http.Get(got.Status.URL)
Expect(err).NotTo(HaveOccurred())
Expect(res.StatusCode).To(Equal(http.StatusOK))
tmp, err = os.MkdirTemp("", "flux-test")
Expect(err).NotTo(HaveOccurred())
defer os.RemoveAll(tmp)
_, err = untar.Untar(res.Body, filepath.Join(tmp, "tar"))
Expect(err).NotTo(HaveOccurred())
Expect(filepath.Join(tmp, "tar", t.toPath, "test")).To(BeAnExistingFile())
},
Entry("only to path", includeTestCase{
fromPath: "",
toPath: "sub",
createFiles: []string{"dir1", "dir2"},
checkFiles: []string{"sub/dir1", "sub/dir2"},
}),
Entry("to nested path", includeTestCase{
fromPath: "",
toPath: "sub/nested",
createFiles: []string{"dir1", "dir2"},
checkFiles: []string{"sub/nested/dir1", "sub/nested/dir2"},
}),
Entry("from and to path", includeTestCase{
fromPath: "nested",
toPath: "sub",
createFiles: []string{"dir1", "nested/dir2", "nested/dir3", "nested/foo/bar"},
checkFiles: []string{"sub/dir2", "sub/dir3", "sub/foo/bar"},
}),
)
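// In short: each included artifact is copied from spec.include[].fromPath into
// spec.include[].toPath of the main artifact. For example (values taken from
// the table entries above), with fromPath "nested" and toPath "sub", a file
// "nested/dir2" in the included repository ends up as "sub/dir2" in the main
// repository's artifact; an empty fromPath copies the whole artifact root.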
})
})

View File

@ -0,0 +1,865 @@
/*
Copyright 2020 The Flux authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package controllers
import (
"context"
"errors"
"fmt"
"net/url"
"os"
"path/filepath"
"strconv"
"strings"
"time"
securejoin "github.com/cyphar/filepath-securejoin"
helmgetter "helm.sh/helm/v3/pkg/getter"
corev1 "k8s.io/api/core/v1"
apierrs "k8s.io/apimachinery/pkg/api/errors"
apimeta "k8s.io/apimachinery/pkg/api/meta"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/types"
kuberecorder "k8s.io/client-go/tools/record"
"k8s.io/client-go/tools/reference"
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/builder"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/controller"
"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
"sigs.k8s.io/controller-runtime/pkg/handler"
"sigs.k8s.io/controller-runtime/pkg/predicate"
"sigs.k8s.io/controller-runtime/pkg/reconcile"
"sigs.k8s.io/controller-runtime/pkg/source"
"github.com/fluxcd/pkg/apis/meta"
"github.com/fluxcd/pkg/runtime/events"
"github.com/fluxcd/pkg/runtime/metrics"
"github.com/fluxcd/pkg/runtime/predicates"
"github.com/fluxcd/pkg/untar"
sourcev1 "github.com/fluxcd/source-controller/api/v1beta1"
"github.com/fluxcd/source-controller/internal/helm/chart"
"github.com/fluxcd/source-controller/internal/helm/getter"
"github.com/fluxcd/source-controller/internal/helm/repository"
)
// +kubebuilder:rbac:groups=source.toolkit.fluxcd.io,resources=helmcharts,verbs=get;list;watch;create;update;patch;delete
// +kubebuilder:rbac:groups=source.toolkit.fluxcd.io,resources=helmcharts/status,verbs=get;update;patch
// +kubebuilder:rbac:groups=source.toolkit.fluxcd.io,resources=helmcharts/finalizers,verbs=get;create;update;patch;delete
// +kubebuilder:rbac:groups="",resources=events,verbs=create;patch
// HelmChartReconciler reconciles a HelmChart object
type HelmChartReconciler struct {
client.Client
Scheme *runtime.Scheme
Storage *Storage
Getters helmgetter.Providers
EventRecorder kuberecorder.EventRecorder
ExternalEventRecorder *events.Recorder
MetricsRecorder *metrics.Recorder
}
func (r *HelmChartReconciler) SetupWithManager(mgr ctrl.Manager) error {
return r.SetupWithManagerAndOptions(mgr, HelmChartReconcilerOptions{})
}
func (r *HelmChartReconciler) SetupWithManagerAndOptions(mgr ctrl.Manager, opts HelmChartReconcilerOptions) error {
if err := mgr.GetCache().IndexField(context.TODO(), &sourcev1.HelmRepository{}, sourcev1.HelmRepositoryURLIndexKey,
r.indexHelmRepositoryByURL); err != nil {
return fmt.Errorf("failed setting index fields: %w", err)
}
if err := mgr.GetCache().IndexField(context.TODO(), &sourcev1.HelmChart{}, sourcev1.SourceIndexKey,
r.indexHelmChartBySource); err != nil {
return fmt.Errorf("failed setting index fields: %w", err)
}
return ctrl.NewControllerManagedBy(mgr).
For(&sourcev1.HelmChart{}, builder.WithPredicates(
predicate.Or(predicate.GenerationChangedPredicate{}, predicates.ReconcileRequestedPredicate{}),
)).
Watches(
&source.Kind{Type: &sourcev1.HelmRepository{}},
handler.EnqueueRequestsFromMapFunc(r.requestsForHelmRepositoryChange),
builder.WithPredicates(SourceRevisionChangePredicate{}),
).
Watches(
&source.Kind{Type: &sourcev1.GitRepository{}},
handler.EnqueueRequestsFromMapFunc(r.requestsForGitRepositoryChange),
builder.WithPredicates(SourceRevisionChangePredicate{}),
).
Watches(
&source.Kind{Type: &sourcev1.Bucket{}},
handler.EnqueueRequestsFromMapFunc(r.requestsForBucketChange),
builder.WithPredicates(SourceRevisionChangePredicate{}),
).
WithOptions(controller.Options{MaxConcurrentReconciles: opts.MaxConcurrentReconciles}).
Complete(r)
}
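// Besides spec changes on the HelmChart itself, the watches above re-enqueue
// charts whenever the artifact revision of a referenced HelmRepository,
// GitRepository or Bucket changes (see SourceRevisionChangePredicate), so a new
// upstream artifact triggers a fresh chart build without waiting for the next
// interval.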
func (r *HelmChartReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {
start := time.Now()
log := ctrl.LoggerFrom(ctx)
var chart sourcev1.HelmChart
if err := r.Get(ctx, req.NamespacedName, &chart); err != nil {
return ctrl.Result{Requeue: true}, client.IgnoreNotFound(err)
}
// Record suspended status metric
defer r.recordSuspension(ctx, chart)
// Add our finalizer if it does not exist
if !controllerutil.ContainsFinalizer(&chart, sourcev1.SourceFinalizer) {
patch := client.MergeFrom(chart.DeepCopy())
controllerutil.AddFinalizer(&chart, sourcev1.SourceFinalizer)
if err := r.Patch(ctx, &chart, patch); err != nil {
log.Error(err, "unable to register finalizer")
return ctrl.Result{}, err
}
}
// Examine if the object is under deletion
if !chart.ObjectMeta.DeletionTimestamp.IsZero() {
return r.reconcileDelete(ctx, chart)
}
// Return early if the object is suspended.
if chart.Spec.Suspend {
log.Info("Reconciliation is suspended for this object")
return ctrl.Result{}, nil
}
// Record reconciliation duration
if r.MetricsRecorder != nil {
objRef, err := reference.GetReference(r.Scheme, &chart)
if err != nil {
return ctrl.Result{}, err
}
defer r.MetricsRecorder.RecordDuration(*objRef, start)
}
// Conditionally set progressing condition in status
resetChart, changed := r.resetStatus(chart)
if changed {
chart = resetChart
if err := r.updateStatus(ctx, req, chart.Status); err != nil {
log.Error(err, "unable to update status")
return ctrl.Result{Requeue: true}, err
}
r.recordReadiness(ctx, chart)
}
// Record the value of the reconciliation request, if any
// TODO(hidde): would be better to defer this in combination with
// always patching the status sub-resource after a reconciliation.
if v, ok := meta.ReconcileAnnotationValue(chart.GetAnnotations()); ok {
chart.Status.SetLastHandledReconcileRequest(v)
}
// Purge all but current artifact from storage
if err := r.gc(chart); err != nil {
log.Error(err, "unable to purge old artifacts")
}
// Retrieve the source
source, err := r.getSource(ctx, chart)
if err != nil {
chart = sourcev1.HelmChartNotReady(*chart.DeepCopy(), sourcev1.ChartPullFailedReason, err.Error())
if err := r.updateStatus(ctx, req, chart.Status); err != nil {
log.Error(err, "unable to update status")
}
return ctrl.Result{Requeue: true}, err
}
// Assert source is ready
if source.GetArtifact() == nil {
err = fmt.Errorf("no artifact found for source `%s` kind '%s'",
chart.Spec.SourceRef.Name, chart.Spec.SourceRef.Kind)
chart = sourcev1.HelmChartNotReady(*chart.DeepCopy(), sourcev1.ChartPullFailedReason, err.Error())
if err := r.updateStatus(ctx, req, chart.Status); err != nil {
log.Error(err, "unable to update status")
}
r.recordReadiness(ctx, chart)
return ctrl.Result{Requeue: true}, err
}
// Create working directory
workDir, err := os.MkdirTemp("", chart.Kind+"-"+chart.Namespace+"-"+chart.Name+"-")
if err != nil {
err = fmt.Errorf("failed to create temporary working directory: %w", err)
chart = sourcev1.HelmChartNotReady(*chart.DeepCopy(), sourcev1.ChartPullFailedReason, err.Error())
if err := r.updateStatus(ctx, req, chart.Status); err != nil {
log.Error(err, "unable to update status")
}
r.recordReadiness(ctx, chart)
return ctrl.Result{Requeue: true}, err
}
defer func() {
if err := os.RemoveAll(workDir); err != nil {
log.Error(err, "failed to remove working directory", "path", workDir)
}
}()
// Perform the reconciliation for the chart source type
var reconciledChart sourcev1.HelmChart
var reconcileErr error
switch typedSource := source.(type) {
case *sourcev1.HelmRepository:
reconciledChart, reconcileErr = r.fromHelmRepository(ctx, *typedSource, *chart.DeepCopy(), workDir, changed)
case *sourcev1.GitRepository, *sourcev1.Bucket:
reconciledChart, reconcileErr = r.fromTarballArtifact(ctx, *typedSource.GetArtifact(), *chart.DeepCopy(),
workDir, changed)
default:
err := fmt.Errorf("unable to reconcile unsupported source reference kind '%s'", chart.Spec.SourceRef.Kind)
return ctrl.Result{Requeue: false}, err
}
// Update status with the reconciliation result
if err := r.updateStatus(ctx, req, reconciledChart.Status); err != nil {
log.Error(err, "unable to update status")
return ctrl.Result{Requeue: true}, err
}
// If reconciliation failed, record the failure and requeue immediately
if reconcileErr != nil {
r.event(ctx, reconciledChart, events.EventSeverityError, reconcileErr.Error())
r.recordReadiness(ctx, reconciledChart)
return ctrl.Result{Requeue: true}, reconcileErr
}
// Emit an event if we did not have an artifact before, or the revision has changed
if (chart.GetArtifact() == nil && reconciledChart.GetArtifact() != nil) ||
(chart.GetArtifact() != nil && reconciledChart.GetArtifact() != nil && reconciledChart.GetArtifact().Revision != chart.GetArtifact().Revision) {
r.event(ctx, reconciledChart, events.EventSeverityInfo, sourcev1.HelmChartReadyMessage(reconciledChart))
}
r.recordReadiness(ctx, reconciledChart)
log.Info(fmt.Sprintf("Reconciliation finished in %s, next run in %s",
time.Since(start).String(),
chart.GetInterval().Duration.String(),
))
return ctrl.Result{RequeueAfter: chart.GetInterval().Duration}, nil
}
type HelmChartReconcilerOptions struct {
MaxConcurrentReconciles int
}
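// A minimal wiring sketch (hypothetical; mgr is a ctrl.Manager, and storage
// and getters are assumed variables, not part of this file):
//
//	if err := (&HelmChartReconciler{
//		Client:  mgr.GetClient(),
//		Scheme:  mgr.GetScheme(),
//		Storage: storage,
//		Getters: getters,
//	}).SetupWithManagerAndOptions(mgr, HelmChartReconcilerOptions{
//		MaxConcurrentReconciles: 2,
//	}); err != nil {
//		return fmt.Errorf("unable to create HelmChart controller: %w", err)
//	}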
func (r *HelmChartReconciler) getSource(ctx context.Context, chart sourcev1.HelmChart) (sourcev1.Source, error) {
var source sourcev1.Source
namespacedName := types.NamespacedName{
Namespace: chart.GetNamespace(),
Name: chart.Spec.SourceRef.Name,
}
switch chart.Spec.SourceRef.Kind {
case sourcev1.HelmRepositoryKind:
var repository sourcev1.HelmRepository
err := r.Client.Get(ctx, namespacedName, &repository)
if err != nil {
return source, fmt.Errorf("failed to retrieve source: %w", err)
}
source = &repository
case sourcev1.GitRepositoryKind:
var repository sourcev1.GitRepository
err := r.Client.Get(ctx, namespacedName, &repository)
if err != nil {
return source, fmt.Errorf("failed to retrieve source: %w", err)
}
source = &repository
case sourcev1.BucketKind:
var bucket sourcev1.Bucket
err := r.Client.Get(ctx, namespacedName, &bucket)
if err != nil {
return source, fmt.Errorf("failed to retrieve source: %w", err)
}
source = &bucket
default:
return source, fmt.Errorf("source `%s` kind '%s' not supported",
chart.Spec.SourceRef.Name, chart.Spec.SourceRef.Kind)
}
return source, nil
}
func (r *HelmChartReconciler) fromHelmRepository(ctx context.Context, repo sourcev1.HelmRepository, c sourcev1.HelmChart,
workDir string, force bool) (sourcev1.HelmChart, error) {
// Configure Index getter options
clientOpts := []helmgetter.Option{
helmgetter.WithURL(repo.Spec.URL),
helmgetter.WithTimeout(repo.Spec.Timeout.Duration),
helmgetter.WithPassCredentialsAll(repo.Spec.PassCredentials),
}
if secret, err := r.getHelmRepositorySecret(ctx, &repo); err != nil {
return sourcev1.HelmChartNotReady(c, sourcev1.AuthenticationFailedReason, err.Error()), err
} else if secret != nil {
// Create temporary working directory for credentials
authDir := filepath.Join(workDir, "creds")
if err := os.Mkdir(authDir, 0700); err != nil {
err = fmt.Errorf("failed to create temporary directory for repository credentials: %w", err)
return sourcev1.HelmChartNotReady(c, sourcev1.StorageOperationFailedReason, err.Error()), err
}
opts, err := getter.ClientOptionsFromSecret(authDir, *secret)
if err != nil {
err = fmt.Errorf("failed to create client options for HelmRepository '%s': %w", repo.Name, err)
return sourcev1.HelmChartNotReady(c, sourcev1.AuthenticationFailedReason, err.Error()), err
}
clientOpts = append(clientOpts, opts...)
}
// Initialize the chart repository
chartRepo, err := repository.NewChartRepository(repo.Spec.URL, r.Storage.LocalPath(*repo.GetArtifact()), r.Getters, clientOpts)
if err != nil {
switch err.(type) {
case *url.Error:
return sourcev1.HelmChartNotReady(c, sourcev1.URLInvalidReason, err.Error()), err
default:
return sourcev1.HelmChartNotReady(c, sourcev1.ChartPullFailedReason, err.Error()), err
}
}
// Build the chart
cb := chart.NewRemoteBuilder(chartRepo)
ref := chart.RemoteReference{Name: c.Spec.Chart, Version: c.Spec.Version}
opts := chart.BuildOptions{
ValuesFiles: c.GetValuesFiles(),
Force: force,
}
if artifact := c.GetArtifact(); artifact != nil {
opts.CachedChart = r.Storage.LocalPath(*artifact)
}
// Set the VersionMetadata to the object's Generation if ValuesFiles is defined
// This ensures changes can be noticed by the Artifact consumer
if len(opts.GetValuesFiles()) > 0 {
opts.VersionMetadata = strconv.FormatInt(c.Generation, 10)
}
b, err := cb.Build(ctx, ref, filepath.Join(workDir, "chart.tgz"), opts)
if err != nil {
return sourcev1.HelmChartNotReady(c, sourcev1.ChartPullFailedReason, err.Error()), err
}
newArtifact := r.Storage.NewArtifactFor(c.Kind, c.GetObjectMeta(), b.Version,
fmt.Sprintf("%s-%s.tgz", b.Name, b.Version))
// If the path of the returned build equals the cache path,
// there are no changes to the chart
if b.Path == opts.CachedChart {
// Ensure hostname is updated
if c.GetArtifact().URL != newArtifact.URL {
r.Storage.SetArtifactURL(c.GetArtifact())
c.Status.URL = r.Storage.SetHostname(c.Status.URL)
}
return c, nil
}
// Ensure artifact directory exists
err = r.Storage.MkdirAll(newArtifact)
if err != nil {
err = fmt.Errorf("unable to create chart directory: %w", err)
return sourcev1.HelmChartNotReady(c, sourcev1.StorageOperationFailedReason, err.Error()), err
}
// Acquire a lock for the artifact
unlock, err := r.Storage.Lock(newArtifact)
if err != nil {
err = fmt.Errorf("unable to acquire lock: %w", err)
return sourcev1.HelmChartNotReady(c, sourcev1.StorageOperationFailedReason, err.Error()), err
}
defer unlock()
// Copy the packaged chart to the artifact path
if err = r.Storage.CopyFromPath(&newArtifact, b.Path); err != nil {
err = fmt.Errorf("failed to write chart package to storage: %w", err)
return sourcev1.HelmChartNotReady(c, sourcev1.StorageOperationFailedReason, err.Error()), err
}
// Update symlink
cUrl, err := r.Storage.Symlink(newArtifact, fmt.Sprintf("%s-latest.tgz", b.Name))
if err != nil {
err = fmt.Errorf("storage error: %w", err)
return sourcev1.HelmChartNotReady(c, sourcev1.StorageOperationFailedReason, err.Error()), err
}
return sourcev1.HelmChartReady(c, newArtifact, cUrl, sourcev1.ChartPullSucceededReason, b.Summary()), nil
}
func (r *HelmChartReconciler) fromTarballArtifact(ctx context.Context, source sourcev1.Artifact, c sourcev1.HelmChart,
workDir string, force bool) (sourcev1.HelmChart, error) {
// Create temporary working directory to untar into
sourceDir := filepath.Join(workDir, "source")
if err := os.Mkdir(sourceDir, 0700); err != nil {
err = fmt.Errorf("failed to create temporary directory to untar source into: %w", err)
return sourcev1.HelmChartNotReady(c, sourcev1.StorageOperationFailedReason, err.Error()), err
}
// Open the tarball artifact file and untar files into working directory
f, err := os.Open(r.Storage.LocalPath(source))
if err != nil {
err = fmt.Errorf("artifact open error: %w", err)
return sourcev1.HelmChartNotReady(c, sourcev1.StorageOperationFailedReason, err.Error()), err
}
if _, err = untar.Untar(f, sourceDir); err != nil {
_ = f.Close()
err = fmt.Errorf("artifact untar error: %w", err)
return sourcev1.HelmChartNotReady(c, sourcev1.StorageOperationFailedReason, err.Error()), err
}
if err = f.Close(); err != nil {
err = fmt.Errorf("artifact close error: %w", err)
return sourcev1.HelmChartNotReady(c, sourcev1.StorageOperationFailedReason, err.Error()), err
}
chartPath, err := securejoin.SecureJoin(sourceDir, c.Spec.Chart)
if err != nil {
return sourcev1.HelmChartNotReady(c, sourcev1.StorageOperationFailedReason, err.Error()), err
}
// Setup dependency manager
authDir := filepath.Join(workDir, "creds")
if err = os.Mkdir(authDir, 0700); err != nil {
err = fmt.Errorf("failed to create temporaRy directory for dependency credentials: %w", err)
return sourcev1.HelmChartNotReady(c, sourcev1.StorageOperationFailedReason, err.Error()), err
}
dm := chart.NewDependencyManager(
chart.WithRepositoryCallback(r.namespacedChartRepositoryCallback(ctx, authDir, c.GetNamespace())),
)
defer dm.Clear()
// Configure builder options, including any previously cached chart
opts := chart.BuildOptions{
ValuesFiles: c.GetValuesFiles(),
Force: force,
}
if artifact := c.Status.Artifact; artifact != nil {
opts.CachedChart = artifact.Path
}
// Configure revision metadata for chart build if we should react to revision changes
if c.Spec.ReconcileStrategy == sourcev1.ReconcileStrategyRevision {
rev := source.Revision
if c.Spec.SourceRef.Kind == sourcev1.GitRepositoryKind {
// Split the reference by the `/` delimiter which may be present,
// and take the last entry which contains the SHA.
split := strings.Split(source.Revision, "/")
rev = split[len(split)-1]
}
if kind := c.Spec.SourceRef.Kind; kind == sourcev1.GitRepositoryKind || kind == sourcev1.BucketKind {
// The SemVer from the metadata is at times used in e.g. the label metadata for a resource
// in a chart, which has a limited length of 63 characters.
// To not fill most of this space with a full length SHA hex (40 characters for SHA-1, and
// even more for SHA-2 for a chart from a Bucket), we shorten this to the first 12
// characters taken from the hex.
// For SHA-1, this has proven to be unique in the Linux kernel with over 875,000 commits
// (http://git-scm.com/book/en/v2/Git-Tools-Revision-Selection#Short-SHA-1).
// Note that for a collision to be problematic, it would need to happen right after the
// previous SHA for the artifact, which is highly unlikely, if not virtually impossible.
// Ref: https://en.wikipedia.org/wiki/Birthday_attack
rev = rev[0:12]
}
opts.VersionMetadata = rev
}
// Set the VersionMetadata to the object's Generation if ValuesFiles is defined,
// this ensures changes can be noticed by the Artifact consumer
if len(opts.GetValuesFiles()) > 0 {
if opts.VersionMetadata != "" {
opts.VersionMetadata += "."
}
opts.VersionMetadata += strconv.FormatInt(c.Generation, 10)
}
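// Illustrative example (hypothetical values): for a GitRepository revision of
// "main/b3396adb98a6a0f5eeedd1a600beaf5e954a1f28", rev is shortened to
// "b3396adb98a6"; if values files are also set and the object's Generation is
// 3, the resulting VersionMetadata is "b3396adb98a6.3", which the builder is
// expected to append to the chart version as SemVer build metadata.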
// Build chart
cb := chart.NewLocalBuilder(dm)
b, err := cb.Build(ctx, chart.LocalReference{WorkDir: sourceDir, Path: chartPath}, filepath.Join(workDir, "chart.tgz"), opts)
if err != nil {
return sourcev1.HelmChartNotReady(c, reasonForBuildError(err), err.Error()), err
}
newArtifact := r.Storage.NewArtifactFor(c.Kind, c.GetObjectMeta(), b.Version,
fmt.Sprintf("%s-%s.tgz", b.Name, b.Version))
// If the path of the returned build equals the cache path,
// there are no changes to the chart
if apimeta.IsStatusConditionTrue(c.Status.Conditions, meta.ReadyCondition) &&
b.Path == opts.CachedChart {
// Ensure hostname is updated
if c.GetArtifact().URL != newArtifact.URL {
r.Storage.SetArtifactURL(c.GetArtifact())
c.Status.URL = r.Storage.SetHostname(c.Status.URL)
}
return c, nil
}
// Ensure artifact directory exists
err = r.Storage.MkdirAll(newArtifact)
if err != nil {
err = fmt.Errorf("unable to create chart directory: %w", err)
return sourcev1.HelmChartNotReady(c, sourcev1.StorageOperationFailedReason, err.Error()), err
}
// Acquire a lock for the artifact
unlock, err := r.Storage.Lock(newArtifact)
if err != nil {
err = fmt.Errorf("unable to acquire lock: %w", err)
return sourcev1.HelmChartNotReady(c, sourcev1.StorageOperationFailedReason, err.Error()), err
}
defer unlock()
// Copy the packaged chart to the artifact path
if err = r.Storage.CopyFromPath(&newArtifact, b.Path); err != nil {
err = fmt.Errorf("failed to write chart package to storage: %w", err)
return sourcev1.HelmChartNotReady(c, sourcev1.StorageOperationFailedReason, err.Error()), err
}
// Update symlink
cUrl, err := r.Storage.Symlink(newArtifact, fmt.Sprintf("%s-latest.tgz", b.Name))
if err != nil {
err = fmt.Errorf("storage error: %w", err)
return sourcev1.HelmChartNotReady(c, sourcev1.StorageOperationFailedReason, err.Error()), err
}
return sourcev1.HelmChartReady(c, newArtifact, cUrl, reasonForBuildSuccess(b), b.Summary()), nil
}
// namespacedChartRepositoryCallback returns a chart.GetChartRepositoryCallback
// scoped to the given namespace. Credentials for retrieved v1beta1.HelmRepository
// objects are stored in the given directory.
// The returned callback returns a repository.ChartRepository configured with the
// retrieved v1beta1.HelmRepository, or a shim with defaults if no object could
// be found.
func (r *HelmChartReconciler) namespacedChartRepositoryCallback(ctx context.Context, dir, namespace string) chart.GetChartRepositoryCallback {
return func(url string) (*repository.ChartRepository, error) {
repo, err := r.resolveDependencyRepository(ctx, url, namespace)
if err != nil {
// Return Kubernetes client errors, but ignore others
if apierrs.ReasonForError(err) != metav1.StatusReasonUnknown {
return nil, err
}
repo = &sourcev1.HelmRepository{
Spec: sourcev1.HelmRepositorySpec{
URL: url,
Timeout: &metav1.Duration{Duration: 60 * time.Second},
},
}
}
clientOpts := []helmgetter.Option{
helmgetter.WithURL(repo.Spec.URL),
helmgetter.WithTimeout(repo.Spec.Timeout.Duration),
helmgetter.WithPassCredentialsAll(repo.Spec.PassCredentials),
}
if secret, err := r.getHelmRepositorySecret(ctx, repo); err != nil {
return nil, err
} else if secret != nil {
opts, err := getter.ClientOptionsFromSecret(dir, *secret)
if err != nil {
return nil, err
}
clientOpts = append(clientOpts, opts...)
}
chartRepo, err := repository.NewChartRepository(repo.Spec.URL, "", r.Getters, clientOpts)
if err != nil {
return nil, err
}
if repo.Status.Artifact != nil {
chartRepo.CachePath = r.Storage.LocalPath(*repo.GetArtifact())
}
return chartRepo, nil
}
}
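// Usage sketch (hypothetical URL): the dependency manager invokes the returned
// callback for every repository URL referenced by a chart's dependencies, e.g.
//
//	repo, err := callback("https://charts.example.com")
//
// where repo is either backed by a matching HelmRepository in the namespace
// (including its credentials and cached index), or a shim with only the URL
// and a 60 second timeout when no such object exists.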
func (r *HelmChartReconciler) reconcileDelete(ctx context.Context, chart sourcev1.HelmChart) (ctrl.Result, error) {
// Our finalizer is still present, so let's handle garbage collection
if err := r.gc(chart); err != nil {
r.event(ctx, chart, events.EventSeverityError,
fmt.Sprintf("garbage collection for deleted resource failed: %s", err.Error()))
// Return the error so we retry the failed garbage collection
return ctrl.Result{}, err
}
// Record deleted status
r.recordReadiness(ctx, chart)
// Remove our finalizer from the list and update it
controllerutil.RemoveFinalizer(&chart, sourcev1.SourceFinalizer)
if err := r.Update(ctx, &chart); err != nil {
return ctrl.Result{}, err
}
// Stop reconciliation as the object is being deleted
return ctrl.Result{}, nil
}
// resetStatus returns a modified v1beta1.HelmChart and a boolean indicating
// if the status field has been reset.
func (r *HelmChartReconciler) resetStatus(chart sourcev1.HelmChart) (sourcev1.HelmChart, bool) {
// We do not have an artifact, or it no longer exists
if chart.GetArtifact() == nil || !r.Storage.ArtifactExist(*chart.GetArtifact()) {
chart = sourcev1.HelmChartProgressing(chart)
chart.Status.Artifact = nil
return chart, true
}
// The chart specification has changed
if chart.Generation != chart.Status.ObservedGeneration {
return sourcev1.HelmChartProgressing(chart), true
}
return chart, false
}
// gc performs a garbage collection for the given v1beta1.HelmChart.
// It removes all but the current artifact, unless the deletion
// timestamp is set, in which case all artifacts for the resource
// are removed.
func (r *HelmChartReconciler) gc(chart sourcev1.HelmChart) error {
if !chart.DeletionTimestamp.IsZero() {
return r.Storage.RemoveAll(r.Storage.NewArtifactFor(chart.Kind, chart.GetObjectMeta(), "", "*"))
}
if chart.GetArtifact() != nil {
return r.Storage.RemoveAllButCurrent(*chart.GetArtifact())
}
return nil
}
// event emits a Kubernetes event and forwards the event to notification
// controller if configured.
func (r *HelmChartReconciler) event(ctx context.Context, chart sourcev1.HelmChart, severity, msg string) {
log := ctrl.LoggerFrom(ctx)
if r.EventRecorder != nil {
r.EventRecorder.Eventf(&chart, "Normal", severity, msg)
}
if r.ExternalEventRecorder != nil {
objRef, err := reference.GetReference(r.Scheme, &chart)
if err != nil {
log.Error(err, "unable to send event")
return
}
if err := r.ExternalEventRecorder.Eventf(*objRef, nil, severity, severity, msg); err != nil {
log.Error(err, "unable to send event")
return
}
}
}
func (r *HelmChartReconciler) recordReadiness(ctx context.Context, chart sourcev1.HelmChart) {
log := ctrl.LoggerFrom(ctx)
if r.MetricsRecorder == nil {
return
}
objRef, err := reference.GetReference(r.Scheme, &chart)
if err != nil {
log.Error(err, "unable to record readiness metric")
return
}
if rc := apimeta.FindStatusCondition(chart.Status.Conditions, meta.ReadyCondition); rc != nil {
r.MetricsRecorder.RecordCondition(*objRef, *rc, !chart.DeletionTimestamp.IsZero())
} else {
r.MetricsRecorder.RecordCondition(*objRef, metav1.Condition{
Type: meta.ReadyCondition,
Status: metav1.ConditionUnknown,
}, !chart.DeletionTimestamp.IsZero())
}
}
func (r *HelmChartReconciler) updateStatus(ctx context.Context, req ctrl.Request, newStatus sourcev1.HelmChartStatus) error {
var chart sourcev1.HelmChart
if err := r.Get(ctx, req.NamespacedName, &chart); err != nil {
return err
}
patch := client.MergeFrom(chart.DeepCopy())
chart.Status = newStatus
return r.Status().Patch(ctx, &chart, patch)
}
func (r *HelmChartReconciler) indexHelmRepositoryByURL(o client.Object) []string {
repo, ok := o.(*sourcev1.HelmRepository)
if !ok {
panic(fmt.Sprintf("Expected a HelmRepository, got %T", o))
}
u := repository.NormalizeURL(repo.Spec.URL)
if u != "" {
return []string{u}
}
return nil
}
func (r *HelmChartReconciler) indexHelmChartBySource(o client.Object) []string {
hc, ok := o.(*sourcev1.HelmChart)
if !ok {
panic(fmt.Sprintf("Expected a HelmChart, got %T", o))
}
return []string{fmt.Sprintf("%s/%s", hc.Spec.SourceRef.Kind, hc.Spec.SourceRef.Name)}
}
func (r *HelmChartReconciler) resolveDependencyRepository(ctx context.Context, url string, namespace string) (*sourcev1.HelmRepository, error) {
listOpts := []client.ListOption{
client.InNamespace(namespace),
client.MatchingFields{sourcev1.HelmRepositoryURLIndexKey: url},
}
var list sourcev1.HelmRepositoryList
err := r.Client.List(ctx, &list, listOpts...)
if err != nil {
return nil, fmt.Errorf("unable to retrieve HelmRepositoryList: %w", err)
}
if len(list.Items) > 0 {
return &list.Items[0], nil
}
return nil, fmt.Errorf("no HelmRepository found for '%s' in '%s' namespace", url, namespace)
}
func (r *HelmChartReconciler) getHelmRepositorySecret(ctx context.Context, repository *sourcev1.HelmRepository) (*corev1.Secret, error) {
if repository.Spec.SecretRef != nil {
name := types.NamespacedName{
Namespace: repository.GetNamespace(),
Name: repository.Spec.SecretRef.Name,
}
var secret corev1.Secret
err := r.Client.Get(ctx, name, &secret)
if err != nil {
err = fmt.Errorf("auth secret error: %w", err)
return nil, err
}
return &secret, nil
}
return nil, nil
}
func (r *HelmChartReconciler) requestsForHelmRepositoryChange(o client.Object) []reconcile.Request {
repo, ok := o.(*sourcev1.HelmRepository)
if !ok {
panic(fmt.Sprintf("Expected a HelmRepository, got %T", o))
}
// If we do not have an artifact, we have no requests to make
if repo.GetArtifact() == nil {
return nil
}
ctx := context.Background()
var list sourcev1.HelmChartList
if err := r.List(ctx, &list, client.MatchingFields{
sourcev1.SourceIndexKey: fmt.Sprintf("%s/%s", sourcev1.HelmRepositoryKind, repo.Name),
}); err != nil {
return nil
}
// TODO(hidde): unlike other places (e.g. the helm-controller),
// we have no reference here to determine if the request is coming
// from the _old_ or _new_ update event, and resources are thus
// enqueued twice.
var reqs []reconcile.Request
for _, i := range list.Items {
reqs = append(reqs, reconcile.Request{NamespacedName: client.ObjectKeyFromObject(&i)})
}
return reqs
}
func (r *HelmChartReconciler) requestsForGitRepositoryChange(o client.Object) []reconcile.Request {
repo, ok := o.(*sourcev1.GitRepository)
if !ok {
panic(fmt.Sprintf("Expected a GitRepository, got %T", o))
}
// If we do not have an artifact, we have no requests to make
if repo.GetArtifact() == nil {
return nil
}
var list sourcev1.HelmChartList
if err := r.List(context.TODO(), &list, client.MatchingFields{
sourcev1.SourceIndexKey: fmt.Sprintf("%s/%s", sourcev1.GitRepositoryKind, repo.Name),
}); err != nil {
return nil
}
// TODO(hidde): unlike other places (e.g. the helm-controller),
// we have no reference here to determine if the request is coming
// from the _old_ or _new_ update event, and resources are thus
// enqueued twice.
var reqs []reconcile.Request
for _, i := range list.Items {
reqs = append(reqs, reconcile.Request{NamespacedName: client.ObjectKeyFromObject(&i)})
}
return reqs
}
func (r *HelmChartReconciler) requestsForBucketChange(o client.Object) []reconcile.Request {
bucket, ok := o.(*sourcev1.Bucket)
if !ok {
panic(fmt.Sprintf("Expected a Bucket, got %T", o))
}
// If we do not have an artifact, we have no requests to make
if bucket.GetArtifact() == nil {
return nil
}
var list sourcev1.HelmChartList
if err := r.List(context.TODO(), &list, client.MatchingFields{
sourcev1.SourceIndexKey: fmt.Sprintf("%s/%s", sourcev1.BucketKind, bucket.Name),
}); err != nil {
return nil
}
// TODO(hidde): unlike other places (e.g. the helm-controller),
// we have no reference here to determine if the request is coming
// from the _old_ or _new_ update event, and resources are thus
// enqueued twice.
var reqs []reconcile.Request
for _, i := range list.Items {
reqs = append(reqs, reconcile.Request{NamespacedName: client.ObjectKeyFromObject(&i)})
}
return reqs
}
func (r *HelmChartReconciler) recordSuspension(ctx context.Context, chart sourcev1.HelmChart) {
if r.MetricsRecorder == nil {
return
}
log := ctrl.LoggerFrom(ctx)
objRef, err := reference.GetReference(r.Scheme, &chart)
if err != nil {
log.Error(err, "unable to record suspended metric")
return
}
if !chart.DeletionTimestamp.IsZero() {
r.MetricsRecorder.RecordSuspend(*objRef, false)
} else {
r.MetricsRecorder.RecordSuspend(*objRef, chart.Spec.Suspend)
}
}
func reasonForBuildError(err error) string {
var buildErr *chart.BuildError
if ok := errors.As(err, &buildErr); !ok {
return sourcev1.ChartPullFailedReason
}
switch buildErr.Reason {
case chart.ErrChartMetadataPatch, chart.ErrValuesFilesMerge, chart.ErrDependencyBuild, chart.ErrChartPackage:
return sourcev1.ChartPackageFailedReason
default:
return sourcev1.ChartPullFailedReason
}
}
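// Mapping sketch (hypothetical error value): if err wraps a *chart.BuildError
// whose Reason is chart.ErrChartMetadataPatch, chart.ErrValuesFilesMerge,
// chart.ErrDependencyBuild or chart.ErrChartPackage, the chart condition
// reason becomes ChartPackageFailedReason; any other error (including
// non-BuildError values) maps to ChartPullFailedReason.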
func reasonForBuildSuccess(result *chart.Build) string {
if result.Packaged {
return sourcev1.ChartPackageSucceededReason
}
return sourcev1.ChartPullSucceededReason
}

File diff suppressed because it is too large

View File

@ -0,0 +1,405 @@
/*
Copyright 2020 The Flux authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package controllers
import (
"context"
"fmt"
"net/url"
"os"
"time"
helmgetter "helm.sh/helm/v3/pkg/getter"
corev1 "k8s.io/api/core/v1"
apimeta "k8s.io/apimachinery/pkg/api/meta"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/types"
kuberecorder "k8s.io/client-go/tools/record"
"k8s.io/client-go/tools/reference"
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/controller"
"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
"sigs.k8s.io/controller-runtime/pkg/predicate"
"github.com/fluxcd/pkg/apis/meta"
"github.com/fluxcd/pkg/runtime/events"
"github.com/fluxcd/pkg/runtime/metrics"
"github.com/fluxcd/pkg/runtime/predicates"
sourcev1 "github.com/fluxcd/source-controller/api/v1beta1"
"github.com/fluxcd/source-controller/internal/helm/getter"
"github.com/fluxcd/source-controller/internal/helm/repository"
)
// +kubebuilder:rbac:groups=source.toolkit.fluxcd.io,resources=helmrepositories,verbs=get;list;watch;create;update;patch;delete
// +kubebuilder:rbac:groups=source.toolkit.fluxcd.io,resources=helmrepositories/status,verbs=get;update;patch
// +kubebuilder:rbac:groups=source.toolkit.fluxcd.io,resources=helmrepositories/finalizers,verbs=get;create;update;patch;delete
// +kubebuilder:rbac:groups="",resources=events,verbs=create;patch
// HelmRepositoryReconciler reconciles a HelmRepository object
type HelmRepositoryReconciler struct {
client.Client
Scheme *runtime.Scheme
Storage *Storage
Getters helmgetter.Providers
EventRecorder kuberecorder.EventRecorder
ExternalEventRecorder *events.Recorder
MetricsRecorder *metrics.Recorder
}
type HelmRepositoryReconcilerOptions struct {
MaxConcurrentReconciles int
}
func (r *HelmRepositoryReconciler) SetupWithManager(mgr ctrl.Manager) error {
return r.SetupWithManagerAndOptions(mgr, HelmRepositoryReconcilerOptions{})
}
func (r *HelmRepositoryReconciler) SetupWithManagerAndOptions(mgr ctrl.Manager, opts HelmRepositoryReconcilerOptions) error {
return ctrl.NewControllerManagedBy(mgr).
For(&sourcev1.HelmRepository{}).
WithEventFilter(predicate.Or(predicate.GenerationChangedPredicate{}, predicates.ReconcileRequestedPredicate{})).
WithOptions(controller.Options{MaxConcurrentReconciles: opts.MaxConcurrentReconciles}).
Complete(r)
}
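// With these predicates, a HelmRepository is reconciled immediately when its
// spec changes (metadata.generation is bumped) or when a reconcile request is
// annotated on the object via fluxcd/pkg/apis/meta; otherwise it is simply
// re-queued after spec.interval by the RequeueAfter result below.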
func (r *HelmRepositoryReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {
start := time.Now()
log := ctrl.LoggerFrom(ctx)
var repository sourcev1.HelmRepository
if err := r.Get(ctx, req.NamespacedName, &repository); err != nil {
return ctrl.Result{}, client.IgnoreNotFound(err)
}
// Record suspended status metric
defer r.recordSuspension(ctx, repository)
// Add our finalizer if it does not exist
if !controllerutil.ContainsFinalizer(&repository, sourcev1.SourceFinalizer) {
patch := client.MergeFrom(repository.DeepCopy())
controllerutil.AddFinalizer(&repository, sourcev1.SourceFinalizer)
if err := r.Patch(ctx, &repository, patch); err != nil {
log.Error(err, "unable to register finalizer")
return ctrl.Result{}, err
}
}
// Examine if the object is under deletion
if !repository.ObjectMeta.DeletionTimestamp.IsZero() {
return r.reconcileDelete(ctx, repository)
}
// Return early if the object is suspended.
if repository.Spec.Suspend {
log.Info("Reconciliation is suspended for this object")
return ctrl.Result{}, nil
}
// record reconciliation duration
if r.MetricsRecorder != nil {
objRef, err := reference.GetReference(r.Scheme, &repository)
if err != nil {
return ctrl.Result{}, err
}
defer r.MetricsRecorder.RecordDuration(*objRef, start)
}
// set initial status
if resetRepository, ok := r.resetStatus(repository); ok {
repository = resetRepository
if err := r.updateStatus(ctx, req, repository.Status); err != nil {
log.Error(err, "unable to update status")
return ctrl.Result{Requeue: true}, err
}
r.recordReadiness(ctx, repository)
}
// record the value of the reconciliation request, if any
// TODO(hidde): would be better to defer this in combination with
// always patching the status sub-resource after a reconciliation.
if v, ok := meta.ReconcileAnnotationValue(repository.GetAnnotations()); ok {
repository.Status.SetLastHandledReconcileRequest(v)
}
// purge old artifacts from storage
if err := r.gc(repository); err != nil {
log.Error(err, "unable to purge old artifacts")
}
// reconcile repository by downloading the index.yaml file
reconciledRepository, reconcileErr := r.reconcile(ctx, *repository.DeepCopy())
// update status with the reconciliation result
if err := r.updateStatus(ctx, req, reconciledRepository.Status); err != nil {
log.Error(err, "unable to update status")
return ctrl.Result{Requeue: true}, err
}
// if reconciliation failed, record the failure and requeue immediately
if reconcileErr != nil {
r.event(ctx, reconciledRepository, events.EventSeverityError, reconcileErr.Error())
r.recordReadiness(ctx, reconciledRepository)
return ctrl.Result{Requeue: true}, reconcileErr
}
// emit revision change event
if repository.Status.Artifact == nil || reconciledRepository.Status.Artifact.Revision != repository.Status.Artifact.Revision {
r.event(ctx, reconciledRepository, events.EventSeverityInfo, sourcev1.HelmRepositoryReadyMessage(reconciledRepository))
}
r.recordReadiness(ctx, reconciledRepository)
log.Info(fmt.Sprintf("Reconciliation finished in %s, next run in %s",
time.Since(start).String(),
repository.GetInterval().Duration.String(),
))
return ctrl.Result{RequeueAfter: repository.GetInterval().Duration}, nil
}
func (r *HelmRepositoryReconciler) reconcile(ctx context.Context, repo sourcev1.HelmRepository) (sourcev1.HelmRepository, error) {
log := ctrl.LoggerFrom(ctx)
clientOpts := []helmgetter.Option{
helmgetter.WithURL(repo.Spec.URL),
helmgetter.WithTimeout(repo.Spec.Timeout.Duration),
helmgetter.WithPassCredentialsAll(repo.Spec.PassCredentials),
}
if repo.Spec.SecretRef != nil {
name := types.NamespacedName{
Namespace: repo.GetNamespace(),
Name: repo.Spec.SecretRef.Name,
}
var secret corev1.Secret
err := r.Client.Get(ctx, name, &secret)
if err != nil {
err = fmt.Errorf("auth secret error: %w", err)
return sourcev1.HelmRepositoryNotReady(repo, sourcev1.AuthenticationFailedReason, err.Error()), err
}
authDir, err := os.MkdirTemp("", repo.Kind+"-"+repo.Namespace+"-"+repo.Name+"-")
if err != nil {
err = fmt.Errorf("failed to create temporary working directory for credentials: %w", err)
return sourcev1.HelmRepositoryNotReady(repo, sourcev1.AuthenticationFailedReason, err.Error()), err
}
defer func() {
if err := os.RemoveAll(authDir); err != nil {
log.Error(err, "failed to remove working directory", "path", authDir)
}
}()
opts, err := getter.ClientOptionsFromSecret(authDir, secret)
if err != nil {
err = fmt.Errorf("auth options error: %w", err)
return sourcev1.HelmRepositoryNotReady(repo, sourcev1.AuthenticationFailedReason, err.Error()), err
}
clientOpts = append(clientOpts, opts...)
}
chartRepo, err := repository.NewChartRepository(repo.Spec.URL, "", r.Getters, clientOpts)
if err != nil {
switch err.(type) {
case *url.Error:
return sourcev1.HelmRepositoryNotReady(repo, sourcev1.URLInvalidReason, err.Error()), err
default:
return sourcev1.HelmRepositoryNotReady(repo, sourcev1.IndexationFailedReason, err.Error()), err
}
}
checksum, err := chartRepo.CacheIndex()
if err != nil {
err = fmt.Errorf("failed to download repository index: %w", err)
return sourcev1.HelmRepositoryNotReady(repo, sourcev1.IndexationFailedReason, err.Error()), err
}
defer chartRepo.RemoveCache()
artifact := r.Storage.NewArtifactFor(repo.Kind,
repo.ObjectMeta.GetObjectMeta(),
"",
fmt.Sprintf("index-%s.yaml", checksum))
// Return early on unchanged index
if apimeta.IsStatusConditionTrue(repo.Status.Conditions, meta.ReadyCondition) &&
(repo.GetArtifact() != nil && repo.GetArtifact().Checksum == checksum) {
if artifact.URL != repo.GetArtifact().URL {
r.Storage.SetArtifactURL(repo.GetArtifact())
repo.Status.URL = r.Storage.SetHostname(repo.Status.URL)
}
return repo, nil
}
// Load the cached repository index to ensure it passes validation
if err := chartRepo.LoadFromCache(); err != nil {
return sourcev1.HelmRepositoryNotReady(repo, sourcev1.IndexationFailedReason, err.Error()), err
}
// The repository checksum is the SHA256 of the loaded bytes, after sorting
artifact.Revision = chartRepo.Checksum
chartRepo.Unload()
// Create artifact dir
err = r.Storage.MkdirAll(artifact)
if err != nil {
err = fmt.Errorf("unable to create repository index directory: %w", err)
return sourcev1.HelmRepositoryNotReady(repo, sourcev1.StorageOperationFailedReason, err.Error()), err
}
// Acquire lock
unlock, err := r.Storage.Lock(artifact)
if err != nil {
err = fmt.Errorf("unable to acquire lock: %w", err)
return sourcev1.HelmRepositoryNotReady(repo, sourcev1.StorageOperationFailedReason, err.Error()), err
}
defer unlock()
// Save artifact to storage
if err = r.Storage.CopyFromPath(&artifact, chartRepo.CachePath); err != nil {
return sourcev1.HelmRepositoryNotReady(repo, sourcev1.StorageOperationFailedReason, err.Error()), err
}
// Update index symlink
indexURL, err := r.Storage.Symlink(artifact, "index.yaml")
if err != nil {
err = fmt.Errorf("storage error: %w", err)
return sourcev1.HelmRepositoryNotReady(repo, sourcev1.StorageOperationFailedReason, err.Error()), err
}
message := fmt.Sprintf("Fetched revision: %s", artifact.Revision)
return sourcev1.HelmRepositoryReady(repo, artifact, indexURL, sourcev1.IndexationSucceededReason, message), nil
}
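// Resulting artifact layout (path scheme assumed from Storage.NewArtifactFor,
// illustrative only):
//
//	helmrepository/<namespace>/<name>/index-<sha256>.yaml  // revisioned copy of the repository index
//	helmrepository/<namespace>/<name>/index.yaml           // symlink pointing at the copy above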
func (r *HelmRepositoryReconciler) reconcileDelete(ctx context.Context, repository sourcev1.HelmRepository) (ctrl.Result, error) {
// Our finalizer is still present, so let's handle garbage collection
if err := r.gc(repository); err != nil {
r.event(ctx, repository, events.EventSeverityError,
fmt.Sprintf("garbage collection for deleted resource failed: %s", err.Error()))
// Return the error so we retry the failed garbage collection
return ctrl.Result{}, err
}
// Record deleted status
r.recordReadiness(ctx, repository)
// Remove our finalizer from the list and update it
controllerutil.RemoveFinalizer(&repository, sourcev1.SourceFinalizer)
if err := r.Update(ctx, &repository); err != nil {
return ctrl.Result{}, err
}
// Stop reconciliation as the object is being deleted
return ctrl.Result{}, nil
}
// resetStatus returns a modified v1beta1.HelmRepository and a boolean indicating
// if the status field has been reset.
func (r *HelmRepositoryReconciler) resetStatus(repository sourcev1.HelmRepository) (sourcev1.HelmRepository, bool) {
// We do not have an artifact, or it no longer exists
if repository.GetArtifact() == nil || !r.Storage.ArtifactExist(*repository.GetArtifact()) {
repository = sourcev1.HelmRepositoryProgressing(repository)
repository.Status.Artifact = nil
return repository, true
}
if repository.Generation != repository.Status.ObservedGeneration {
return sourcev1.HelmRepositoryProgressing(repository), true
}
return repository, false
}
// gc performs a garbage collection for the given v1beta1.HelmRepository.
// It removes all but the current artifact, unless the deletion
// timestamp is set, in which case all artifacts for the resource
// are removed.
func (r *HelmRepositoryReconciler) gc(repository sourcev1.HelmRepository) error {
if !repository.DeletionTimestamp.IsZero() {
return r.Storage.RemoveAll(r.Storage.NewArtifactFor(repository.Kind, repository.GetObjectMeta(), "", "*"))
}
if repository.GetArtifact() != nil {
return r.Storage.RemoveAllButCurrent(*repository.GetArtifact())
}
return nil
}
// event emits a Kubernetes event and forwards the event to notification controller if configured
func (r *HelmRepositoryReconciler) event(ctx context.Context, repository sourcev1.HelmRepository, severity, msg string) {
log := ctrl.LoggerFrom(ctx)
if r.EventRecorder != nil {
r.EventRecorder.Eventf(&repository, "Normal", severity, msg)
}
if r.ExternalEventRecorder != nil {
objRef, err := reference.GetReference(r.Scheme, &repository)
if err != nil {
log.Error(err, "unable to send event")
return
}
if err := r.ExternalEventRecorder.Eventf(*objRef, nil, severity, severity, msg); err != nil {
log.Error(err, "unable to send event")
return
}
}
}
func (r *HelmRepositoryReconciler) recordReadiness(ctx context.Context, repository sourcev1.HelmRepository) {
log := ctrl.LoggerFrom(ctx)
if r.MetricsRecorder == nil {
return
}
objRef, err := reference.GetReference(r.Scheme, &repository)
if err != nil {
log.Error(err, "unable to record readiness metric")
return
}
if rc := apimeta.FindStatusCondition(repository.Status.Conditions, meta.ReadyCondition); rc != nil {
r.MetricsRecorder.RecordCondition(*objRef, *rc, !repository.DeletionTimestamp.IsZero())
} else {
r.MetricsRecorder.RecordCondition(*objRef, metav1.Condition{
Type: meta.ReadyCondition,
Status: metav1.ConditionUnknown,
}, !repository.DeletionTimestamp.IsZero())
}
}
func (r *HelmRepositoryReconciler) updateStatus(ctx context.Context, req ctrl.Request, newStatus sourcev1.HelmRepositoryStatus) error {
var repository sourcev1.HelmRepository
if err := r.Get(ctx, req.NamespacedName, &repository); err != nil {
return err
}
patch := client.MergeFrom(repository.DeepCopy())
repository.Status = newStatus
return r.Status().Patch(ctx, &repository, patch)
}
func (r *HelmRepositoryReconciler) recordSuspension(ctx context.Context, hr sourcev1.HelmRepository) {
if r.MetricsRecorder == nil {
return
}
log := ctrl.LoggerFrom(ctx)
objRef, err := reference.GetReference(r.Scheme, &hr)
if err != nil {
log.Error(err, "unable to record suspended metric")
return
}
if !hr.DeletionTimestamp.IsZero() {
r.MetricsRecorder.RecordSuspend(*objRef, false)
} else {
r.MetricsRecorder.RecordSuspend(*objRef, hr.Spec.Suspend)
}
}

View File

@ -0,0 +1,406 @@
/*
Copyright 2020 The Flux authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package controllers
import (
"context"
"net/http"
"os"
"path"
"strings"
"time"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
"github.com/fluxcd/pkg/apis/meta"
"github.com/fluxcd/pkg/helmtestserver"
sourcev1 "github.com/fluxcd/source-controller/api/v1beta1"
)
var _ = Describe("HelmRepositoryReconciler", func() {
const (
timeout = time.Second * 30
interval = time.Second * 1
indexInterval = time.Second * 2
repositoryTimeout = time.Second * 5
)
Context("HelmRepository", func() {
var (
namespace *corev1.Namespace
helmServer *helmtestserver.HelmServer
err error
)
BeforeEach(func() {
namespace = &corev1.Namespace{
ObjectMeta: metav1.ObjectMeta{Name: "helm-repository-" + randStringRunes(5)},
}
err = k8sClient.Create(context.Background(), namespace)
Expect(err).NotTo(HaveOccurred(), "failed to create test namespace")
helmServer, err = helmtestserver.NewTempHelmServer()
Expect(err).To(Succeed())
})
AfterEach(func() {
helmServer.Stop()
os.RemoveAll(helmServer.Root())
Eventually(func() error {
return k8sClient.Delete(context.Background(), namespace)
}, timeout, interval).Should(Succeed(), "failed to delete test namespace")
})
It("Creates artifacts for", func() {
helmServer.Start()
Expect(helmServer.PackageChart(path.Join("testdata/charts/helmchart"))).Should(Succeed())
Expect(helmServer.GenerateIndex()).Should(Succeed())
key := types.NamespacedName{
Name: "helmrepository-sample-" + randStringRunes(5),
Namespace: namespace.Name,
}
created := &sourcev1.HelmRepository{
ObjectMeta: metav1.ObjectMeta{
Name: key.Name,
Namespace: key.Namespace,
},
Spec: sourcev1.HelmRepositorySpec{
URL: helmServer.URL(),
Interval: metav1.Duration{Duration: indexInterval},
Timeout: &metav1.Duration{Duration: repositoryTimeout},
},
}
Expect(k8sClient.Create(context.Background(), created)).Should(Succeed())
By("Expecting artifact")
got := &sourcev1.HelmRepository{}
Eventually(func() bool {
_ = k8sClient.Get(context.Background(), key, got)
return got.Status.Artifact != nil && storage.ArtifactExist(*got.Status.Artifact)
}, timeout, interval).Should(BeTrue())
By("Updating the chart index")
// Regenerating the index is sufficient to make the revision change
Expect(helmServer.GenerateIndex()).Should(Succeed())
By("Expecting revision change and GC")
Eventually(func() bool {
now := &sourcev1.HelmRepository{}
_ = k8sClient.Get(context.Background(), key, now)
// Test revision change and garbage collection
return now.Status.Artifact.Revision != got.Status.Artifact.Revision &&
!storage.ArtifactExist(*got.Status.Artifact)
}, timeout, interval).Should(BeTrue())
updated := &sourcev1.HelmRepository{}
Expect(k8sClient.Get(context.Background(), key, updated)).Should(Succeed())
updated.Spec.URL = "invalid#url?"
Expect(k8sClient.Update(context.Background(), updated)).Should(Succeed())
Eventually(func() bool {
_ = k8sClient.Get(context.Background(), key, updated)
for _, c := range updated.Status.Conditions {
if c.Reason == sourcev1.IndexationFailedReason {
return true
}
}
return false
}, timeout, interval).Should(BeTrue())
Expect(updated.Status.Artifact).ToNot(BeNil())
By("Expecting to delete successfully")
got = &sourcev1.HelmRepository{}
Eventually(func() error {
_ = k8sClient.Get(context.Background(), key, got)
return k8sClient.Delete(context.Background(), got)
}, timeout, interval).Should(Succeed())
By("Expecting delete to finish")
Eventually(func() error {
r := &sourcev1.HelmRepository{}
return k8sClient.Get(context.Background(), key, r)
}, timeout, interval).ShouldNot(Succeed())
exists := func(path string) bool {
// wait for tmp sync on macOS
time.Sleep(time.Second)
_, err := os.Stat(path)
return err == nil
}
By("Expecting GC after delete")
Eventually(func() bool {
return exists(storage.LocalPath(*got.Status.Artifact))
}, timeout, interval).ShouldNot(BeTrue())
})
It("Handles timeout", func() {
helmServer.Start()
Expect(helmServer.PackageChart(path.Join("testdata/charts/helmchart"))).Should(Succeed())
Expect(helmServer.GenerateIndex()).Should(Succeed())
key := types.NamespacedName{
Name: "helmrepository-sample-" + randStringRunes(5),
Namespace: namespace.Name,
}
created := &sourcev1.HelmRepository{
ObjectMeta: metav1.ObjectMeta{
Name: key.Name,
Namespace: key.Namespace,
},
Spec: sourcev1.HelmRepositorySpec{
URL: helmServer.URL(),
Interval: metav1.Duration{Duration: indexInterval},
},
}
Expect(k8sClient.Create(context.Background(), created)).Should(Succeed())
defer k8sClient.Delete(context.Background(), created)
By("Expecting index download to succeed")
Eventually(func() bool {
got := &sourcev1.HelmRepository{}
_ = k8sClient.Get(context.Background(), key, got)
for _, condition := range got.Status.Conditions {
if condition.Reason == sourcev1.IndexationSucceededReason {
return true
}
}
return false
}, timeout, interval).Should(BeTrue())
By("Expecting index download to timeout")
updated := &sourcev1.HelmRepository{}
Expect(k8sClient.Get(context.Background(), key, updated)).Should(Succeed())
updated.Spec.Timeout = &metav1.Duration{Duration: time.Microsecond}
Expect(k8sClient.Update(context.Background(), updated)).Should(Succeed())
Eventually(func() string {
got := &sourcev1.HelmRepository{}
_ = k8sClient.Get(context.Background(), key, got)
for _, condition := range got.Status.Conditions {
if condition.Reason == sourcev1.IndexationFailedReason {
return condition.Message
}
}
return ""
}, timeout, interval).Should(MatchRegexp("(?i)timeout"))
})
It("Authenticates when basic auth credentials are provided", func() {
var username, password = "john", "doe"
helmServer.WithMiddleware(func(handler http.Handler) http.Handler {
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
u, p, ok := r.BasicAuth()
if !ok || username != u || password != p {
w.WriteHeader(401)
return
}
handler.ServeHTTP(w, r)
})
})
helmServer.Start()
Expect(helmServer.PackageChart(path.Join("testdata/charts/helmchart"))).Should(Succeed())
Expect(helmServer.GenerateIndex()).Should(Succeed())
secretKey := types.NamespacedName{
Name: "helmrepository-auth-" + randStringRunes(5),
Namespace: namespace.Name,
}
secret := &corev1.Secret{
ObjectMeta: metav1.ObjectMeta{
Name: secretKey.Name,
Namespace: secretKey.Namespace,
},
}
Expect(k8sClient.Create(context.Background(), secret)).Should(Succeed())
key := types.NamespacedName{
Name: "helmrepository-sample-" + randStringRunes(5),
Namespace: namespace.Name,
}
created := &sourcev1.HelmRepository{
ObjectMeta: metav1.ObjectMeta{
Name: key.Name,
Namespace: key.Namespace,
},
Spec: sourcev1.HelmRepositorySpec{
URL: helmServer.URL(),
SecretRef: &meta.LocalObjectReference{
Name: secretKey.Name,
},
Interval: metav1.Duration{Duration: indexInterval},
},
}
Expect(k8sClient.Create(context.Background(), created)).Should(Succeed())
defer k8sClient.Delete(context.Background(), created)
By("Expecting 401")
Eventually(func() bool {
got := &sourcev1.HelmRepository{}
_ = k8sClient.Get(context.Background(), key, got)
for _, c := range got.Status.Conditions {
if c.Reason == sourcev1.IndexationFailedReason &&
strings.Contains(c.Message, "401 Unauthorized") {
return true
}
}
return false
}, timeout, interval).Should(BeTrue())
By("Expecting missing field error")
secret.Data = map[string][]byte{
"username": []byte(username),
}
Expect(k8sClient.Update(context.Background(), secret)).Should(Succeed())
Eventually(func() bool {
got := &sourcev1.HelmRepository{}
_ = k8sClient.Get(context.Background(), key, got)
for _, c := range got.Status.Conditions {
if c.Reason == sourcev1.AuthenticationFailedReason {
return true
}
}
return false
}, timeout, interval).Should(BeTrue())
By("Expecting artifact")
secret.Data["password"] = []byte(password)
Expect(k8sClient.Update(context.Background(), secret)).Should(Succeed())
Eventually(func() bool {
got := &sourcev1.HelmRepository{}
_ = k8sClient.Get(context.Background(), key, got)
return got.Status.Artifact != nil &&
storage.ArtifactExist(*got.Status.Artifact)
}, timeout, interval).Should(BeTrue())
By("Expecting missing secret error")
Expect(k8sClient.Delete(context.Background(), secret)).Should(Succeed())
got := &sourcev1.HelmRepository{}
Eventually(func() bool {
_ = k8sClient.Get(context.Background(), key, got)
for _, c := range got.Status.Conditions {
if c.Reason == sourcev1.AuthenticationFailedReason {
return true
}
}
return false
}, timeout, interval).Should(BeTrue())
Expect(got.Status.Artifact).ShouldNot(BeNil())
})
It("Authenticates when TLS credentials are provided", func() {
err = helmServer.StartTLS(examplePublicKey, examplePrivateKey, exampleCA, "example.com")
Expect(err).NotTo(HaveOccurred())
Expect(helmServer.PackageChart(path.Join("testdata/charts/helmchart"))).Should(Succeed())
Expect(helmServer.GenerateIndex()).Should(Succeed())
secretKey := types.NamespacedName{
Name: "helmrepository-auth-" + randStringRunes(5),
Namespace: namespace.Name,
}
secret := &corev1.Secret{
ObjectMeta: metav1.ObjectMeta{
Name: secretKey.Name,
Namespace: secretKey.Namespace,
},
}
Expect(k8sClient.Create(context.Background(), secret)).Should(Succeed())
key := types.NamespacedName{
Name: "helmrepository-sample-" + randStringRunes(5),
Namespace: namespace.Name,
}
created := &sourcev1.HelmRepository{
ObjectMeta: metav1.ObjectMeta{
Name: key.Name,
Namespace: key.Namespace,
},
Spec: sourcev1.HelmRepositorySpec{
URL: helmServer.URL(),
SecretRef: &meta.LocalObjectReference{
Name: secretKey.Name,
},
Interval: metav1.Duration{Duration: indexInterval},
},
}
Expect(k8sClient.Create(context.Background(), created)).Should(Succeed())
defer k8sClient.Delete(context.Background(), created)
By("Expecting unknown authority error")
Eventually(func() bool {
got := &sourcev1.HelmRepository{}
_ = k8sClient.Get(context.Background(), key, got)
for _, c := range got.Status.Conditions {
if c.Reason == sourcev1.IndexationFailedReason &&
strings.Contains(c.Message, "certificate signed by unknown authority") {
return true
}
}
return false
}, timeout, interval).Should(BeTrue())
By("Expecting missing field error")
secret.Data = map[string][]byte{
"certFile": examplePublicKey,
}
Expect(k8sClient.Update(context.Background(), secret)).Should(Succeed())
Eventually(func() bool {
got := &sourcev1.HelmRepository{}
_ = k8sClient.Get(context.Background(), key, got)
for _, c := range got.Status.Conditions {
if c.Reason == sourcev1.AuthenticationFailedReason {
return true
}
}
return false
}, timeout, interval).Should(BeTrue())
By("Expecting artifact")
secret.Data["keyFile"] = examplePrivateKey
secret.Data["caFile"] = exampleCA
Expect(k8sClient.Update(context.Background(), secret)).Should(Succeed())
Eventually(func() bool {
got := &sourcev1.HelmRepository{}
_ = k8sClient.Get(context.Background(), key, got)
return got.Status.Artifact != nil &&
storage.ArtifactExist(*got.Status.Artifact)
}, timeout, interval).Should(BeTrue())
By("Expecting missing secret error")
Expect(k8sClient.Delete(context.Background(), secret)).Should(Succeed())
got := &sourcev1.HelmRepository{}
Eventually(func() bool {
_ = k8sClient.Get(context.Background(), key, got)
for _, c := range got.Status.Conditions {
if c.Reason == sourcev1.AuthenticationFailedReason {
return true
}
}
return false
}, timeout, interval).Should(BeTrue())
Expect(got.Status.Artifact).ShouldNot(BeNil())
})
})
})

View File

@ -14,13 +14,13 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
package controller
package controllers
import (
"sigs.k8s.io/controller-runtime/pkg/event"
"sigs.k8s.io/controller-runtime/pkg/predicate"
sourcev1 "github.com/fluxcd/source-controller/api/v1"
sourcev1 "github.com/fluxcd/source-controller/api/v1beta1"
)
type SourceRevisionChangePredicate struct {

459
controllers/storage.go Normal file
View File

@ -0,0 +1,459 @@
/*
Copyright 2020 The Flux authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package controllers
import (
"archive/tar"
"compress/gzip"
"crypto/sha256"
"fmt"
"hash"
"io"
"net/url"
"os"
"path/filepath"
"strings"
"time"
securejoin "github.com/cyphar/filepath-securejoin"
"github.com/go-git/go-git/v5/plumbing/format/gitignore"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"github.com/fluxcd/pkg/lockedfile"
"github.com/fluxcd/pkg/untar"
sourcev1 "github.com/fluxcd/source-controller/api/v1beta1"
"github.com/fluxcd/source-controller/internal/fs"
"github.com/fluxcd/source-controller/pkg/sourceignore"
)
// Storage manages artifacts
type Storage struct {
// BasePath is the local directory path where the source artifacts are stored.
BasePath string `json:"basePath"`
// Hostname is the file server host name used to compose the artifacts URIs.
Hostname string `json:"hostname"`
// Timeout for artifacts operations
Timeout time.Duration `json:"timeout"`
}
// NewStorage creates the storage helper for a given path and hostname
func NewStorage(basePath string, hostname string, timeout time.Duration) (*Storage, error) {
if f, err := os.Stat(basePath); os.IsNotExist(err) || !f.IsDir() {
return nil, fmt.Errorf("invalid dir path: %s", basePath)
}
return &Storage{
BasePath: basePath,
Hostname: hostname,
Timeout: timeout,
}, nil
}
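// Illustrative usage (the path and hostname below are example values, not defaults of this project):
//
// storage, err := NewStorage("/data", "source-controller.flux-system.svc.cluster.local", 60*time.Second)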
// NewArtifactFor returns a new v1beta1.Artifact.
func (s *Storage) NewArtifactFor(kind string, metadata metav1.Object, revision, fileName string) sourcev1.Artifact {
path := sourcev1.ArtifactPath(kind, metadata.GetNamespace(), metadata.GetName(), fileName)
artifact := sourcev1.Artifact{
Path: path,
Revision: revision,
}
s.SetArtifactURL(&artifact)
return artifact
}
// SetArtifactURL sets the URL on the given v1beta1.Artifact.
func (s Storage) SetArtifactURL(artifact *sourcev1.Artifact) {
if artifact.Path == "" {
return
}
artifact.URL = fmt.Sprintf("http://%s/%s", s.Hostname, artifact.Path)
}
// SetHostname sets the hostname of the given URL string to the current Storage.Hostname and returns the result.
func (s Storage) SetHostname(URL string) string {
u, err := url.Parse(URL)
if err != nil {
return ""
}
u.Host = s.Hostname
return u.String()
}
// MkdirAll calls os.MkdirAll for the given v1beta1.Artifact base dir.
func (s *Storage) MkdirAll(artifact sourcev1.Artifact) error {
dir := filepath.Dir(s.LocalPath(artifact))
return os.MkdirAll(dir, 0777)
}
// RemoveAll calls os.RemoveAll for the given v1beta1.Artifact base dir.
func (s *Storage) RemoveAll(artifact sourcev1.Artifact) error {
dir := filepath.Dir(s.LocalPath(artifact))
return os.RemoveAll(dir)
}
// RemoveAllButCurrent removes all files for the given v1beta1.Artifact base dir, excluding the current one.
func (s *Storage) RemoveAllButCurrent(artifact sourcev1.Artifact) error {
localPath := s.LocalPath(artifact)
dir := filepath.Dir(localPath)
var errors []string
_ = filepath.Walk(dir, func(path string, info os.FileInfo, err error) error {
if err != nil {
errors = append(errors, err.Error())
return nil
}
if path != localPath && !info.IsDir() && info.Mode()&os.ModeSymlink != os.ModeSymlink {
if err := os.Remove(path); err != nil {
errors = append(errors, info.Name())
}
}
return nil
})
if len(errors) > 0 {
return fmt.Errorf("failed to remove files: %s", strings.Join(errors, " "))
}
return nil
}
// ArtifactExist returns a boolean indicating whether the v1beta1.Artifact exists in storage and is a regular file.
func (s *Storage) ArtifactExist(artifact sourcev1.Artifact) bool {
fi, err := os.Lstat(s.LocalPath(artifact))
if err != nil {
return false
}
return fi.Mode().IsRegular()
}
// ArchiveFileFilter must return true if a file should not be included in the archive after inspecting the given path
// and/or os.FileInfo.
type ArchiveFileFilter func(p string, fi os.FileInfo) bool
// SourceIgnoreFilter returns an ArchiveFileFilter that filters out files matching sourceignore.VCSPatterns and any of
// the provided patterns.
// If an empty gitignore.Pattern slice is given, the matcher is set to sourceignore.NewDefaultMatcher.
func SourceIgnoreFilter(ps []gitignore.Pattern, domain []string) ArchiveFileFilter {
matcher := sourceignore.NewDefaultMatcher(ps, domain)
if len(ps) > 0 {
ps = append(sourceignore.VCSPatterns(domain), ps...)
matcher = sourceignore.NewMatcher(ps)
}
return func(p string, fi os.FileInfo) bool {
return matcher.Match(strings.Split(p, string(filepath.Separator)), fi.IsDir())
}
}
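// Illustrative usage (the pattern below is an example, not taken from this repository):
//
// filter := SourceIgnoreFilter([]gitignore.Pattern{
// gitignore.ParsePattern("/vendor/", nil),
// }, nil)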
// Archive atomically archives the given directory as a tarball to the given v1beta1.Artifact path, excluding
// directories and any ArchiveFileFilter matches. While archiving, any environment specific data (for example,
// the user and group name) is stripped from file headers.
// If successful, it sets the checksum and last update time on the artifact.
func (s *Storage) Archive(artifact *sourcev1.Artifact, dir string, filter ArchiveFileFilter) (err error) {
if f, err := os.Stat(dir); os.IsNotExist(err) || !f.IsDir() {
return fmt.Errorf("invalid dir path: %s", dir)
}
localPath := s.LocalPath(*artifact)
tf, err := os.CreateTemp(filepath.Split(localPath))
if err != nil {
return err
}
tmpName := tf.Name()
defer func() {
if err != nil {
os.Remove(tmpName)
}
}()
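// Stream the tar/gzip output through an io.MultiWriter so the SHA256 checksum is computed in the same pass that writes the temporary file.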
h := newHash()
mw := io.MultiWriter(h, tf)
gw := gzip.NewWriter(mw)
tw := tar.NewWriter(gw)
if err := filepath.Walk(dir, func(p string, fi os.FileInfo, err error) error {
if err != nil {
return err
}
// Ignore anything that is not a regular file or a directory, e.g. symlinks
if m := fi.Mode(); !(m.IsRegular() || m.IsDir()) {
return nil
}
// Skip filtered files
if filter != nil && filter(p, fi) {
return nil
}
header, err := tar.FileInfoHeader(fi, p)
if err != nil {
return err
}
// The name needs to be modified to maintain directory structure
// as tar.FileInfoHeader only has access to the base name of the file.
// Ref: https://golang.org/src/archive/tar/common.go?#L626
relFilePath := p
if filepath.IsAbs(dir) {
relFilePath, err = filepath.Rel(dir, p)
if err != nil {
return err
}
}
header.Name = relFilePath
// We want to remove any environment specific data as well, this
// ensures the checksum is purely content based.
header.Gid = 0
header.Uid = 0
header.Uname = ""
header.Gname = ""
header.ModTime = time.Time{}
header.AccessTime = time.Time{}
header.ChangeTime = time.Time{}
if err := tw.WriteHeader(header); err != nil {
return err
}
if !fi.Mode().IsRegular() {
return nil
}
f, err := os.Open(p)
if err != nil {
f.Close()
return err
}
if _, err := io.Copy(tw, f); err != nil {
f.Close()
return err
}
return f.Close()
}); err != nil {
tw.Close()
gw.Close()
tf.Close()
return err
}
if err := tw.Close(); err != nil {
gw.Close()
tf.Close()
return err
}
if err := gw.Close(); err != nil {
tf.Close()
return err
}
if err := tf.Close(); err != nil {
return err
}
if err := os.Chmod(tmpName, 0644); err != nil {
return err
}
if err := fs.RenameWithFallback(tmpName, localPath); err != nil {
return err
}
artifact.Checksum = fmt.Sprintf("%x", h.Sum(nil))
artifact.LastUpdateTime = metav1.Now()
return nil
}
// AtomicWriteFile atomically writes the io.Reader contents to the v1beta1.Artifact path.
// If successful, it sets the checksum and last update time on the artifact.
func (s *Storage) AtomicWriteFile(artifact *sourcev1.Artifact, reader io.Reader, mode os.FileMode) (err error) {
localPath := s.LocalPath(*artifact)
tf, err := os.CreateTemp(filepath.Split(localPath))
if err != nil {
return err
}
tfName := tf.Name()
defer func() {
if err != nil {
os.Remove(tfName)
}
}()
h := newHash()
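// Copy through an io.MultiWriter so the checksum is computed while the temporary file is written.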
mw := io.MultiWriter(h, tf)
if _, err := io.Copy(mw, reader); err != nil {
tf.Close()
return err
}
if err := tf.Close(); err != nil {
return err
}
if err := os.Chmod(tfName, mode); err != nil {
return err
}
if err := fs.RenameWithFallback(tfName, localPath); err != nil {
return err
}
artifact.Checksum = fmt.Sprintf("%x", h.Sum(nil))
artifact.LastUpdateTime = metav1.Now()
return nil
}
// Copy atomically copies the io.Reader contents to the v1beta1.Artifact path.
// If successful, it sets the checksum and last update time on the artifact.
func (s *Storage) Copy(artifact *sourcev1.Artifact, reader io.Reader) (err error) {
localPath := s.LocalPath(*artifact)
tf, err := os.CreateTemp(filepath.Split(localPath))
if err != nil {
return err
}
tfName := tf.Name()
defer func() {
if err != nil {
os.Remove(tfName)
}
}()
h := newHash()
mw := io.MultiWriter(h, tf)
if _, err := io.Copy(mw, reader); err != nil {
tf.Close()
return err
}
if err := tf.Close(); err != nil {
return err
}
if err := fs.RenameWithFallback(tfName, localPath); err != nil {
return err
}
artifact.Checksum = fmt.Sprintf("%x", h.Sum(nil))
artifact.LastUpdateTime = metav1.Now()
return nil
}
// CopyFromPath atomically copies the contents of the given path to the path of the v1beta1.Artifact.
// If successful, it sets the checksum and last update time on the artifact.
func (s *Storage) CopyFromPath(artifact *sourcev1.Artifact, path string) (err error) {
f, err := os.Open(path)
if err != nil {
return err
}
defer func() {
if cerr := f.Close(); cerr != nil && err == nil {
err = cerr
}
}()
err = s.Copy(artifact, f)
return err
}
// CopyToPath copies the contents in the (sub)path of the given artifact to the given path.
func (s *Storage) CopyToPath(artifact *sourcev1.Artifact, subPath, toPath string) error {
// create a tmp directory to store artifact
tmp, err := os.MkdirTemp("", "flux-include-")
if err != nil {
return err
}
defer os.RemoveAll(tmp)
// read artifact file content
localPath := s.LocalPath(*artifact)
f, err := os.Open(localPath)
if err != nil {
return err
}
defer f.Close()
// untar the artifact
untarPath := filepath.Join(tmp, "unpack")
if _, err = untar.Untar(f, untarPath); err != nil {
return err
}
// create the destination parent dir
if err = os.MkdirAll(filepath.Dir(toPath), os.ModePerm); err != nil {
return err
}
// copy the artifact content to the destination dir
fromPath, err := securejoin.SecureJoin(untarPath, subPath)
if err != nil {
return err
}
if err := fs.RenameWithFallback(fromPath, toPath); err != nil {
return err
}
return nil
}
// Symlink creates or updates a symbolic link for the given v1beta1.Artifact and returns the URL for the symlink.
func (s *Storage) Symlink(artifact sourcev1.Artifact, linkName string) (string, error) {
localPath := s.LocalPath(artifact)
dir := filepath.Dir(localPath)
link := filepath.Join(dir, linkName)
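// Update the link atomically: create it under a temporary name and rename it over any existing link, so readers never observe a missing link.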
tmpLink := link + ".tmp"
if err := os.Remove(tmpLink); err != nil && !os.IsNotExist(err) {
return "", err
}
if err := os.Symlink(localPath, tmpLink); err != nil {
return "", err
}
if err := os.Rename(tmpLink, link); err != nil {
return "", err
}
url := fmt.Sprintf("http://%s/%s", s.Hostname, filepath.Join(filepath.Dir(artifact.Path), linkName))
return url, nil
}
// Checksum returns the SHA256 checksum for the data of the given io.Reader as a string.
func (s *Storage) Checksum(reader io.Reader) string {
h := newHash()
_, _ = io.Copy(h, reader)
return fmt.Sprintf("%x", h.Sum(nil))
}
// Lock creates a file lock for the given v1beta1.Artifact.
func (s *Storage) Lock(artifact sourcev1.Artifact) (unlock func(), err error) {
lockFile := s.LocalPath(artifact) + ".lock"
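// The lock is an advisory file lock next to the artifact; the returned unlock function must be called (typically deferred) to release it.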
mutex := lockedfile.MutexAt(lockFile)
return mutex.Lock()
}
// LocalPath returns the secure local path of the given artifact within Storage.BasePath, or an empty string if the artifact has no path or it cannot be securely joined.
func (s *Storage) LocalPath(artifact sourcev1.Artifact) string {
if artifact.Path == "" {
return ""
}
path, err := securejoin.SecureJoin(s.BasePath, artifact.Path)
if err != nil {
return ""
}
return path
}
// newHash returns a new SHA256 hash.
func newHash() hash.Hash {
return sha256.New()
}

405
controllers/storage_test.go Normal file
View File

@ -0,0 +1,405 @@
/*
Copyright 2020, 2021 The Flux authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package controllers
import (
"archive/tar"
"compress/gzip"
"fmt"
"io"
"os"
"path"
"path/filepath"
"testing"
"time"
"github.com/go-git/go-git/v5/plumbing/format/gitignore"
sourcev1 "github.com/fluxcd/source-controller/api/v1beta1"
)
func createStoragePath() (string, error) {
return os.MkdirTemp("", "")
}
func cleanupStoragePath(dir string) func() {
return func() { os.RemoveAll(dir) }
}
func TestStorageConstructor(t *testing.T) {
dir, err := createStoragePath()
if err != nil {
t.Fatal(err)
}
t.Cleanup(cleanupStoragePath(dir))
if _, err := NewStorage("/nonexistent", "hostname", time.Minute); err == nil {
t.Fatal("nonexistent path was allowable in storage constructor")
}
f, err := os.CreateTemp(dir, "")
if err != nil {
t.Fatalf("while creating temporary file: %v", err)
}
f.Close()
if _, err := NewStorage(f.Name(), "hostname", time.Minute); err == nil {
os.Remove(f.Name())
t.Fatal("file path was accepted as basedir")
}
os.Remove(f.Name())
if _, err := NewStorage(dir, "hostname", time.Minute); err != nil {
t.Fatalf("Valid path did not successfully return: %v", err)
}
}
// walkTar walks a tar.gz and looks for entries matching the given name. It does not
// match symlinks properly at this time because that's painful.
func walkTar(tarFile string, match string, dir bool) (int64, bool, error) {
f, err := os.Open(tarFile)
if err != nil {
return 0, false, fmt.Errorf("could not open file: %w", err)
}
defer f.Close()
gzr, err := gzip.NewReader(f)
if err != nil {
return 0, false, fmt.Errorf("could not unzip file: %w", err)
}
defer gzr.Close()
tr := tar.NewReader(gzr)
for {
header, err := tr.Next()
if err == io.EOF {
break
} else if err != nil {
return 0, false, fmt.Errorf("corrupt tarball reading header: %w", err)
}
switch header.Typeflag {
case tar.TypeDir:
if header.Name == match && dir {
return 0, true, nil
}
case tar.TypeReg:
if header.Name == match {
return header.Size, true, nil
}
default:
// skip
}
}
return 0, false, nil
}
func TestStorage_Archive(t *testing.T) {
dir, err := createStoragePath()
if err != nil {
t.Fatal(err)
}
t.Cleanup(cleanupStoragePath(dir))
storage, err := NewStorage(dir, "hostname", time.Minute)
if err != nil {
t.Fatalf("error while bootstrapping storage: %v", err)
}
createFiles := func(files map[string][]byte) (dir string, err error) {
defer func() {
if err != nil && dir != "" {
os.RemoveAll(dir)
}
}()
dir, err = os.MkdirTemp("", "archive-test-files-")
if err != nil {
return
}
for name, b := range files {
absPath := filepath.Join(dir, name)
if err = os.MkdirAll(filepath.Dir(absPath), 0755); err != nil {
return
}
f, err := os.Create(absPath)
if err != nil {
return "", fmt.Errorf("could not create file %q: %w", absPath, err)
}
if n, err := f.Write(b); err != nil {
f.Close()
return "", fmt.Errorf("could not write %d bytes to file %q: %w", n, f.Name(), err)
}
f.Close()
}
return
}
matchFiles := func(t *testing.T, storage *Storage, artifact sourcev1.Artifact, files map[string][]byte, dirs []string) {
t.Helper()
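// A leading "!" on a name marks an entry that must be absent from the tarball.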
for name, b := range files {
mustExist := !(name[0:1] == "!")
if !mustExist {
name = name[1:]
}
s, exist, err := walkTar(storage.LocalPath(artifact), name, false)
if err != nil {
t.Fatalf("failed reading tarball: %v", err)
}
if bs := int64(len(b)); s != bs {
t.Fatalf("%q size %v != %v", name, s, bs)
}
if exist != mustExist {
if mustExist {
t.Errorf("could not find file %q in tarball", name)
} else {
t.Errorf("tarball contained excluded file %q", name)
}
}
}
for _, name := range dirs {
mustExist := !(name[0:1] == "!")
if !mustExist {
name = name[1:]
}
_, exist, err := walkTar(storage.LocalPath(artifact), name, true)
if err != nil {
t.Fatalf("failed reading tarball: %v", err)
}
if exist != mustExist {
if mustExist {
t.Errorf("could not find dir %q in tarball", name)
} else {
t.Errorf("tarball contained excluded file %q", name)
}
}
}
}
tests := []struct {
name string
files map[string][]byte
filter ArchiveFileFilter
want map[string][]byte
wantDirs []string
wantErr bool
}{
{
name: "no filter",
files: map[string][]byte{
".git/config": nil,
"file.jpg": []byte(`contents`),
"manifest.yaml": nil,
},
filter: nil,
want: map[string][]byte{
".git/config": nil,
"file.jpg": []byte(`contents`),
"manifest.yaml": nil,
},
},
{
name: "exclude VCS",
files: map[string][]byte{
".git/config": nil,
"manifest.yaml": nil,
},
wantDirs: []string{
"!.git",
},
filter: SourceIgnoreFilter(nil, nil),
want: map[string][]byte{
"!.git/config": nil,
"manifest.yaml": nil,
},
},
{
name: "custom",
files: map[string][]byte{
".git/config": nil,
"custom": nil,
"horse.jpg": nil,
},
filter: SourceIgnoreFilter([]gitignore.Pattern{
gitignore.ParsePattern("custom", nil),
}, nil),
want: map[string][]byte{
"!git/config": nil,
"!custom": nil,
"horse.jpg": nil,
},
wantErr: false,
},
{
name: "including directories",
files: map[string][]byte{
"test/.gitkeep": nil,
},
filter: SourceIgnoreFilter([]gitignore.Pattern{
gitignore.ParsePattern("custom", nil),
}, nil),
wantDirs: []string{
"test",
},
wantErr: false,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
dir, err := createFiles(tt.files)
if err != nil {
t.Error(err)
return
}
defer os.RemoveAll(dir)
artifact := sourcev1.Artifact{
Path: filepath.Join(randStringRunes(10), randStringRunes(10), randStringRunes(10)+".tar.gz"),
}
if err := storage.MkdirAll(artifact); err != nil {
t.Fatalf("artifact directory creation failed: %v", err)
}
if err := storage.Archive(&artifact, dir, tt.filter); (err != nil) != tt.wantErr {
t.Errorf("Archive() error = %v, wantErr %v", err, tt.wantErr)
}
matchFiles(t, storage, artifact, tt.want, tt.wantDirs)
})
}
}
func TestStorageRemoveAllButCurrent(t *testing.T) {
t.Run("bad directory in archive", func(t *testing.T) {
dir, err := os.MkdirTemp("", "")
if err != nil {
t.Fatal(err)
}
t.Cleanup(func() { os.RemoveAll(dir) })
s, err := NewStorage(dir, "hostname", time.Minute)
if err != nil {
t.Fatalf("Valid path did not successfully return: %v", err)
}
if err := s.RemoveAllButCurrent(sourcev1.Artifact{Path: path.Join(dir, "really", "nonexistent")}); err == nil {
t.Fatal("Did not error while pruning non-existent path")
}
})
}
func TestStorageCopyFromPath(t *testing.T) {
type File struct {
Name string
Content []byte
}
dir, err := createStoragePath()
if err != nil {
t.Fatal(err)
}
t.Cleanup(cleanupStoragePath(dir))
storage, err := NewStorage(dir, "hostname", time.Minute)
if err != nil {
t.Fatalf("error while bootstrapping storage: %v", err)
}
createFile := func(file *File) (absPath string, err error) {
defer func() {
if err != nil && dir != "" {
os.RemoveAll(dir)
}
}()
dir, err = os.MkdirTemp("", "test-files-")
if err != nil {
return
}
absPath = filepath.Join(dir, file.Name)
if err = os.MkdirAll(filepath.Dir(absPath), 0755); err != nil {
return
}
f, err := os.Create(absPath)
if err != nil {
return "", fmt.Errorf("could not create file %q: %w", absPath, err)
}
if n, err := f.Write(file.Content); err != nil {
f.Close()
return "", fmt.Errorf("could not write %d bytes to file %q: %w", n, f.Name(), err)
}
f.Close()
return
}
matchFile := func(t *testing.T, storage *Storage, artifact sourcev1.Artifact, file *File, expectMismatch bool) {
c, err := os.ReadFile(storage.LocalPath(artifact))
if err != nil {
t.Fatalf("failed reading file: %v", err)
}
if (string(c) != string(file.Content)) != expectMismatch {
t.Errorf("artifact content does not match and not expecting mismatch, got: %q, want: %q", string(c), string(file.Content))
}
}
tests := []struct {
name string
file *File
want *File
expectMismatch bool
}{
{
name: "content match",
file: &File{
Name: "manifest.yaml",
Content: []byte(`contents`),
},
want: &File{
Name: "manifest.yaml",
Content: []byte(`contents`),
},
},
{
name: "content not match",
file: &File{
Name: "manifest.yaml",
Content: []byte(`contents`),
},
want: &File{
Name: "manifest.yaml",
Content: []byte(`mismatch contents`),
},
expectMismatch: true,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
absPath, err := createFile(tt.file)
if err != nil {
t.Error(err)
return
}
defer os.RemoveAll(absPath)
artifact := sourcev1.Artifact{
Path: filepath.Join(randStringRunes(10), randStringRunes(10), randStringRunes(10)),
}
if err := storage.MkdirAll(artifact); err != nil {
t.Fatalf("artifact directory creation failed: %v", err)
}
if err := storage.CopyFromPath(&artifact, absPath); err != nil {
t.Errorf("CopyFromPath() error = %v", err)
}
matchFile(t, storage, artifact, tt.want, tt.expectMismatch)
})
}
}

195
controllers/suite_test.go Normal file
View File

@ -0,0 +1,195 @@
/*
Copyright 2020 The Flux authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package controllers
import (
"context"
"math/rand"
"net/http"
"os"
"path/filepath"
"testing"
"time"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"helm.sh/helm/v3/pkg/getter"
"k8s.io/client-go/kubernetes/scheme"
"k8s.io/client-go/rest"
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/envtest"
"sigs.k8s.io/controller-runtime/pkg/envtest/printer"
logf "sigs.k8s.io/controller-runtime/pkg/log"
"sigs.k8s.io/controller-runtime/pkg/log/zap"
sourcev1 "github.com/fluxcd/source-controller/api/v1beta1"
// +kubebuilder:scaffold:imports
)
// These tests use Ginkgo (BDD-style Go testing framework). Refer to
// http://onsi.github.io/ginkgo/ to learn more about Ginkgo.
var cfg *rest.Config
var k8sClient client.Client
var k8sManager ctrl.Manager
var testEnv *envtest.Environment
var storage *Storage
var examplePublicKey []byte
var examplePrivateKey []byte
var exampleCA []byte
var ctx context.Context
var cancel context.CancelFunc
func TestAPIs(t *testing.T) {
RegisterFailHandler(Fail)
RunSpecsWithDefaultAndCustomReporters(t,
"Controller Suite",
[]Reporter{printer.NewlineReporter{}})
}
var _ = BeforeSuite(func(done Done) {
logf.SetLogger(
zap.New(zap.WriteTo(GinkgoWriter), zap.UseDevMode(true)),
)
ctx, cancel = context.WithCancel(context.TODO())
By("bootstrapping test environment")
t := true
if os.Getenv("TEST_USE_EXISTING_CLUSTER") == "true" {
testEnv = &envtest.Environment{
UseExistingCluster: &t,
}
} else {
testEnv = &envtest.Environment{
CRDDirectoryPaths: []string{filepath.Join("..", "config", "crd", "bases")},
}
}
var err error
cfg, err = testEnv.Start()
Expect(err).ToNot(HaveOccurred())
Expect(cfg).ToNot(BeNil())
err = sourcev1.AddToScheme(scheme.Scheme)
Expect(err).NotTo(HaveOccurred())
// +kubebuilder:scaffold:scheme
Expect(loadExampleKeys()).To(Succeed())
tmpStoragePath, err := os.MkdirTemp("", "source-controller-storage-")
Expect(err).NotTo(HaveOccurred(), "failed to create tmp storage dir")
storage, err = NewStorage(tmpStoragePath, "localhost:5050", time.Second*30)
Expect(err).NotTo(HaveOccurred(), "failed to create tmp storage")
// serve artifacts from the filesystem, as done in main.go
fs := http.FileServer(http.Dir(tmpStoragePath))
http.Handle("/", fs)
go http.ListenAndServe(":5050", nil)
k8sManager, err = ctrl.NewManager(cfg, ctrl.Options{
Scheme: scheme.Scheme,
})
Expect(err).ToNot(HaveOccurred())
err = (&GitRepositoryReconciler{
Client: k8sManager.GetClient(),
Scheme: scheme.Scheme,
Storage: storage,
}).SetupWithManager(k8sManager)
Expect(err).ToNot(HaveOccurred(), "failed to setup GtRepositoryReconciler")
err = (&HelmRepositoryReconciler{
Client: k8sManager.GetClient(),
Scheme: scheme.Scheme,
Storage: storage,
Getters: getter.Providers{getter.Provider{
Schemes: []string{"http", "https"},
New: getter.NewHTTPGetter,
}},
}).SetupWithManager(k8sManager)
Expect(err).ToNot(HaveOccurred(), "failed to setup HelmRepositoryReconciler")
err = (&HelmChartReconciler{
Client: k8sManager.GetClient(),
Scheme: scheme.Scheme,
Storage: storage,
Getters: getter.Providers{getter.Provider{
Schemes: []string{"http", "https"},
New: getter.NewHTTPGetter,
}},
}).SetupWithManager(k8sManager)
Expect(err).ToNot(HaveOccurred(), "failed to setup HelmChartReconciler")
go func() {
defer GinkgoRecover()
err = k8sManager.Start(ctx)
Expect(err).ToNot(HaveOccurred())
}()
k8sClient = k8sManager.GetClient()
Expect(k8sClient).ToNot(BeNil())
close(done)
}, 60)
var _ = AfterSuite(func() {
cancel()
By("tearing down the test environment")
if storage != nil {
err := os.RemoveAll(storage.BasePath)
Expect(err).NotTo(HaveOccurred())
}
err := testEnv.Stop()
Expect(err).ToNot(HaveOccurred())
})
func init() {
rand.Seed(time.Now().UnixNano())
}
func loadExampleKeys() (err error) {
examplePublicKey, err = os.ReadFile("testdata/certs/server.pem")
if err != nil {
return err
}
examplePrivateKey, err = os.ReadFile("testdata/certs/server-key.pem")
if err != nil {
return err
}
exampleCA, err = os.ReadFile("testdata/certs/ca.pem")
return err
}
var letterRunes = []rune("abcdefghijklmnopqrstuvwxyz1234567890")
func randStringRunes(n int) string {
b := make([]rune, n)
for i := range b {
b[i] = letterRunes[rand.Intn(len(letterRunes))]
}
return string(b)
}

View File

@ -12,7 +12,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.
all: server-key.pem client-key.pem
all: server-key.pem
ca-key.pem: ca-csr.json
cfssl gencert -initca ca-csr.json | cfssljson -bare ca
@ -28,13 +28,3 @@ server-key.pem: server-csr.json ca-config.json ca-key.pem
server-csr.json | cfssljson -bare server
server.pem: server-key.pem
server.csr: server-key.pem
client-key.pem: client-csr.json ca-config.json ca-key.pem
cfssl gencert \
-ca=ca.pem \
-ca-key=ca-key.pem \
-config=ca-config.json \
-profile=web-servers \
client-csr.json | cfssljson -bare client
client.pem: client-key.pem
client.csr: client-key.pem

5
controllers/testdata/certs/ca-key.pem vendored Normal file
View File

@ -0,0 +1,5 @@
-----BEGIN EC PRIVATE KEY-----
MHcCAQEEIOH/u9dMcpVcZ0+X9Fc78dCTj8SHuXawhLjhu/ej64WToAoGCCqGSM49
AwEHoUQDQgAEruH/kPxtX3cyYR2G7TYmxLq6AHyzo/NGXc9XjGzdJutE2SQzn37H
dvSJbH+Lvqo9ik0uiJVRVdCYD1j7gNszGA==
-----END EC PRIVATE KEY-----

Some files were not shown because too many files have changed in this diff.