Compare commits
No commits in common. "main" and "v0.32.1" have entirely different histories.
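Because the two refs share no merge base, the three-dot form of `git diff` (which diffs from the common ancestor) cannot be used for this comparison; a plain two-dot, tree-to-tree diff still works. A minimal sketch for reproducing the comparison locally, assuming both refs have already been fetched:

```sh
# Three-dot diff needs a merge base and fails for unrelated histories:
#   git diff main...v0.32.1   # fatal: no merge base
# A plain two-ref diff compares the trees directly and always works:
git diff main v0.32.1

# Limit the output to a few paths of interest (illustrative selection):
git diff main v0.32.1 -- Makefile Dockerfile .github/workflows/
```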
@@ -1 +1 @@
-build/
+build/libgit2/

@@ -1,40 +0,0 @@
-version: 2
-
-updates:
-  - package-ecosystem: "gomod"
-    directory: "/"
-    labels: ["dependencies"]
-    schedule:
-      interval: "monthly"
-    groups:
-      go-deps:
-        patterns:
-          - "*"
-    allow:
-      - dependency-type: "direct"
-    ignore:
-      # Cloud SDK are updated manually
-      - dependency-name: "cloud.google.com/*"
-      - dependency-name: "github.com/Azure/azure-sdk-for-go/*"
-      # Kubernetes deps are updated by fluxcd/pkg/runtime
-      - dependency-name: "k8s.io/*"
-      - dependency-name: "sigs.k8s.io/*"
-      - dependency-name: "github.com/go-logr/*"
-      # OCI deps are updated by fluxcd/pkg/oci
-      - dependency-name: "github.com/docker/*"
-      - dependency-name: "github.com/distribution/*"
-      - dependency-name: "github.com/google/go-containerregistry*"
-      - dependency-name: "github.com/opencontainers/*"
-      # Helm deps are updated by fluxcd/pkg/helmtestserver
-      - dependency-name: "helm.sh/helm/*"
-      # Flux APIs are updated at release time
-      - dependency-name: "github.com/fluxcd/source-controller/api"
-  - package-ecosystem: "github-actions"
-    directory: "/"
-    labels: ["area/ci", "dependencies"]
-    groups:
-      ci:
-        patterns:
-          - "*"
-    schedule:
-      interval: "monthly"

@@ -1,39 +0,0 @@
-# Configuration file to declaratively configure labels
-# Ref: https://github.com/EndBug/label-sync#Config-files
-
-- name: area/bucket
-  description: Bucket related issues and pull requests
-  color: '#00b140'
-- name: area/git
-  description: Git related issues and pull requests
-  color: '#863faf'
-- name: area/helm
-  description: Helm related issues and pull requests
-  color: '#1673b6'
-- name: area/oci
-  description: OCI related issues and pull requests
-  color: '#c739ff'
-- name: area/storage
-  description: Storage related issues and pull requests
-  color: '#4b0082'
-- name: backport:release/v1.0.x
-  description: To be backported to release/v1.0.x
-  color: '#ffd700'
-- name: backport:release/v1.1.x
-  description: To be backported to release/v1.1.x
-  color: '#ffd700'
-- name: backport:release/v1.2.x
-  description: To be backported to release/v1.2.x
-  color: '#ffd700'
-- name: backport:release/v1.3.x
-  description: To be backported to release/v1.3.x
-  color: '#ffd700'
-- name: backport:release/v1.4.x
-  description: To be backported to release/v1.4.x
-  color: '#ffd700'
-- name: backport:release/v1.5.x
-  description: To be backported to release/v1.5.x
-  color: '#ffd700'
-- name: backport:release/v1.6.x
-  description: To be backported to release/v1.6.x
-  color: '#ffd700'

@@ -1,34 +0,0 @@
-name: backport
-
-on:
-  pull_request_target:
-    types: [closed, labeled]
-
-permissions:
-  contents: read
-
-jobs:
-  pull-request:
-    runs-on: ubuntu-latest
-    permissions:
-      contents: write
-      pull-requests: write
-    if: github.event.pull_request.state == 'closed' && github.event.pull_request.merged && (github.event_name != 'labeled' || startsWith('backport:', github.event.label.name))
-    steps:
-      - name: Checkout
-        uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
-        with:
-          ref: ${{ github.event.pull_request.head.sha }}
-      - name: Create backport PRs
-        uses: korthout/backport-action@436145e922f9561fc5ea157ff406f21af2d6b363 # v3.2.0
-        # xref: https://github.com/korthout/backport-action#inputs
-        with:
-          # Use token to allow workflows to be triggered for the created PR
-          github_token: ${{ secrets.BOT_GITHUB_TOKEN }}
-          # Match labels with a pattern `backport:<target-branch>`
-          label_pattern: '^backport:([^ ]+)$'
-          # A bit shorter pull-request title than the default
-          pull_title: '[${target_branch}] ${pull_title}'
-          # Simpler PR description than default
-          pull_description: |-
-            Automated backport to `${target_branch}`, triggered by a label in #${pull_number}.

@@ -1,9 +1,8 @@
-name: fuzz
+name: CIFuzz
 
 on:
   pull_request:
     branches:
-      - 'main'
-      - 'release/**'
+      - main
     paths-ignore:
       - 'CHANGELOG.md'
      - 'README.md'

@@ -11,21 +10,28 @@ on:
 
 permissions:
   contents: read
 
 jobs:
-  smoketest:
+  Fuzzing:
     runs-on: ubuntu-latest
     steps:
-      - name: Checkout
-        uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
-      - name: Setup Go
-        uses: actions/setup-go@d35c59abb061a4a6fb18e82ac0862c26744d6ab5 # v5.5.0
-        with:
-          go-version: 1.24.x
-          cache-dependency-path: |
-            **/go.sum
-            **/go.mod
-      - name: Smoke test Fuzzers
-        run: make fuzz-smoketest
-        env:
-          SKIP_COSIGN_VERIFICATION: true
+      - name: Checkout
+        uses: actions/checkout@v3
+      - name: Setup Go
+        uses: actions/setup-go@v3
+        with:
+          go-version: 1.19.x
+      - id: go-env
+        run: |
+          echo "::set-output name=go-mod-cache::$(go env GOMODCACHE)"
+      - name: Restore Go cache
+        uses: actions/cache@v3
+        with:
+          path: ${{ steps.go-env.outputs.go-mod-cache }}
+          key: ${{ runner.os }}-go-${{ hashFiles('**/go.sum') }}
+          restore-keys: |
+            ${{ runner.os }}-go
+      - name: Smoke test Fuzzers
+        run: make fuzz-smoketest
+        env:
+          SKIP_COSIGN_VERIFICATION: true

@@ -1,15 +1,14 @@
 name: e2e
 
 on:
   workflow_dispatch:
   pull_request:
-    branches:
-      - 'main'
-      - 'release/**'
     paths-ignore:
       - 'CHANGELOG.md'
       - 'README.md'
       - 'MAINTAINERS'
   push:
     branches:
-      - 'main'
-      - 'release/**'
+      - main
 
 permissions:
   contents: read # for actions/checkout to fetch code

@@ -20,23 +19,28 @@ jobs:
     runs-on: ubuntu-latest
     steps:
       - name: Checkout
-        uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
+        uses: actions/checkout@v3
       - name: Setup Go
-        uses: actions/setup-go@d35c59abb061a4a6fb18e82ac0862c26744d6ab5 # v5.5.0
+        uses: actions/setup-go@v3
         with:
-          go-version: 1.24.x
-          cache-dependency-path: |
-            **/go.sum
-            **/go.mod
+          go-version: 1.19.x
+      - name: Restore Go cache
+        uses: actions/cache@v3
+        with:
+          path: /home/runner/work/_temp/_github_home/go/pkg/mod
+          key: ${{ runner.os }}-go-${{ hashFiles('**/go.sum') }}
+          restore-keys: |
+            ${{ runner.os }}-go-
       - name: Enable integration tests
-        # Only run integration tests for main and release branches
-        if: github.ref == 'refs/heads/main' || startsWith(github.ref, 'refs/heads/release/')
+        # Only run integration tests for main branch
+        if: github.ref == 'refs/heads/main'
         run: |
           echo 'GO_TAGS=integration' >> $GITHUB_ENV
       - name: Setup Kubernetes
-        uses: helm/kind-action@a1b0e391336a6ee6713a0583f8c6240d70863de3 # v1.12.0
+        uses: engineerd/setup-kind@v0.5.0
         with:
           cluster_name: kind
+          version: v0.11.1
+          image: kindest/node:v1.21.1@sha256:69860bda5563ac81e3c0057d654b5253219618a22ec3a346306239bba8cfa1a6
       - name: Setup Kustomize
         uses: fluxcd/pkg/actions/kustomize@main
       - name: Setup Helm

@@ -46,8 +50,42 @@ jobs:
           SKIP_COSIGN_VERIFICATION: true
           CREATE_CLUSTER: false
         run: make e2e
-      - name: Print controller logs
-        if: always()
-        continue-on-error: true
-        run: |
-          kubectl -n source-system logs -l app=source-controller
+
+  kind-linux-arm64:
+    # Hosted on Equinix
+    # Docs: https://github.com/fluxcd/flux2/tree/main/.github/runners
+    runs-on: [self-hosted, Linux, ARM64, equinix]
+    steps:
+      - name: Checkout
+        uses: actions/checkout@v3
+      - name: Setup Go
+        uses: actions/setup-go@v3
+        with:
+          go-version: 1.19.x
+      - name: Enable integration tests
+        # Only run integration tests for main branch
+        if: github.ref == 'refs/heads/main'
+        run: |
+          echo 'GO_TAGS=integration' >> $GITHUB_ENV
+      - name: Prepare
+        id: prep
+        run: |
+          echo ::set-output name=CLUSTER::arm64-${GITHUB_SHA:0:7}-$(date +%s)
+          echo ::set-output name=CONTEXT::kind-arm64-${GITHUB_SHA:0:7}-$(date +%s)
+      - name: Setup Kubernetes Kind
+        run: |
+          kind create cluster --name ${{ steps.prep.outputs.CLUSTER }} --kubeconfig=/tmp/${{ steps.prep.outputs.CLUSTER }}
+      - name: Run e2e tests
+        env:
+          SKIP_COSIGN_VERIFICATION: true
+          KIND_CLUSTER_NAME: ${{ steps.prep.outputs.CLUSTER }}
+          KUBECONFIG: /tmp/${{ steps.prep.outputs.CLUSTER }}
+          CREATE_CLUSTER: false
+          BUILD_PLATFORM: linux/arm64
+          MINIO_TAG: RELEASE.2020-09-17T04-49-20Z-arm64
+        run: make e2e
+      - name: Cleanup
+        if: always()
+        run: |
+          kind delete cluster --name ${{ steps.prep.outputs.CLUSTER }}
+          rm /tmp/${{ steps.prep.outputs.CLUSTER }}

@@ -14,17 +14,18 @@ jobs:
   build:
     runs-on: ubuntu-latest
     steps:
-      - name: Checkout
-        uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
+      - uses: actions/checkout@v3
      - name: Setup QEMU
-        uses: docker/setup-qemu-action@29109295f81e9208d7d86ff1c6c12d2833863392 # v3.6.0
+        uses: docker/setup-qemu-action@v2
         with:
           platforms: all
       - name: Setup Docker Buildx
         id: buildx
-        uses: docker/setup-buildx-action@b5ca514318bd6ebac0fb2aedd5d36ec1b5c232a2 # v3.10.0
+        uses: docker/setup-buildx-action@v2
         with:
           buildkitd-flags: "--debug"
       - name: Build multi-arch container image
-        uses: docker/build-push-action@1dc73863535b631f98b2378be8619f83b136f4a0 # v6.17.0
+        uses: docker/build-push-action@v3
         with:
           push: false
           builder: ${{ steps.buildx.outputs.name }}

@@ -7,29 +7,22 @@ on:
     inputs:
       tag:
         description: 'image tag prefix'
-        default: 'preview'
+        default: 'rc'
         required: true
 
 permissions:
-  contents: read
+  contents: write # needed to write releases
+  id-token: write # needed for keyless signing
+  packages: write # needed for ghcr access
 
 env:
   CONTROLLER: ${{ github.event.repository.name }}
 
 jobs:
-  release:
-    outputs:
-      hashes: ${{ steps.slsa.outputs.hashes }}
-      image_url: ${{ steps.slsa.outputs.image_url }}
-      image_digest: ${{ steps.slsa.outputs.image_digest }}
+  build-push:
     runs-on: ubuntu-latest
-    permissions:
-      contents: write # for creating the GitHub release.
-      id-token: write # for creating OIDC tokens for signing.
-      packages: write # for pushing and signing container images.
     steps:
-      - name: Checkout
-        uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
+      - uses: actions/checkout@v3
       - name: Setup Kustomize
         uses: fluxcd/pkg/actions/kustomize@main
       - name: Prepare

@@ -39,27 +32,27 @@ jobs:
           if [[ $GITHUB_REF == refs/tags/* ]]; then
             VERSION=${GITHUB_REF/refs\/tags\//}
           fi
-          echo "BUILD_DATE=$(date -u +'%Y-%m-%dT%H:%M:%SZ')" >> $GITHUB_OUTPUT
-          echo "VERSION=${VERSION}" >> $GITHUB_OUTPUT
+          echo ::set-output name=BUILD_DATE::$(date -u +'%Y-%m-%dT%H:%M:%SZ')
+          echo ::set-output name=VERSION::${VERSION}
       - name: Setup QEMU
-        uses: docker/setup-qemu-action@29109295f81e9208d7d86ff1c6c12d2833863392 # v3.6.0
+        uses: docker/setup-qemu-action@v2
       - name: Setup Docker Buildx
         id: buildx
-        uses: docker/setup-buildx-action@b5ca514318bd6ebac0fb2aedd5d36ec1b5c232a2 # v3.10.0
+        uses: docker/setup-buildx-action@v2
       - name: Login to GitHub Container Registry
-        uses: docker/login-action@74a5d142397b4f367a81961eba4e8cd7edddf772 # v3.4.0
+        uses: docker/login-action@v2
         with:
           registry: ghcr.io
           username: fluxcdbot
           password: ${{ secrets.GHCR_TOKEN }}
       - name: Login to Docker Hub
-        uses: docker/login-action@74a5d142397b4f367a81961eba4e8cd7edddf772 # v3.4.0
+        uses: docker/login-action@v2
         with:
           username: fluxcdbot
           password: ${{ secrets.DOCKER_FLUXCD_PASSWORD }}
       - name: Generate images meta
         id: meta
-        uses: docker/metadata-action@902fa8ec7d6ecbf8d84d538b9b233a880e428804 # v5.7.0
+        uses: docker/metadata-action@v4
         with:
           images: |
             fluxcd/${{ env.CONTROLLER }}

@@ -67,11 +60,8 @@ jobs:
           tags: |
             type=raw,value=${{ steps.prep.outputs.VERSION }}
       - name: Publish images
         id: build-push
-        uses: docker/build-push-action@1dc73863535b631f98b2378be8619f83b136f4a0 # v6.17.0
+        uses: docker/build-push-action@v3
         with:
-          sbom: true
-          provenance: true
           push: true
           builder: ${{ steps.buildx.outputs.name }}
           context: .

@@ -79,82 +69,32 @@ jobs:
           platforms: linux/amd64,linux/arm/v7,linux/arm64
           tags: ${{ steps.meta.outputs.tags }}
           labels: ${{ steps.meta.outputs.labels }}
-      - uses: sigstore/cosign-installer@3454372f43399081ed03b604cb2d021dabca52bb # v3.8.2
       - name: Check images
         run: |
           docker buildx imagetools inspect docker.io/fluxcd/${{ env.CONTROLLER }}:${{ steps.prep.outputs.VERSION }}
           docker buildx imagetools inspect ghcr.io/fluxcd/${{ env.CONTROLLER }}:${{ steps.prep.outputs.VERSION }}
+          docker pull docker.io/fluxcd/${{ env.CONTROLLER }}:${{ steps.prep.outputs.VERSION }}
+          docker pull ghcr.io/fluxcd/${{ env.CONTROLLER }}:${{ steps.prep.outputs.VERSION }}
+      - uses: sigstore/cosign-installer@main
       - name: Sign images
+        env:
+          COSIGN_EXPERIMENTAL: 1
         run: |
-          cosign sign --yes fluxcd/${{ env.CONTROLLER }}@${{ steps.build-push.outputs.digest }}
-          cosign sign --yes ghcr.io/fluxcd/${{ env.CONTROLLER }}@${{ steps.build-push.outputs.digest }}
+          cosign sign fluxcd/${{ env.CONTROLLER }}:${{ steps.prep.outputs.VERSION }}
+          cosign sign ghcr.io/fluxcd/${{ env.CONTROLLER }}:${{ steps.prep.outputs.VERSION }}
       - name: Generate release artifacts
         if: startsWith(github.ref, 'refs/tags/v')
         run: |
           mkdir -p config/release
           kustomize build ./config/crd > ./config/release/${{ env.CONTROLLER }}.crds.yaml
           kustomize build ./config/manager > ./config/release/${{ env.CONTROLLER }}.deployment.yaml
-      - uses: anchore/sbom-action/download-syft@e11c554f704a0b820cbf8c51673f6945e0731532 # v0.20.0
+          echo '[CHANGELOG](https://github.com/fluxcd/${{ env.CONTROLLER }}/blob/main/CHANGELOG.md)' > ./config/release/notes.md
+      - uses: anchore/sbom-action/download-syft@v0
       - name: Create release and SBOM
-        id: run-goreleaser
         if: startsWith(github.ref, 'refs/tags/v')
-        uses: goreleaser/goreleaser-action@9c156ee8a17a598857849441385a2041ef570552 # v6.3.0
+        uses: goreleaser/goreleaser-action@v3
         with:
           version: latest
-          args: release --clean --skip=validate
+          args: release --release-notes=config/release/notes.md --rm-dist --skip-validate
         env:
           GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
-      - name: Generate SLSA metadata
-        id: slsa
-        env:
-          ARTIFACTS: "${{ steps.run-goreleaser.outputs.artifacts }}"
-        run: |
-          hashes=$(echo -E $ARTIFACTS | jq --raw-output '.[] | {name, "digest": (.extra.Digest // .extra.Checksum)} | select(.digest) | {digest} + {name} | join(" ") | sub("^sha256:";"")' | base64 -w0)
-          echo "hashes=$hashes" >> $GITHUB_OUTPUT
-
-          image_url=fluxcd/${{ env.CONTROLLER }}:${{ steps.prep.outputs.version }}
-          echo "image_url=$image_url" >> $GITHUB_OUTPUT
-
-          image_digest=${{ steps.build-push.outputs.digest }}
-          echo "image_digest=$image_digest" >> $GITHUB_OUTPUT
-
-  release-provenance:
-    needs: [release]
-    permissions:
-      actions: read # for detecting the Github Actions environment.
-      id-token: write # for creating OIDC tokens for signing.
-      contents: write # for uploading attestations to GitHub releases.
-    if: startsWith(github.ref, 'refs/tags/v')
-    uses: slsa-framework/slsa-github-generator/.github/workflows/generator_generic_slsa3.yml@v2.1.0
-    with:
-      provenance-name: "provenance.intoto.jsonl"
-      base64-subjects: "${{ needs.release.outputs.hashes }}"
-      upload-assets: true
-
-  dockerhub-provenance:
-    needs: [release]
-    permissions:
-      actions: read # for detecting the Github Actions environment.
-      id-token: write # for creating OIDC tokens for signing.
-      packages: write # for uploading attestations.
-    if: startsWith(github.ref, 'refs/tags/v')
-    uses: slsa-framework/slsa-github-generator/.github/workflows/generator_container_slsa3.yml@v2.1.0
-    with:
-      image: ${{ needs.release.outputs.image_url }}
-      digest: ${{ needs.release.outputs.image_digest }}
-      registry-username: fluxcdbot
-    secrets:
-      registry-password: ${{ secrets.DOCKER_FLUXCD_PASSWORD }}
-
-  ghcr-provenance:
-    needs: [release]
-    permissions:
-      actions: read # for detecting the Github Actions environment.
-      id-token: write # for creating OIDC tokens for signing.
-      packages: write # for uploading attestations.
-    if: startsWith(github.ref, 'refs/tags/v')
-    uses: slsa-framework/slsa-github-generator/.github/workflows/generator_container_slsa3.yml@v2.1.0
-    with:
-      image: ghcr.io/${{ needs.release.outputs.image_url }}
-      digest: ${{ needs.release.outputs.image_digest }}
-      registry-username: fluxcdbot
-    secrets:
-      registry-password: ${{ secrets.GHCR_TOKEN }}

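The change to the Sign images step above tracks the cosign 1.x to 2.x transition: keyless signing was experimental in 1.x (enabled via `COSIGN_EXPERIMENTAL=1`) and became the default in 2.x, where `--yes` skips the interactive confirmation. As a hedged sketch, a signature produced this way could be checked with cosign 2.x roughly as follows (the tag and identity regexp are illustrative placeholders, not values from this diff):

```sh
# Verify a keyless cosign signature against the GitHub Actions OIDC issuer.
# <tag> and the identity regexp below are illustrative placeholders.
cosign verify ghcr.io/fluxcd/source-controller:<tag> \
  --certificate-oidc-issuer=https://token.actions.githubusercontent.com \
  --certificate-identity-regexp='^https://github\.com/fluxcd/'
```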
@@ -1,10 +1,10 @@
-name: scan
+name: Scan
 
 on:
   push:
-    branches: [ 'main', 'release/**' ]
+    branches: [ main ]
   pull_request:
-    branches: [ 'main', 'release/**' ]
+    branches: [ main ]
   schedule:
     - cron: '18 10 * * 3'

@@ -17,10 +17,9 @@ jobs:
     name: FOSSA
     runs-on: ubuntu-latest
     steps:
-      - name: Checkout
-        uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
+      - uses: actions/checkout@v3
       - name: Run FOSSA scan and upload build data
-        uses: fossa-contrib/fossa-action@3d2ef181b1820d6dcd1972f86a767d18167fa19b # v3.0.1
+        uses: fossa-contrib/fossa-action@v1
         with:
           # FOSSA Push-Only API Token
           fossa-api-key: 5ee8bf422db1471e0bcf2bcb289185de

@@ -30,23 +29,17 @@ jobs:
     name: CodeQL
     runs-on: ubuntu-latest
     steps:
-      - name: Checkout
-        uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
-      - name: Setup Go
-        uses: actions/setup-go@d35c59abb061a4a6fb18e82ac0862c26744d6ab5 # v5.5.0
+      - name: Checkout repository
+        uses: actions/checkout@v3
+      - name: Set up Go
+        uses: actions/setup-go@v2
         with:
-          go-version: 1.24.x
-          cache-dependency-path: |
-            **/go.sum
-            **/go.mod
+          go-version: 1.19.x
       - name: Initialize CodeQL
-        uses: github/codeql-action/init@ff0a06e83cb2de871e5a09832bc6a81e7276941f # v3.28.18
+        uses: github/codeql-action/init@v2
         with:
           languages: go
           # xref: https://docs.github.com/en/code-security/code-scanning/automatically-scanning-your-code-for-vulnerabilities-and-errors/configuring-code-scanning#using-queries-in-ql-packs
           # xref: https://codeql.github.com/codeql-query-help/go/
           queries: security-and-quality
       - name: Autobuild
-        uses: github/codeql-action/autobuild@ff0a06e83cb2de871e5a09832bc6a81e7276941f # v3.28.18
+        uses: github/codeql-action/autobuild@v2
       - name: Perform CodeQL Analysis
-        uses: github/codeql-action/analyze@ff0a06e83cb2de871e5a09832bc6a81e7276941f # v3.28.18
+        uses: github/codeql-action/analyze@v2

@@ -1,28 +0,0 @@
-name: sync-labels
-on:
-  workflow_dispatch:
-  push:
-    branches:
-      - main
-    paths:
-      - .github/labels.yaml
-
-permissions:
-  contents: read
-
-jobs:
-  labels:
-    name: Run sync
-    runs-on: ubuntu-latest
-    permissions:
-      issues: write
-    steps:
-      - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
-      - uses: EndBug/label-sync@52074158190acb45f3077f9099fea818aa43f97a # v2.3.3
-        with:
-          # Configuration file
-          config-file: |
-            https://raw.githubusercontent.com/fluxcd/community/main/.github/standard-labels.yaml
-            .github/labels.yaml
-          # Strictly declarative
-          delete-other-labels: true

@@ -1,15 +1,15 @@
 name: tests
 
 on:
   workflow_dispatch:
   pull_request:
-    branches:
-      - 'main'
-      - 'release/**'
     paths-ignore:
       - 'CHANGELOG.md'
       - 'README.md'
       - 'MAINTAINERS'
+
   push:
     branches:
-      - 'main'
-      - 'release/**'
+      - main
 
 permissions:
   contents: read # for actions/checkout to fetch code

@@ -20,14 +20,18 @@ jobs:
     runs-on: ubuntu-latest
     steps:
       - name: Checkout
-        uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
+        uses: actions/checkout@v3
       - name: Setup Go
-        uses: actions/setup-go@d35c59abb061a4a6fb18e82ac0862c26744d6ab5 # v5.5.0
+        uses: actions/setup-go@v3
         with:
-          go-version: 1.24.x
-          cache-dependency-path: |
-            **/go.sum
-            **/go.mod
+          go-version: 1.19.x
+      - name: Restore Go cache
+        uses: actions/cache@v3
+        with:
+          path: /home/runner/work/_temp/_github_home/go/pkg/mod
+          key: ${{ runner.os }}-go-${{ hashFiles('**/go.sum') }}
+          restore-keys: |
+            ${{ runner.os }}-go-
       - name: Run tests
         env:
           SKIP_COSIGN_VERIFICATION: true

@@ -36,22 +40,55 @@ jobs:
         run: make test
 
   test-linux-arm64:
-    runs-on:
-      group: "ARM64"
     if: github.actor != 'dependabot[bot]'
+    # Hosted on Equinix
+    # Docs: https://github.com/fluxcd/flux2/tree/main/.github/runners
+    runs-on: [self-hosted, Linux, ARM64, equinix]
     steps:
       - name: Checkout
-        uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
+        uses: actions/checkout@v3
       - name: Setup Go
-        uses: actions/setup-go@d35c59abb061a4a6fb18e82ac0862c26744d6ab5 # v5.5.0
+        uses: actions/setup-go@v3
         with:
-          go-version: 1.24.x
-          cache-dependency-path: |
-            **/go.sum
-            **/go.mod
+          go-version: 1.19.x
       - name: Run tests
         env:
           SKIP_COSIGN_VERIFICATION: true
 
           TEST_AZURE_ACCOUNT_NAME: ${{ secrets.TEST_AZURE_ACCOUNT_NAME }}
           TEST_AZURE_ACCOUNT_KEY: ${{ secrets.TEST_AZURE_ACCOUNT_KEY }}
+
+          # Temporarily disabling -race for arm64 as our GitHub action
+          # runners don't seem to like it. The race detection was tested
+          # on both Apple M1 and Linux arm64 with successful results.
+          #
+          # We should reenable go test -race for arm64 runners once the
+          # current issue is resolved.
+          GO_TEST_ARGS: ''
         run: make test
+
+  # Runs 'make test' on MacOS to ensure the continuous support for contributors
+  # using it as a development environment.
+  darwin-amd64:
+    strategy:
+      matrix:
+        os: [macos-12]
+      fail-fast: false
+    runs-on: ${{ matrix.os }}
+    steps:
+      - name: Checkout
+        uses: actions/checkout@v3
+      - name: Setup Go
+        uses: actions/setup-go@v3
+        with:
+          go-version: 1.19.x
+      - name: Restore Go cache
+        uses: actions/cache@v3
+        with:
+          path: /home/runner/work/_temp/_github_home/go/pkg/mod
+          key: ${{ runner.os }}-go-${{ hashFiles('**/go.sum') }}
+          restore-keys: |
+            ${{ runner.os }}-go-
+      - name: Run tests
+        run: make test
+        env:
+          SKIP_COSIGN_VERIFICATION: true

@@ -2,13 +2,14 @@ name: verify
 
 on:
   pull_request:
-    branches:
-      - 'main'
-      - 'release/**'
     paths-ignore:
       - 'CHANGELOG.md'
       - 'README.md'
       - 'MAINTAINERS'
+
   push:
     branches:
-      - 'main'
-      - 'release/**'
+      - main
 
 permissions:
   contents: read # for actions/checkout to fetch code

@@ -19,13 +20,17 @@ jobs:
     runs-on: ubuntu-latest
     steps:
       - name: Checkout
-        uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
+        uses: actions/checkout@v3
       - name: Setup Go
-        uses: actions/setup-go@d35c59abb061a4a6fb18e82ac0862c26744d6ab5 # v5.5.0
+        uses: actions/setup-go@v3
         with:
-          go-version: 1.24.x
-          cache-dependency-path: |
-            **/go.sum
-            **/go.mod
+          go-version: 1.19.x
+      - name: Restore Go cache
+        uses: actions/cache@v3
+        with:
+          path: /home/runner/work/_temp/_github_home/go/pkg/mod
+          key: ${{ runner.os }}-go-${{ hashFiles('**/go.sum') }}
+          restore-keys: |
+            ${{ runner.os }}-go-
       - name: Verify
         run: make verify

@@ -4,26 +4,9 @@ builds:
   - skip: true
 
 release:
-  prerelease: "true"
   extra_files:
     - glob: config/release/*.yaml
+  prerelease: "auto"
-  header: |
-    ## Changelog
-
-    [{{.Tag}} changelog](https://github.com/fluxcd/{{.ProjectName}}/blob/{{.Tag}}/CHANGELOG.md)
-  footer: |
-    ## Container images
-
-    - `docker.io/fluxcd/{{.ProjectName}}:{{.Tag}}`
-    - `ghcr.io/fluxcd/{{.ProjectName}}:{{.Tag}}`
-
-    Supported architectures: `linux/amd64`, `linux/arm64` and `linux/arm/v7`.
-
-    The container images are built on GitHub hosted runners and are signed with cosign and GitHub OIDC.
-    To verify the images and their provenance (SLSA level 3), please see the [security documentation](https://fluxcd.io/flux/security/).
-
-changelog:
-  disable: true
 
 checksum:
   extra_files:

@@ -49,7 +32,6 @@ signs:
     certificate: "${artifact}.pem"
     args:
       - sign-blob
-      - "--yes"
       - "--output-certificate=${certificate}"
       - "--output-signature=${signature}"
       - "${artifact}"

CHANGELOG.md (1118 changed lines): diff suppressed because it is too large.

@@ -13,9 +13,30 @@ There are a number of dependencies required to be able to run the controller and
 - [Install Docker](https://docs.docker.com/engine/install/)
 - (Optional) [Install Kubebuilder](https://book.kubebuilder.io/quick-start.html#installation)
 
+The [libgit2](https://libgit2.org/) dependency is now automatically managed by the Makefile logic.
+However, it depends on [pkg-config](https://freedesktop.org/wiki/Software/pkg-config/) being installed:
+
+### macOS
+
+```console
+$ # Ensure pkg-config is installed
+$ brew install pkg-config
+```
+
+### Linux
+
+```console
+$ # Ensure pkg-config is installed
+$ pacman -S pkgconf
+```
+
+**Note:** Example shown is for Arch Linux, but likewise procedure can be
+followed using any other package manager. Some distributions may have slight
+variation of package names (e.g. `apt install -y pkg-config`).
+
 In addition to the above, the following dependencies are also used by some of the `make` targets:
 
-- `controller-gen` (v0.12.0)
+- `controller-gen` (v0.7.0)
 - `gen-crd-api-reference-docs` (v0.3.0)
 - `setup-envtest` (latest)

@@ -24,7 +45,7 @@ If any of the above dependencies are not present on your system, the first invoc
 ## How to run the test suite
 
 Prerequisites:
-* Go >= 1.24
+* Go >= 1.18
 
 You can run the test suite by simply doing

@@ -58,7 +79,7 @@ make run
 
 ### Building the container image
 
-Set the name of the container image to be created from the source code. This will be used
+Set the name of the container image to be created from the source code. This will be used
 when building, pushing and referring to the image on YAML files:
 
 ```sh

@@ -79,7 +100,7 @@ make docker-push
 ```
 
 Alternatively, the three steps above can be done in a single line:
 
 ```sh
 IMG=registry-path/source-controller TAG=latest BUILD_ARGS=--push \
   make docker-build

@@ -128,12 +149,18 @@ Create a `.vscode/launch.json` file:
       "type": "go",
       "request": "launch",
       "mode": "auto",
-      "program": "${workspaceFolder}/main.go",
-      "args": ["--storage-adv-addr=:0", "--storage-path=${workspaceFolder}/bin/data"]
+      "envFile": "${workspaceFolder}/build/.env",
+      "program": "${workspaceFolder}/main.go"
     }
   ]
 }
 ```
 
+Create the environment file containing details on how to load
+`libgit2` dependencies:
+```bash
+make env
+```
+
 Start debugging by either clicking `Run` > `Start Debugging` or using
 the relevant shortcut.

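For context on the `envFile` entry added to `launch.json` above: the `env` target introduced in the Makefile later in this diff writes the cgo/libgit2 settings into `build/.env`. A sketch of roughly what that generated file contains, with illustrative values (the real ones are emitted from pkg-config and the downloaded libgit2 build):

```sh
# build/.env as written by `make env` (values are illustrative placeholders)
GO_ENABLED="1"
PKG_CONFIG_PATH="build/libgit2/v0.4.0/lib/pkgconfig"
LIBRARY_PATH="build/libgit2/v0.4.0/lib"
CGO_CFLAGS="..."   # hypothetical; taken from pkg-config output
CGO_LDFLAGS="..."  # hypothetical; taken from pkg-config output
KUBEBUILDER_ASSETS=... # path reported by setup-envtest
GIT_CONFIG_GLOBAL=/dev/null
```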
Dockerfile (62 changed lines)

@@ -1,15 +1,28 @@
-ARG GO_VERSION=1.24
-ARG XX_VERSION=1.6.1
+ARG BASE_VARIANT=alpine
+ARG GO_VERSION=1.19
+ARG XX_VERSION=1.1.2
+
+ARG LIBGIT2_IMG=ghcr.io/fluxcd/golang-with-libgit2-only
+ARG LIBGIT2_TAG=v0.4.0
+
+FROM ${LIBGIT2_IMG}:${LIBGIT2_TAG} AS libgit2-libs
 
 FROM --platform=$BUILDPLATFORM tonistiigi/xx:${XX_VERSION} AS xx
 
 # Docker buildkit multi-arch build requires golang alpine
-FROM --platform=$BUILDPLATFORM golang:${GO_VERSION}-alpine AS builder
+FROM --platform=$BUILDPLATFORM golang:${GO_VERSION}-${BASE_VARIANT} as gostable
+
+FROM gostable AS go-linux
+
+# Build-base consists of build platform dependencies and xx.
+# These will be used at current arch to yield execute the cross compilations.
+FROM go-${TARGETOS} AS build-base
+
+RUN apk add --no-cache clang lld pkgconfig
 
 # Copy the build utilities.
 COPY --from=xx / /
 
-ARG TARGETPLATFORM
+# build-go-mod can still be cached at build platform architecture.
+FROM build-base as build-go-mod
 
 # Configure workspace
 WORKDIR /workspace

@@ -24,25 +37,52 @@ COPY go.sum go.sum
 # Cache modules
 RUN go mod download
 
+# Build stage install per target platform
+# dependency and effectively cross compile the application.
+FROM build-go-mod as build
+
+ARG TARGETPLATFORM
+
+COPY --from=libgit2-libs /usr/local/ /usr/local/
+
+# Some dependencies have to installed
+# for the target platform: https://github.com/tonistiigi/xx#go--cgo
+RUN xx-apk add musl-dev gcc clang lld
+
+WORKDIR /workspace
+
 # Copy source code
 COPY main.go main.go
 COPY controllers/ controllers/
 COPY pkg/ pkg/
 COPY internal/ internal/
 
-ARG TARGETPLATFORM
-ARG TARGETARCH
+ENV CGO_ENABLED=1
 
-# build without specifing the arch
-ENV CGO_ENABLED=0
-RUN xx-go build -trimpath -a -o source-controller main.go
+# Instead of using xx-go, (cross) compile with vanilla go leveraging musl tool chain.
+RUN export PKG_CONFIG_PATH="/usr/local/$(xx-info triple)/lib/pkgconfig" && \
+    export CGO_LDFLAGS="$(pkg-config --static --libs --cflags libgit2) -static -fuse-ld=lld" && \
+    xx-go build \
+    -ldflags "-s -w" \
+    -tags 'netgo,osusergo,static_build' \
+    -o /source-controller -trimpath main.go;
 
-FROM alpine:3.21
+# Ensure that the binary was cross-compiled correctly to the target platform.
+RUN xx-verify --static /source-controller
+
+FROM alpine:3.16
 
 ARG TARGETPLATFORM
 RUN apk --no-cache add ca-certificates \
   && update-ca-certificates
 
-COPY --from=builder /workspace/source-controller /usr/local/bin/
+# Copy over binary from build
+COPY --from=build /source-controller /usr/local/bin/
 COPY ATTRIBUTIONS.md /
 
 USER 65534:65534
 ENTRYPOINT [ "source-controller" ]

@@ -7,4 +7,6 @@ from the main Flux v2 git repository, as listed in
 
 https://github.com/fluxcd/flux2/blob/main/MAINTAINERS
 
-Dipti Pai, Microsoft <diptipai@microsoft.com> (github: @dipti-pai, slack: Dipti Pai)
+In alphabetical order:
+
+Paulo Gomes, Weaveworks <paulo.gomes@weave.works> (github: @pjbgf, slack: pjbgf)

Makefile (119 changed lines)

@@ -2,6 +2,10 @@
 IMG ?= fluxcd/source-controller
 TAG ?= latest
 
+# Base image used to build the Go binary
+LIBGIT2_IMG ?= ghcr.io/fluxcd/golang-with-libgit2-only
+LIBGIT2_TAG ?= v0.4.0
+
 # Allows for defining additional Go test args, e.g. '-tags integration'.
 GO_TEST_ARGS ?= -race

@@ -29,17 +33,21 @@ REPOSITORY_ROOT := $(shell git rev-parse --show-toplevel)
 BUILD_DIR := $(REPOSITORY_ROOT)/build
 
 # Other dependency versions
-ENVTEST_BIN_VERSION ?= 1.24.0
+ENVTEST_BIN_VERSION ?= 1.19.2
 
-# FUZZ_TIME defines the max amount of time, in Go Duration,
-# each fuzzer should run for.
-FUZZ_TIME ?= 1m
+# Caches libgit2 versions per tag, "forcing" rebuild only when needed.
+LIBGIT2_PATH := $(BUILD_DIR)/libgit2/$(LIBGIT2_TAG)
+LIBGIT2_LIB_PATH := $(LIBGIT2_PATH)/lib
+LIBGIT2 := $(LIBGIT2_LIB_PATH)/libgit2.a
+
+export CGO_ENABLED=1
+export PKG_CONFIG_PATH=$(LIBGIT2_LIB_PATH)/pkgconfig
+export CGO_LDFLAGS=$(shell PKG_CONFIG_PATH=$(PKG_CONFIG_PATH) pkg-config --libs --static --cflags libgit2 2>/dev/null)
 GO_STATIC_FLAGS=-ldflags "-s -w" -tags 'netgo,osusergo,static_build$(addprefix ,,$(GO_TAGS))'
 
 # API (doc) generation utilities
-CONTROLLER_GEN_VERSION ?= v0.16.1
-GEN_API_REF_DOCS_VERSION ?= e327d0730470cbd61b06300f81c5fcf91c23c113
+CONTROLLER_GEN_VERSION ?= v0.7.0
+GEN_API_REF_DOCS_VERSION ?= v0.3.0
 
 # If gobin not set, create one on ./build and add to path.
 ifeq (,$(shell go env GOBIN))

@@ -61,38 +69,40 @@ ifeq ($(shell uname -s),Darwin)
     ENVTEST_ARCH=amd64
 endif
 
-all: manager
+all: build
 
-# Build manager binary
-manager: generate fmt vet
+build: check-deps $(LIBGIT2) ## Build manager binary
     go build $(GO_STATIC_FLAGS) -o $(BUILD_DIR)/bin/manager main.go
 
 KUBEBUILDER_ASSETS?="$(shell $(ENVTEST) --arch=$(ENVTEST_ARCH) use -i $(ENVTEST_KUBERNETES_VERSION) --bin-dir=$(ENVTEST_ASSETS_DIR) -p path)"
-test: install-envtest test-api ## Run all tests
+test: $(LIBGIT2) install-envtest test-api check-deps ## Run all tests
     HTTPS_PROXY="" HTTP_PROXY="" \
     KUBEBUILDER_ASSETS=$(KUBEBUILDER_ASSETS) \
     GIT_CONFIG_GLOBAL=/dev/null \
-    GIT_CONFIG_NOSYSTEM=true \
     go test $(GO_STATIC_FLAGS) \
     ./... \
    $(GO_TEST_ARGS) \
     -coverprofile cover.out
 
-test-ctrl: install-envtest test-api ## Run controller tests
+test-ctrl: $(LIBGIT2) install-envtest test-api check-deps ## Run controller tests
     HTTPS_PROXY="" HTTP_PROXY="" \
     KUBEBUILDER_ASSETS=$(KUBEBUILDER_ASSETS) \
     GIT_CONFIG_GLOBAL=/dev/null \
     go test $(GO_STATIC_FLAGS) \
     -run "^$(GO_TEST_PREFIX).*" \
-    -v ./internal/controller \
+    -v ./controllers \
     -coverprofile cover.out
 
+check-deps:
+ifeq ($(shell uname -s),Darwin)
+    if ! command -v pkg-config &> /dev/null; then echo "pkg-config is required"; exit 1; fi
+endif
+
 test-api: ## Run api tests
     cd api; go test $(GO_TEST_ARGS) ./... -coverprofile cover.out
 
-run: generate fmt vet manifests ## Run against the configured Kubernetes cluster in ~/.kube/config
-    @mkdir -p $(PWD)/bin/data
-    go run $(GO_STATIC_FLAGS) ./main.go --storage-adv-addr=:0 --storage-path=$(PWD)/bin/data
+run: $(LIBGIT2) generate fmt vet manifests ## Run against the configured Kubernetes cluster in ~/.kube/config
+    go run $(GO_STATIC_FLAGS) ./main.go
 
 install: manifests ## Install CRDs into a cluster
     kustomize build config/crd | kubectl apply -f -

@@ -115,17 +125,18 @@ manifests: controller-gen ## Generate manifests, e.g. CRD, RBAC, etc.
     cd api; $(CONTROLLER_GEN) $(CRD_OPTIONS) rbac:roleName=manager-role paths="./..." output:crd:artifacts:config="../config/crd/bases"
 
 api-docs: gen-crd-api-reference-docs ## Generate API reference documentation
-    $(GEN_CRD_API_REFERENCE_DOCS) -api-dir=./api/v1 -config=./hack/api-docs/config.json -template-dir=./hack/api-docs/template -out-file=./docs/api/v1/source.md
+    $(GEN_CRD_API_REFERENCE_DOCS) -api-dir=./api/v1beta2 -config=./hack/api-docs/config.json -template-dir=./hack/api-docs/template -out-file=./docs/api/source.md
 
 tidy: ## Run go mod tidy
-    cd api; rm -f go.sum; go mod tidy -compat=1.24
-    rm -f go.sum; go mod tidy -compat=1.24
+    cd api; rm -f go.sum; go mod tidy -compat=1.19
+    rm -f go.sum; go mod tidy -compat=1.19
 
 fmt: ## Run go fmt against code
     go fmt ./...
     cd api; go fmt ./...
+    cd tests/fuzz; go fmt .
 
-vet: ## Run go vet against code
+vet: $(LIBGIT2) ## Run go vet against code
     go vet ./...
     cd api; go vet ./...

@@ -134,6 +145,8 @@ generate: controller-gen ## Generate API code
 
 docker-build: ## Build the Docker image
     docker buildx build \
+        --build-arg LIBGIT2_IMG=$(LIBGIT2_IMG) \
+        --build-arg LIBGIT2_TAG=$(LIBGIT2_TAG) \
         --platform=$(BUILD_PLATFORMS) \
         -t $(IMG):$(TAG) \
         $(BUILD_ARGS) .

@@ -145,13 +158,13 @@ docker-push: ## Push Docker image
 CONTROLLER_GEN = $(GOBIN)/controller-gen
 .PHONY: controller-gen
 controller-gen: ## Download controller-gen locally if necessary.
-    $(call go-install-tool,$(CONTROLLER_GEN),sigs.k8s.io/controller-tools/cmd/controller-gen@$(CONTROLLER_GEN_VERSION))
+    $(call go-install-tool,$(CONTROLLER_GEN),sigs.k8s.io/controller-tools/cmd/controller-gen@v0.8.0)
 
 # Find or download gen-crd-api-reference-docs
 GEN_CRD_API_REFERENCE_DOCS = $(GOBIN)/gen-crd-api-reference-docs
 .PHONY: gen-crd-api-reference-docs
 gen-crd-api-reference-docs: ## Download gen-crd-api-reference-docs locally if necessary
-    $(call go-install-tool,$(GEN_CRD_API_REFERENCE_DOCS),github.com/ahmetb/gen-crd-api-reference-docs@$(GEN_API_REF_DOCS_VERSION))
+    $(call go-install-tool,$(GEN_CRD_API_REFERENCE_DOCS),github.com/ahmetb/gen-crd-api-reference-docs@3f29e6853552dcf08a8e846b1225f275ed0f3e3b)
 
 ENVTEST = $(GOBIN)/setup-envtest
 .PHONY: envtest

@@ -166,14 +179,40 @@ install-envtest: setup-envtest ## Download envtest binaries locally.
     # setup-envtest sets anything below k8s to 0555
     chmod -R u+w $(BUILD_DIR)/testbin
 
-COSIGN = $(GOBIN)/cosign
-    $(call go-install-tool,$(COSIGN),github.com/sigstore/cosign/cmd/cosign@latest)
+libgit2: $(LIBGIT2) ## Detect or download libgit2 library
+
+$(LIBGIT2):
+    IMG=$(LIBGIT2_IMG) TAG=$(LIBGIT2_TAG) PATH=$(PATH):$(GOBIN) ./hack/install-libraries.sh
 
 .PHONY: help
 help: ## Display this help menu
     @awk 'BEGIN {FS = ":.*##"; printf "\nUsage:\n make \033[36m<target>\033[0m\n"} /^[a-zA-Z_0-9-]+:.*?##/ { printf " \033[36m%-20s\033[0m %s\n", $$1, $$2 } /^##@/ { printf "\n\033[1m%s\033[0m\n", substr($$0, 5) } ' $(MAKEFILE_LIST)
 
+update-attributions:
+    ./hack/update-attributions.sh
+
 e2e:
     ./hack/ci/e2e.sh
 
-verify: fmt vet manifests api-docs tidy
+verify: update-attributions fmt vet manifests api-docs
+ifneq ($(shell grep -o 'LIBGIT2_IMG ?= \w.*' Makefile | cut -d ' ' -f 3):$(shell grep -o 'LIBGIT2_TAG ?= \w.*' Makefile | cut -d ' ' -f 3), \
+    $(shell grep -o "LIBGIT2_IMG=\w.*" Dockerfile | cut -d'=' -f2):$(shell grep -o "LIBGIT2_TAG=\w.*" Dockerfile | cut -d'=' -f2))
+    @{ \
+    echo "LIBGIT2_IMG and LIBGIT2_TAG must match in both Makefile and Dockerfile"; \
+    exit 1; \
+    }
+endif
+ifneq ($(shell grep -o 'LIBGIT2_TAG ?= \w.*' Makefile | cut -d ' ' -f 3), $(shell grep -o "LIBGIT2_TAG=.*" tests/fuzz/oss_fuzz_build.sh | sed 's;LIBGIT2_TAG="$${LIBGIT2_TAG:-;;g' | sed 's;}";;g'))
+    @{ \
+    echo "LIBGIT2_TAG must match in both Makefile and tests/fuzz/oss_fuzz_build.sh"; \
+    exit 1; \
+    }
+endif
+
     @if [ ! "$$(git status --porcelain --untracked-files=no)" = "" ]; then \
         echo "working directory is dirty:"; \
         git --no-pager diff; \

@@ -188,33 +227,39 @@ TMP_DIR=$$(mktemp -d) ;\
 cd $$TMP_DIR ;\
 go mod init tmp ;\
 echo "Downloading $(2)" ;\
-env -i bash -c "GOBIN=$(GOBIN) PATH=\"$(PATH)\" GOPATH=$(shell go env GOPATH) GOCACHE=$(shell go env GOCACHE) go install $(2)" ;\
+env -i bash -c "GOBIN=$(GOBIN) PATH=$(PATH) GOPATH=$(shell go env GOPATH) GOCACHE=$(shell go env GOCACHE) go install $(2)" ;\
 rm -rf $$TMP_DIR ;\
 }
 endef
 
-# Build fuzzers used by oss-fuzz.
-fuzz-build:
-    rm -rf $(shell pwd)/build/fuzz/
-    mkdir -p $(shell pwd)/build/fuzz/out/
+# Build fuzzers
+fuzz-build: $(LIBGIT2)
+    rm -rf $(BUILD_DIR)/fuzz/
+    mkdir -p $(BUILD_DIR)/fuzz/out/
 
-    docker build . --tag local-fuzzing:latest -f tests/fuzz/Dockerfile.builder
+    docker build . --pull --tag local-fuzzing:latest -f tests/fuzz/Dockerfile.builder
     docker run --rm \
         -e FUZZING_LANGUAGE=go -e SANITIZER=address \
         -e CIFUZZ_DEBUG='True' -e OSS_FUZZ_PROJECT_NAME=fluxcd \
-        -v "$(shell pwd)/build/fuzz/out":/out \
+        -v "$(BUILD_DIR)/fuzz/out":/out \
         local-fuzzing:latest
 
 # Run each fuzzer once to ensure they will work when executed by oss-fuzz.
 fuzz-smoketest: fuzz-build
     docker run --rm \
-        -v "$(shell pwd)/build/fuzz/out":/out \
+        -v "$(BUILD_DIR)/fuzz/out":/out \
         -v "$(shell go env GOMODCACHE):/root/go/pkg/mod" \
         -v "$(shell pwd)/tests/fuzz/oss_fuzz_run.sh":/runner.sh \
         local-fuzzing:latest \
         bash -c "/runner.sh"
 
-# Run fuzz tests for the duration set in FUZZ_TIME.
-fuzz-native:
-    KUBEBUILDER_ASSETS=$(KUBEBUILDER_ASSETS) \
-    FUZZ_TIME=$(FUZZ_TIME) \
-    ./tests/fuzz/native_go_run.sh
+# Creates an env file that can be used to load all source-controller's dependencies
+# this is handy when you want to run adhoc debug sessions on tests or start the
+# controller in a new debug session.
+env: $(LIBGIT2)
+    echo 'GO_ENABLED="1"' > $(BUILD_DIR)/.env
+    echo 'PKG_CONFIG_PATH="$(PKG_CONFIG_PATH)"' >> $(BUILD_DIR)/.env
+    echo 'LIBRARY_PATH="$(LIBRARY_PATH)"' >> $(BUILD_DIR)/.env
+    echo 'CGO_CFLAGS="$(CGO_CFLAGS)"' >> $(BUILD_DIR)/.env
+    echo 'CGO_LDFLAGS="$(CGO_LDFLAGS)"' >> $(BUILD_DIR)/.env
+    echo 'KUBEBUILDER_ASSETS=$(KUBEBUILDER_ASSETS)' >> $(BUILD_DIR)/.env
+    echo 'GIT_CONFIG_GLOBAL=/dev/null' >> $(BUILD_DIR)/.env

PROJECT (15 changed lines)

@@ -1,21 +1,12 @@
 domain: toolkit.fluxcd.io
 repo: github.com/fluxcd/source-controller
 resources:
-- group: source
-  kind: GitRepository
-  version: v1
 - group: source
   kind: GitRepository
   version: v1beta2
-- group: source
-  kind: HelmRepository
-  version: v1
 - group: source
   kind: HelmRepository
   version: v1beta2
-- group: source
-  kind: HelmChart
-  version: v1
 - group: source
   kind: HelmChart
   version: v1beta2

@@ -37,10 +28,4 @@ resources:
 - group: source
   kind: OCIRepository
   version: v1beta2
-- group: source
-  kind: Bucket
-  version: v1
-- group: source
-  kind: OCIRepository
-  version: v1
 version: "2"

README.md (40 changed lines)

@@ -5,49 +5,23 @@
 [](https://goreportcard.com/report/github.com/fluxcd/source-controller)
 [](https://github.com/fluxcd/source-controller/blob/main/LICENSE)
 [](https://github.com/fluxcd/source-controller/releases)
 
 The source-controller is a Kubernetes operator, specialised in artifacts acquisition
-from external sources such as Git, OCI, Helm repositories and S3-compatible buckets.
+from external sources such as Git, Helm repositories and S3 buckets.
 The source-controller implements the
-[source.toolkit.fluxcd.io](docs/spec/README.md) API
+[source.toolkit.fluxcd.io](https://github.com/fluxcd/source-controller/tree/main/docs/spec/v1beta2) API
 and is a core component of the [GitOps toolkit](https://fluxcd.io/flux/components/).
 
-## APIs
+Features:
 
-| Kind                                               | API Version                   |
-|----------------------------------------------------|-------------------------------|
-| [GitRepository](docs/spec/v1/gitrepositories.md)   | `source.toolkit.fluxcd.io/v1` |
-| [OCIRepository](docs/spec/v1/ocirepositories.md)   | `source.toolkit.fluxcd.io/v1` |
-| [HelmRepository](docs/spec/v1/helmrepositories.md) | `source.toolkit.fluxcd.io/v1` |
-| [HelmChart](docs/spec/v1/helmcharts.md)            | `source.toolkit.fluxcd.io/v1` |
-| [Bucket](docs/spec/v1/buckets.md)                  | `source.toolkit.fluxcd.io/v1` |
-
-## Features
-
-* authenticates to sources (SSH, user/password, API token, Workload Identity)
-* validates source authenticity (PGP, Cosign, Notation)
+* authenticates to sources (SSH, user/password, API token)
+* validates source authenticity (PGP)
 * detects source changes based on update policies (semver)
 * fetches resources on-demand and on-a-schedule
 * packages the fetched resources into a well-known format (tar.gz, yaml)
 * makes the artifacts addressable by their source identifier (sha, version, ts)
 * makes the artifacts available in-cluster to interested 3rd parties
 * notifies interested 3rd parties of source changes and availability (status conditions, events, hooks)
-* reacts to Git, Helm and OCI artifacts push events (via [notification-controller](https://github.com/fluxcd/notification-controller))
-
-## Guides
-
-* [Get started with Flux](https://fluxcd.io/flux/get-started/)
-* [Setup Webhook Receivers](https://fluxcd.io/flux/guides/webhook-receivers/)
-* [Setup Notifications](https://fluxcd.io/flux/guides/notifications/)
-* [How to build, publish and consume OCI Artifacts with Flux](https://fluxcd.io/flux/cheatsheets/oci-artifacts/)
-
-## Roadmap
-
-The roadmap for the Flux family of projects can be found at <https://fluxcd.io/roadmap/>.
-
-## Contributing
-
-This project is Apache 2.0 licensed and accepts contributions via GitHub pull requests.
-To start contributing please see the [development guide](DEVELOPMENT.md).
+* reacts to Git push and Helm chart upload events (via [notification-controller](https://github.com/fluxcd/notification-controller))

api/go.mod (36 changed lines)

@@ -1,35 +1,33 @@
 module github.com/fluxcd/source-controller/api
 
-go 1.24.0
+go 1.18
 
 require (
-	github.com/fluxcd/pkg/apis/acl v0.7.0
-	github.com/fluxcd/pkg/apis/meta v1.12.0
-	k8s.io/apimachinery v0.33.0
-	sigs.k8s.io/controller-runtime v0.21.0
+	github.com/fluxcd/pkg/apis/acl v0.1.0
+	github.com/fluxcd/pkg/apis/meta v0.18.0
+	k8s.io/apimachinery v0.25.4
+	sigs.k8s.io/controller-runtime v0.13.1
 )
 
+// Fix CVE-2022-32149
+replace golang.org/x/text => golang.org/x/text v0.4.0
+
+// Fix CVE-2022-28948
+replace gopkg.in/yaml.v3 => gopkg.in/yaml.v3 v3.0.1
+
 require (
-	github.com/fxamacker/cbor/v2 v2.8.0 // indirect
-	github.com/go-logr/logr v1.4.2 // indirect
+	github.com/go-logr/logr v1.2.3 // indirect
 	github.com/gogo/protobuf v1.3.2 // indirect
 	github.com/google/gofuzz v1.2.0 // indirect
 	github.com/json-iterator/go v1.1.12 // indirect
-	github.com/kr/pretty v0.3.1 // indirect
 	github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
 	github.com/modern-go/reflect2 v1.0.2 // indirect
-	github.com/spf13/pflag v1.0.6 // indirect
-	github.com/x448/float16 v0.8.4 // indirect
-	golang.org/x/net v0.40.0 // indirect
-	golang.org/x/text v0.25.0 // indirect
-	gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c // indirect
+	golang.org/x/net v0.2.0 // indirect
+	golang.org/x/text v0.4.0 // indirect
 	gopkg.in/inf.v0 v0.9.1 // indirect
-	k8s.io/klog/v2 v2.130.1 // indirect
-	k8s.io/utils v0.0.0-20250321185631-1f6e0b77f77e // indirect
-	sigs.k8s.io/json v0.0.0-20241014173422-cfa47c3a1cc8 // indirect
-	sigs.k8s.io/randfill v1.0.0 // indirect
-	sigs.k8s.io/structured-merge-diff/v4 v4.7.0 // indirect
-	sigs.k8s.io/yaml v1.4.0 // indirect
+	gopkg.in/yaml.v2 v2.4.0 // indirect
+	k8s.io/klog/v2 v2.80.1 // indirect
+	k8s.io/utils v0.0.0-20221108210102-8e77b1f39fe2 // indirect
+	sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd // indirect
+	sigs.k8s.io/structured-merge-diff/v4 v4.2.3 // indirect
 )

api/go.sum (123 changed lines)

@@ -1,117 +1,102 @@
 github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
 github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
 github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
 github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
-github.com/fluxcd/pkg/apis/acl v0.7.0 h1:dMhZJH+g6ZRPjs4zVOAN9vHBd1DcavFgcIFkg5ooOE0=
-github.com/fluxcd/pkg/apis/acl v0.7.0/go.mod h1:uv7pXXR/gydiX4MUwlQa7vS8JONEDztynnjTvY3JxKQ=
-github.com/fluxcd/pkg/apis/meta v1.12.0 h1:XW15TKZieC2b7MN8VS85stqZJOx+/b8jATQ/xTUhVYg=
-github.com/fluxcd/pkg/apis/meta v1.12.0/go.mod h1:+son1Va60x2eiDcTwd7lcctbI6C+K3gM7R+ULmEq1SI=
-github.com/fxamacker/cbor/v2 v2.8.0 h1:fFtUGXUzXPHTIUdne5+zzMPTfffl3RD5qYnkY40vtxU=
-github.com/fxamacker/cbor/v2 v2.8.0/go.mod h1:vM4b+DJCtHn+zz7h3FFp/hDAI9WNWCsZj23V5ytsSxQ=
-github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY=
-github.com/go-logr/logr v1.4.2/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
-github.com/go-task/slim-sprig/v3 v3.0.0 h1:sUs3vkvUymDpBKi3qH1YSqBQk9+9D/8M2mN1vB6EwHI=
-github.com/go-task/slim-sprig/v3 v3.0.0/go.mod h1:W848ghGpv3Qj3dhTPRyJypKRiqCdHZiAzKg9hl15HA8=
+github.com/fluxcd/pkg/apis/acl v0.1.0 h1:EoAl377hDQYL3WqanWCdifauXqXbMyFuK82NnX6pH4Q=
+github.com/fluxcd/pkg/apis/acl v0.1.0/go.mod h1:zfEZzz169Oap034EsDhmCAGgnWlcWmIObZjYMusoXS8=
+github.com/fluxcd/pkg/apis/meta v0.18.0 h1:s0LeulWcQ4DxVX6805vgDTxlA6bAYk+Lq1QHSnNdqLM=
+github.com/fluxcd/pkg/apis/meta v0.18.0/go.mod h1:pYvXRFi1UKNNrGR34jw3uqOnMXw9X6dTkML8j5Z7tis=
+github.com/fsnotify/fsnotify v1.5.4 h1:jRbGcIw6P2Meqdwuo0H1p6JVLbL5DHKAKlYndzMwVZI=
+github.com/go-logr/logr v1.2.0/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
+github.com/go-logr/logr v1.2.3 h1:2DntVwHkVopvECVRSlL5PSo9eG+cAkDCuckLubN+rq0=
+github.com/go-logr/logr v1.2.3/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
 github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q=
 github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q=
-github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
-github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8=
-github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU=
+github.com/google/go-cmp v0.5.8 h1:e6P7q2lk1O+qJJb4BtCQXlK8vWEO8V1ZeuEdJNOqZyg=
 github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
-github.com/google/pprof v0.0.0-20241029153458-d1b30febd7db h1:097atOisP2aRj7vFgYQBbFN4U4JNXUNYpxael3UzMyo=
-github.com/google/pprof v0.0.0-20241029153458-d1b30febd7db/go.mod h1:vavhavw2zAxS5dIdcRluK6cSGGPlZynqzFM8NdvU144=
+github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0=
+github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
 github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM=
 github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo=
 github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8=
 github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
-github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
-github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE=
-github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk=
 github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
 github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
-github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
-github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
 github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
 github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg=
 github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
 github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M=
 github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk=
-github.com/onsi/ginkgo/v2 v2.22.0 h1:Yed107/8DjTr0lKCNt7Dn8yQ6ybuDRQoMGrNFKzMfHg=
-github.com/onsi/ginkgo/v2 v2.22.0/go.mod h1:7Du3c42kxCUegi0IImZ1wUQzMBVecgIHjR1C+NkhLQo=
-github.com/onsi/gomega v1.36.1 h1:bJDPBO7ibjxcbHMgSCoo4Yj18UWbKDlLwX1x9sybDcw=
-github.com/onsi/gomega v1.36.1/go.mod h1:PvZbdDc8J6XJEpDK4HCuRBm8a6Fzp9/DmhC9C7yFlog=
-github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsKMEsNbx1VGcRFpLqf3715MtcvvzbA=
+github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e h1:fD57ERR4JtEqsWbfPhv4DMiApHyliiK5xCTNVSPiaAs=
+github.com/nxadm/tail v1.4.8 h1:nPr65rt6Y5JFSKQO7qToXr7pePgD6Gwiw05lkbyAQTE=
+github.com/onsi/ginkgo v1.16.5 h1:8xi0RTUf59SOSfEtZMvwTvXYMzG4gV23XVHOZiXNtnE=
+github.com/onsi/gomega v1.20.1 h1:PA/3qinGoukvymdIDV8pii6tiZgC8kbmJO6Z5+b002Q=
 github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
 github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
-github.com/rogpeppe/go-internal v1.9.0/go.mod h1:WtVeX8xhTBvf0smdhujwtBcq4Qrzq/fJaraNFVN+nFs=
-github.com/rogpeppe/go-internal v1.13.1 h1:KvO1DLK/DRN07sQ1LQKScxyZJuNnedQ5/wKSR38lUII=
-github.com/rogpeppe/go-internal v1.13.1/go.mod h1:uMEvuHeurkdAXX61udpOXGD/AzZDWNMNyH2VO9fmH0o=
-github.com/spf13/pflag v1.0.6 h1:jFzHGLGAlb3ruxLB8MhbI6A8+AQX/2eW4qeyNZXNp2o=
-github.com/spf13/pflag v1.0.6/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
+github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA=
 github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
 github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
-github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA=
-github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
-github.com/x448/float16 v0.8.4 h1:qLwI1I70+NjRFUR3zs1JPUCgaCXSh3SW62uAKT1mSBM=
-github.com/x448/float16 v0.8.4/go.mod h1:14CWIYCyZA/cWjXOioeEpHeN/83MdbZDRQHoFcYsOfg=
+github.com/stretchr/testify v1.8.0 h1:pSgiaMZlXftHpm5L7V1+rVB+AZJydKsMxsQBIJw4PKk=
 github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
 github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
 github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY=
 golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
 golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
 golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
 golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
 golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
 golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
 golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4=
 golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
 golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
 golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
 golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
-golang.org/x/net v0.40.0 h1:79Xs7wF06Gbdcg4kdCCIQArK11Z1hr5POQ6+fIYHNuY=
-golang.org/x/net v0.40.0/go.mod h1:y0hY0exeL2Pku80/zKK7tpntoX23cqL3Oa6njdgRtds=
+golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
+golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
+golang.org/x/net v0.2.0 h1:sZfSu1wtKLGlWI4ZZayP0ck9Y73K1ynO6gqzTdBVdPU=
+golang.org/x/net v0.2.0/go.mod h1:KqCZLdyyvdV855qA2rE3GC2aiw5xGR5TEjj8smXukLY=
 golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
 golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
 golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
 golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
 golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.33.0 h1:q3i8TbbEz+JRD9ywIRlyRAQbM0qF7hu24q3teo2hbuw=
|
||||
golang.org/x/sys v0.33.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k=
|
||||
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
|
||||
golang.org/x/text v0.25.0 h1:qVyWApTSYLk/drJRO5mDlNYskwQznZmkpV2c8q9zls4=
|
||||
golang.org/x/text v0.25.0/go.mod h1:WEdwpYrmk1qmdHvhkSTNPm3app7v4rsT8F2UD6+VHIA=
|
||||
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||
golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.2.0 h1:ljd4t30dBnAvMZaQCevtY0xLLD0A+bRZXbgLMLU1F/A=
|
||||
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
|
||||
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
|
||||
golang.org/x/text v0.4.0 h1:BrVqGRd7+k1DiOgtnFvAkoQEWQvBc25ouMJM6429SFg=
|
||||
golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
|
||||
golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
||||
golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
|
||||
golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
|
||||
golang.org/x/tools v0.26.0 h1:v/60pFQmzmT9ExmjDv2gGIfi3OqfKoEP6I5+umXlbnQ=
|
||||
golang.org/x/tools v0.26.0/go.mod h1:TPVVj70c7JJ3WCazhD8OdXcZg/og+b9+tH/KxylGwH0=
|
||||
golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc=
|
||||
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
|
||||
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
|
||||
gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f h1:BLraFXnmrev5lT+xlilqcH8XK9/i0At2xKjWk4p6zsU=
|
||||
gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc=
|
||||
gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw=
|
||||
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ=
|
||||
gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||
gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY=
|
||||
gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=
|
||||
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
|
||||
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
||||
k8s.io/api v0.33.0 h1:yTgZVn1XEe6opVpP1FylmNrIFWuDqe2H0V8CT5gxfIU=
|
||||
k8s.io/api v0.33.0/go.mod h1:CTO61ECK/KU7haa3qq8sarQ0biLq2ju405IZAd9zsiM=
|
||||
k8s.io/apimachinery v0.33.0 h1:1a6kHrJxb2hs4t8EE5wuR/WxKDwGN1FKH3JvDtA0CIQ=
|
||||
k8s.io/apimachinery v0.33.0/go.mod h1:BHW0YOu7n22fFv/JkYOEfkUYNRN0fj0BlvMFWA7b+SM=
|
||||
k8s.io/klog/v2 v2.130.1 h1:n9Xl7H1Xvksem4KFG4PYbdQCQxqc/tTUyrgXaOhHSzk=
|
||||
k8s.io/klog/v2 v2.130.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE=
|
||||
k8s.io/utils v0.0.0-20250321185631-1f6e0b77f77e h1:KqK5c/ghOm8xkHYhlodbp6i6+r+ChV2vuAuVRdFbLro=
|
||||
k8s.io/utils v0.0.0-20250321185631-1f6e0b77f77e/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0=
|
||||
sigs.k8s.io/controller-runtime v0.21.0 h1:CYfjpEuicjUecRk+KAeyYh+ouUBn4llGyDYytIGcJS8=
|
||||
sigs.k8s.io/controller-runtime v0.21.0/go.mod h1:OSg14+F65eWqIu4DceX7k/+QRAbTTvxeQSNSOQpukWM=
|
||||
sigs.k8s.io/json v0.0.0-20241014173422-cfa47c3a1cc8 h1:gBQPwqORJ8d8/YNZWEjoZs7npUVDpVXUUOFfW6CgAqE=
|
||||
sigs.k8s.io/json v0.0.0-20241014173422-cfa47c3a1cc8/go.mod h1:mdzfpAEoE6DHQEN0uh9ZbOCuHbLK5wOm7dK4ctXE9Tg=
|
||||
sigs.k8s.io/randfill v0.0.0-20250304075658-069ef1bbf016/go.mod h1:XeLlZ/jmk4i1HRopwe7/aU3H5n1zNUcX6TM94b3QxOY=
|
||||
sigs.k8s.io/randfill v1.0.0 h1:JfjMILfT8A6RbawdsK2JXGBR5AQVfd+9TbzrlneTyrU=
|
||||
sigs.k8s.io/randfill v1.0.0/go.mod h1:XeLlZ/jmk4i1HRopwe7/aU3H5n1zNUcX6TM94b3QxOY=
|
||||
sigs.k8s.io/structured-merge-diff/v4 v4.7.0 h1:qPeWmscJcXP0snki5IYF79Z8xrl8ETFxgMd7wez1XkI=
|
||||
sigs.k8s.io/structured-merge-diff/v4 v4.7.0/go.mod h1:dDy58f92j70zLsuZVuUX5Wp9vtxXpaZnkPGWeqDfCps=
|
||||
sigs.k8s.io/yaml v1.4.0 h1:Mk1wCc2gy/F0THH0TAp1QYyJNzRm2KCLy3o5ASXVI5E=
|
||||
sigs.k8s.io/yaml v1.4.0/go.mod h1:Ejl7/uTz7PSA4eKMyQCUTnhZYNmLIl+5c2lQPGR2BPY=
|
||||
k8s.io/api v0.25.0 h1:H+Q4ma2U/ww0iGB78ijZx6DRByPz6/733jIuFpX70e0=
|
||||
k8s.io/apimachinery v0.25.4 h1:CtXsuaitMESSu339tfhVXhQrPET+EiWnIY1rcurKnAc=
|
||||
k8s.io/apimachinery v0.25.4/go.mod h1:jaF9C/iPNM1FuLl7Zuy5b9v+n35HGSh6AQ4HYRkCqwo=
|
||||
k8s.io/klog/v2 v2.80.1 h1:atnLQ121W371wYYFawwYx1aEY2eUfs4l3J72wtgAwV4=
|
||||
k8s.io/klog/v2 v2.80.1/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0=
|
||||
k8s.io/utils v0.0.0-20221108210102-8e77b1f39fe2 h1:GfD9OzL11kvZN5iArC6oTS7RTj7oJOIfnislxYlqTj8=
|
||||
k8s.io/utils v0.0.0-20221108210102-8e77b1f39fe2/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0=
|
||||
sigs.k8s.io/controller-runtime v0.13.1 h1:tUsRCSJVM1QQOOeViGeX3GMT3dQF1eePPw6sEE3xSlg=
|
||||
sigs.k8s.io/controller-runtime v0.13.1/go.mod h1:Zbz+el8Yg31jubvAEyglRZGdLAjplZl+PgtYNI6WNTI=
|
||||
sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd h1:EDPBXCAspyGV4jQlpZSudPeMmr1bNJefnuqLsRAsHZo=
|
||||
sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd/go.mod h1:B8JuhiUyNFVKdsE8h686QcCxMaH6HrOAZj4vswFpcB0=
|
||||
sigs.k8s.io/structured-merge-diff/v4 v4.2.3 h1:PRbqxJClWWYMNV1dhaG4NsibJbArud9kFxnAMREiWFE=
|
||||
sigs.k8s.io/structured-merge-diff/v4 v4.2.3/go.mod h1:qjx8mGObPmV2aSZepjQjbmb2ihdVs8cGKBraizNC69E=
|
||||
sigs.k8s.io/yaml v1.3.0 h1:a2VclLzOGrwOHDiV8EfBGhvjHvP46CtW5j6POvhYGGo=
|
||||
|
|
|
@ -1,93 +0,0 @@
/*
Copyright 2023 The Flux authors

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package v1

import (
    "path"
    "strings"

    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// Artifact represents the output of a Source reconciliation.
type Artifact struct {
    // Path is the relative file path of the Artifact. It can be used to locate
    // the file in the root of the Artifact storage on the local file system of
    // the controller managing the Source.
    // +required
    Path string `json:"path"`

    // URL is the HTTP address of the Artifact as exposed by the controller
    // managing the Source. It can be used to retrieve the Artifact for
    // consumption, e.g. by another controller applying the Artifact contents.
    // +required
    URL string `json:"url"`

    // Revision is a human-readable identifier traceable in the origin source
    // system. It can be a Git commit SHA, Git tag, a Helm chart version, etc.
    // +required
    Revision string `json:"revision"`

    // Digest is the digest of the file in the form of '<algorithm>:<checksum>'.
    // +optional
    // +kubebuilder:validation:Pattern="^[a-z0-9]+(?:[.+_-][a-z0-9]+)*:[a-zA-Z0-9=_-]+$"
    Digest string `json:"digest,omitempty"`

    // LastUpdateTime is the timestamp corresponding to the last update of the
    // Artifact.
    // +required
    LastUpdateTime metav1.Time `json:"lastUpdateTime"`

    // Size is the number of bytes in the file.
    // +optional
    Size *int64 `json:"size,omitempty"`

    // Metadata holds upstream information such as OCI annotations.
    // +optional
    Metadata map[string]string `json:"metadata,omitempty"`
}

// HasRevision returns if the given revision matches the current Revision of
// the Artifact.
func (in *Artifact) HasRevision(revision string) bool {
    if in == nil {
        return false
    }
    return in.Revision == revision
}

// HasDigest returns if the given digest matches the current Digest of the
// Artifact.
func (in *Artifact) HasDigest(digest string) bool {
    if in == nil {
        return false
    }
    return in.Digest == digest
}

// ArtifactDir returns the artifact dir path in the form of
// '<kind>/<namespace>/<name>'.
func ArtifactDir(kind, namespace, name string) string {
    kind = strings.ToLower(kind)
    return path.Join(kind, namespace, name)
}

// ArtifactPath returns the artifact path in the form of
// '<kind>/<namespace>/<name>/<filename>'.
func ArtifactPath(kind, namespace, name, filename string) string {
    return path.Join(ArtifactDir(kind, namespace, name), filename)
}
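The helpers above define the controller's on-disk artifact layout. A minimal usage sketch (not part of the diff), assuming the API package is importable as github.com/fluxcd/source-controller/api/v1; the kind, namespace, name and revision values are made up for illustration:

package main

import (
    "fmt"

    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

    sourcev1 "github.com/fluxcd/source-controller/api/v1"
)

func main() {
    // ArtifactDir lower-cases the kind, giving '<kind>/<namespace>/<name>'.
    fmt.Println(sourcev1.ArtifactDir("GitRepository", "flux-system", "podinfo"))
    // gitrepository/flux-system/podinfo

    // ArtifactPath appends the file name to that directory.
    p := sourcev1.ArtifactPath("GitRepository", "flux-system", "podinfo", "latest.tar.gz")

    a := &sourcev1.Artifact{
        Path:           p,
        Revision:       "main@sha1:ffffffff", // hypothetical revision
        LastUpdateTime: metav1.Now(),
    }
    // HasRevision is nil-safe and compares the stored revision verbatim.
    fmt.Println(a.HasRevision("main@sha1:ffffffff")) // true
}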
@ -1,271 +0,0 @@
/*
Copyright 2024 The Flux authors

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package v1

import (
    "time"

    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

    "github.com/fluxcd/pkg/apis/meta"
)

const (
    // BucketKind is the string representation of a Bucket.
    BucketKind = "Bucket"
)

const (
    // BucketProviderGeneric for any S3 API compatible storage Bucket.
    BucketProviderGeneric string = "generic"
    // BucketProviderAmazon for an AWS S3 object storage Bucket.
    // Provides support for retrieving credentials from the AWS EC2 service.
    BucketProviderAmazon string = "aws"
    // BucketProviderGoogle for a Google Cloud Storage Bucket.
    // Provides support for authentication using a workload identity.
    BucketProviderGoogle string = "gcp"
    // BucketProviderAzure for an Azure Blob Storage Bucket.
    // Provides support for authentication using a Service Principal,
    // Managed Identity or Shared Key.
    BucketProviderAzure string = "azure"
)

// BucketSpec specifies the required configuration to produce an Artifact for
// an object storage bucket.
// +kubebuilder:validation:XValidation:rule="self.provider == 'aws' || self.provider == 'generic' || !has(self.sts)", message="STS configuration is only supported for the 'aws' and 'generic' Bucket providers"
// +kubebuilder:validation:XValidation:rule="self.provider != 'aws' || !has(self.sts) || self.sts.provider == 'aws'", message="'aws' is the only supported STS provider for the 'aws' Bucket provider"
// +kubebuilder:validation:XValidation:rule="self.provider != 'generic' || !has(self.sts) || self.sts.provider == 'ldap'", message="'ldap' is the only supported STS provider for the 'generic' Bucket provider"
// +kubebuilder:validation:XValidation:rule="!has(self.sts) || self.sts.provider != 'aws' || !has(self.sts.secretRef)", message="spec.sts.secretRef is not required for the 'aws' STS provider"
// +kubebuilder:validation:XValidation:rule="!has(self.sts) || self.sts.provider != 'aws' || !has(self.sts.certSecretRef)", message="spec.sts.certSecretRef is not required for the 'aws' STS provider"
type BucketSpec struct {
    // Provider of the object storage bucket.
    // Defaults to 'generic', which expects an S3 (API) compatible object
    // storage.
    // +kubebuilder:validation:Enum=generic;aws;gcp;azure
    // +kubebuilder:default:=generic
    // +optional
    Provider string `json:"provider,omitempty"`

    // BucketName is the name of the object storage bucket.
    // +required
    BucketName string `json:"bucketName"`

    // Endpoint is the object storage address the BucketName is located at.
    // +required
    Endpoint string `json:"endpoint"`

    // STS specifies the required configuration to use a Security Token
    // Service for fetching temporary credentials to authenticate in a
    // Bucket provider.
    //
    // This field is only supported for the `aws` and `generic` providers.
    // +optional
    STS *BucketSTSSpec `json:"sts,omitempty"`

    // Insecure allows connecting to a non-TLS HTTP Endpoint.
    // +optional
    Insecure bool `json:"insecure,omitempty"`

    // Region of the Endpoint where the BucketName is located.
    // +optional
    Region string `json:"region,omitempty"`

    // Prefix to use for server-side filtering of files in the Bucket.
    // +optional
    Prefix string `json:"prefix,omitempty"`

    // SecretRef specifies the Secret containing authentication credentials
    // for the Bucket.
    // +optional
    SecretRef *meta.LocalObjectReference `json:"secretRef,omitempty"`

    // CertSecretRef can be given the name of a Secret containing
    // either or both of
    //
    // - a PEM-encoded client certificate (`tls.crt`) and private
    // key (`tls.key`);
    // - a PEM-encoded CA certificate (`ca.crt`)
    //
    // and whichever are supplied, will be used for connecting to the
    // bucket. The client cert and key are useful if you are
    // authenticating with a certificate; the CA cert is useful if
    // you are using a self-signed server certificate. The Secret must
    // be of type `Opaque` or `kubernetes.io/tls`.
    //
    // This field is only supported for the `generic` provider.
    // +optional
    CertSecretRef *meta.LocalObjectReference `json:"certSecretRef,omitempty"`

    // ProxySecretRef specifies the Secret containing the proxy configuration
    // to use while communicating with the Bucket server.
    // +optional
    ProxySecretRef *meta.LocalObjectReference `json:"proxySecretRef,omitempty"`

    // Interval at which the Bucket Endpoint is checked for updates.
    // This interval is approximate and may be subject to jitter to ensure
    // efficient use of resources.
    // +kubebuilder:validation:Type=string
    // +kubebuilder:validation:Pattern="^([0-9]+(\\.[0-9]+)?(ms|s|m|h))+$"
    // +required
    Interval metav1.Duration `json:"interval"`

    // Timeout for fetch operations, defaults to 60s.
    // +kubebuilder:default="60s"
    // +kubebuilder:validation:Type=string
    // +kubebuilder:validation:Pattern="^([0-9]+(\\.[0-9]+)?(ms|s|m))+$"
    // +optional
    Timeout *metav1.Duration `json:"timeout,omitempty"`

    // Ignore overrides the set of excluded patterns in the .sourceignore format
    // (which is the same as .gitignore). If not provided, a default will be used,
    // consult the documentation for your version to find out what those are.
    // +optional
    Ignore *string `json:"ignore,omitempty"`

    // Suspend tells the controller to suspend the reconciliation of this
    // Bucket.
    // +optional
    Suspend bool `json:"suspend,omitempty"`
}

// BucketSTSSpec specifies the required configuration to use a Security Token
// Service for fetching temporary credentials to authenticate in a Bucket
// provider.
type BucketSTSSpec struct {
    // Provider of the Security Token Service.
    // +kubebuilder:validation:Enum=aws;ldap
    // +required
    Provider string `json:"provider"`

    // Endpoint is the HTTP/S endpoint of the Security Token Service from
    // where temporary credentials will be fetched.
    // +required
    // +kubebuilder:validation:Pattern="^(http|https)://.*$"
    Endpoint string `json:"endpoint"`

    // SecretRef specifies the Secret containing authentication credentials
    // for the STS endpoint. This Secret must contain the fields `username`
    // and `password` and is supported only for the `ldap` provider.
    // +optional
    SecretRef *meta.LocalObjectReference `json:"secretRef,omitempty"`

    // CertSecretRef can be given the name of a Secret containing
    // either or both of
    //
    // - a PEM-encoded client certificate (`tls.crt`) and private
    // key (`tls.key`);
    // - a PEM-encoded CA certificate (`ca.crt`)
    //
    // and whichever are supplied, will be used for connecting to the
    // STS endpoint. The client cert and key are useful if you are
    // authenticating with a certificate; the CA cert is useful if
    // you are using a self-signed server certificate. The Secret must
    // be of type `Opaque` or `kubernetes.io/tls`.
    //
    // This field is only supported for the `ldap` provider.
    // +optional
    CertSecretRef *meta.LocalObjectReference `json:"certSecretRef,omitempty"`
}

// BucketStatus records the observed state of a Bucket.
type BucketStatus struct {
    // ObservedGeneration is the last observed generation of the Bucket object.
    // +optional
    ObservedGeneration int64 `json:"observedGeneration,omitempty"`

    // Conditions holds the conditions for the Bucket.
    // +optional
    Conditions []metav1.Condition `json:"conditions,omitempty"`

    // URL is the dynamic fetch link for the latest Artifact.
    // It is provided on a "best effort" basis, and using the precise
    // BucketStatus.Artifact data is recommended.
    // +optional
    URL string `json:"url,omitempty"`

    // Artifact represents the last successful Bucket reconciliation.
    // +optional
    Artifact *Artifact `json:"artifact,omitempty"`

    // ObservedIgnore is the observed exclusion patterns used for constructing
    // the source artifact.
    // +optional
    ObservedIgnore *string `json:"observedIgnore,omitempty"`

    meta.ReconcileRequestStatus `json:",inline"`
}

const (
    // BucketOperationSucceededReason signals that the Bucket listing and fetch
    // operations succeeded.
    BucketOperationSucceededReason string = "BucketOperationSucceeded"

    // BucketOperationFailedReason signals that the Bucket listing or fetch
    // operations failed.
    BucketOperationFailedReason string = "BucketOperationFailed"
)

// GetConditions returns the status conditions of the object.
func (in *Bucket) GetConditions() []metav1.Condition {
    return in.Status.Conditions
}

// SetConditions sets the status conditions on the object.
func (in *Bucket) SetConditions(conditions []metav1.Condition) {
    in.Status.Conditions = conditions
}

// GetRequeueAfter returns the duration after which the source must be reconciled again.
func (in *Bucket) GetRequeueAfter() time.Duration {
    return in.Spec.Interval.Duration
}

// GetArtifact returns the latest artifact from the source if present in the status sub-resource.
func (in *Bucket) GetArtifact() *Artifact {
    return in.Status.Artifact
}

// +genclient
// +kubebuilder:storageversion
// +kubebuilder:object:root=true
// +kubebuilder:subresource:status
// +kubebuilder:printcolumn:name="Endpoint",type=string,JSONPath=`.spec.endpoint`
// +kubebuilder:printcolumn:name="Age",type="date",JSONPath=".metadata.creationTimestamp",description=""
// +kubebuilder:printcolumn:name="Ready",type="string",JSONPath=".status.conditions[?(@.type==\"Ready\")].status",description=""
// +kubebuilder:printcolumn:name="Status",type="string",JSONPath=".status.conditions[?(@.type==\"Ready\")].message",description=""

// Bucket is the Schema for the buckets API.
type Bucket struct {
    metav1.TypeMeta   `json:",inline"`
    metav1.ObjectMeta `json:"metadata,omitempty"`

    Spec BucketSpec `json:"spec,omitempty"`
    // +kubebuilder:default={"observedGeneration":-1}
    Status BucketStatus `json:"status,omitempty"`
}

// BucketList contains a list of Bucket objects.
// +kubebuilder:object:root=true
type BucketList struct {
    metav1.TypeMeta `json:",inline"`
    metav1.ListMeta `json:"metadata,omitempty"`
    Items           []Bucket `json:"items"`
}

func init() {
    SchemeBuilder.Register(&Bucket{}, &BucketList{})
}
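The XValidation rules above couple spec.provider and spec.sts.provider: STS is only allowed for 'aws' and 'generic', and each accepts exactly one STS provider. A sketch (not part of the diff) of a Bucket value that satisfies them, using 'generic' with an 'ldap' STS endpoint; every name and address is hypothetical:

// Sketch only: constructing a Bucket in the same package as the types above.
bucket := &Bucket{
    ObjectMeta: metav1.ObjectMeta{Name: "example", Namespace: "flux-system"},
    Spec: BucketSpec{
        Provider:   BucketProviderGeneric,
        BucketName: "artifacts",         // hypothetical bucket name
        Endpoint:   "minio.example.com", // hypothetical endpoint
        STS: &BucketSTSSpec{
            // 'ldap' is the only STS provider the CEL rules accept for
            // the 'generic' Bucket provider.
            Provider: "ldap",
            Endpoint: "https://sts.example.com",
        },
        Interval: metav1.Duration{Duration: 5 * time.Minute},
    },
}
_ = bucket

Swapping Provider to BucketProviderGoogle while keeping the STS block would be rejected at admission by the first rule.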
@ -1,118 +0,0 @@
/*
Copyright 2023 The Flux authors

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package v1

const SourceFinalizer = "finalizers.fluxcd.io"

const (
    // ArtifactInStorageCondition indicates the availability of the Artifact in
    // the storage.
    // If True, the Artifact is stored successfully.
    // This Condition is only present on the resource if the Artifact is
    // successfully stored.
    ArtifactInStorageCondition string = "ArtifactInStorage"

    // ArtifactOutdatedCondition indicates the current Artifact of the Source
    // is outdated.
    // This is a "negative polarity" or "abnormal-true" type, and is only
    // present on the resource if it is True.
    ArtifactOutdatedCondition string = "ArtifactOutdated"

    // SourceVerifiedCondition indicates the integrity verification of the
    // Source.
    // If True, the integrity check succeeded. If False, it failed.
    // This Condition is only present on the resource if the integrity check
    // is enabled.
    SourceVerifiedCondition string = "SourceVerified"

    // FetchFailedCondition indicates a transient or persistent fetch failure
    // of an upstream Source.
    // If True, observations on the upstream Source revision may be impossible,
    // and the Artifact available for the Source may be outdated.
    // This is a "negative polarity" or "abnormal-true" type, and is only
    // present on the resource if it is True.
    FetchFailedCondition string = "FetchFailed"

    // BuildFailedCondition indicates a transient or persistent build failure
    // of a Source's Artifact.
    // If True, the Source can be in an ArtifactOutdatedCondition.
    // This is a "negative polarity" or "abnormal-true" type, and is only
    // present on the resource if it is True.
    BuildFailedCondition string = "BuildFailed"

    // StorageOperationFailedCondition indicates a transient or persistent
    // failure related to storage. If True, the reconciliation failed while
    // performing some filesystem operation.
    // This is a "negative polarity" or "abnormal-true" type, and is only
    // present on the resource if it is True.
    StorageOperationFailedCondition string = "StorageOperationFailed"
)

// Reasons are provided as utility, and not part of the declarative API.
const (
    // URLInvalidReason signals that a given Source has an invalid URL.
    URLInvalidReason string = "URLInvalid"

    // AuthenticationFailedReason signals that a Secret does not have the
    // required fields, or the provided credentials do not match.
    AuthenticationFailedReason string = "AuthenticationFailed"

    // VerificationError signals that the Source's verification
    // check failed.
    VerificationError string = "VerificationError"

    // DirCreationFailedReason signals a failure caused by a directory creation
    // operation.
    DirCreationFailedReason string = "DirectoryCreationFailed"

    // StatOperationFailedReason signals a failure caused by a stat operation on
    // a path.
    StatOperationFailedReason string = "StatOperationFailed"

    // ReadOperationFailedReason signals a failure caused by a read operation.
    ReadOperationFailedReason string = "ReadOperationFailed"

    // AcquireLockFailedReason signals a failure in acquiring a lock.
    AcquireLockFailedReason string = "AcquireLockFailed"

    // InvalidPathReason signals a failure caused by an invalid path.
    InvalidPathReason string = "InvalidPath"

    // ArchiveOperationFailedReason signals a failure in an archive operation.
    ArchiveOperationFailedReason string = "ArchiveOperationFailed"

    // SymlinkUpdateFailedReason signals a failure in updating a symlink.
    SymlinkUpdateFailedReason string = "SymlinkUpdateFailed"

    // ArtifactUpToDateReason signals that an existing Artifact is up-to-date
    // with the Source.
    ArtifactUpToDateReason string = "ArtifactUpToDate"

    // CacheOperationFailedReason signals a failure in a cache operation.
    CacheOperationFailedReason string = "CacheOperationFailed"

    // PatchOperationFailedReason signals a failure in patching a Kubernetes API
    // object.
    PatchOperationFailedReason string = "PatchOperationFailed"

    // InvalidSTSConfigurationReason signals that the STS configuration is invalid.
    InvalidSTSConfigurationReason string = "InvalidSTSConfiguration"

    // InvalidProviderConfigurationReason signals that the provider
    // configuration is invalid.
    InvalidProviderConfigurationReason string = "InvalidProviderConfiguration"
)
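The condition types and reasons above are plain strings consumed by the reconcilers. A sketch (not part of the diff) of how a reconciler might record a fetch failure, assuming the standard k8s.io/apimachinery/pkg/api/meta helpers (imported here as apimeta) and a hypothetical obj holding a Status.Conditions slice:

// Sketch only: marking a Source as failing to fetch. The "abnormal-true"
// FetchFailedCondition is added only when the failure actually occurs, and
// removed again once the fetch succeeds.
apimeta.SetStatusCondition(&obj.Status.Conditions, metav1.Condition{
    Type:               FetchFailedCondition,
    Status:             metav1.ConditionTrue,
    Reason:             AuthenticationFailedReason,
    Message:            "failed to authenticate against the endpoint",
    ObservedGeneration: obj.Generation,
})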
@ -1,20 +0,0 @@
/*
Copyright 2023 The Flux authors

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

// Package v1 contains API Schema definitions for the source v1 API group
// +kubebuilder:object:generate=true
// +groupName=source.toolkit.fluxcd.io
package v1
@ -1,378 +0,0 @@
/*
Copyright 2023 The Flux authors

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package v1

import (
    "time"

    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

    "github.com/fluxcd/pkg/apis/meta"
)

const (
    // GitRepositoryKind is the string representation of a GitRepository.
    GitRepositoryKind = "GitRepository"

    // GitProviderGeneric provides support for authentication using
    // credentials specified in secretRef.
    GitProviderGeneric string = "generic"

    // GitProviderAzure provides support for authentication to Azure
    // repositories using Managed Identity.
    GitProviderAzure string = "azure"

    // GitProviderGitHub provides support for authentication to Git
    // repositories using GitHub App authentication.
    GitProviderGitHub string = "github"
)

const (
    // IncludeUnavailableCondition indicates one of the includes is not
    // available. For example, because it does not exist, or does not have an
    // Artifact.
    // This is a "negative polarity" or "abnormal-true" type, and is only
    // present on the resource if it is True.
    IncludeUnavailableCondition string = "IncludeUnavailable"
)

// GitVerificationMode specifies the verification mode for a Git repository.
type GitVerificationMode string

// Valid checks the validity of the Git verification mode.
func (m GitVerificationMode) Valid() bool {
    switch m {
    case ModeGitHEAD, ModeGitTag, ModeGitTagAndHEAD:
        return true
    default:
        return false
    }
}

const (
    // ModeGitHEAD implies that the HEAD of the Git repository (after it has been
    // checked out to the required commit) should be verified.
    ModeGitHEAD GitVerificationMode = "HEAD"
    // ModeGitTag implies that the tag object specified in the checkout configuration
    // should be verified.
    ModeGitTag GitVerificationMode = "Tag"
    // ModeGitTagAndHEAD implies that both the tag object and the commit it points
    // to should be verified.
    ModeGitTagAndHEAD GitVerificationMode = "TagAndHEAD"
)

// GitRepositorySpec specifies the required configuration to produce an
// Artifact for a Git repository.
type GitRepositorySpec struct {
    // URL specifies the Git repository URL; it can be an HTTP/S or SSH address.
    // +kubebuilder:validation:Pattern="^(http|https|ssh)://.*$"
    // +required
    URL string `json:"url"`

    // SecretRef specifies the Secret containing authentication credentials for
    // the GitRepository.
    // For HTTPS repositories the Secret must contain 'username' and 'password'
    // fields for basic auth or 'bearerToken' field for token auth.
    // For SSH repositories the Secret must contain 'identity'
    // and 'known_hosts' fields.
    // +optional
    SecretRef *meta.LocalObjectReference `json:"secretRef,omitempty"`

    // Provider used for authentication, can be 'azure', 'github', 'generic'.
    // When not specified, defaults to 'generic'.
    // +kubebuilder:validation:Enum=generic;azure;github
    // +optional
    Provider string `json:"provider,omitempty"`

    // Interval at which the GitRepository URL is checked for updates.
    // This interval is approximate and may be subject to jitter to ensure
    // efficient use of resources.
    // +kubebuilder:validation:Type=string
    // +kubebuilder:validation:Pattern="^([0-9]+(\\.[0-9]+)?(ms|s|m|h))+$"
    // +required
    Interval metav1.Duration `json:"interval"`

    // Timeout for Git operations like cloning, defaults to 60s.
    // +kubebuilder:default="60s"
    // +kubebuilder:validation:Type=string
    // +kubebuilder:validation:Pattern="^([0-9]+(\\.[0-9]+)?(ms|s|m))+$"
    // +optional
    Timeout *metav1.Duration `json:"timeout,omitempty"`

    // Reference specifies the Git reference to resolve and monitor for
    // changes, defaults to the 'master' branch.
    // +optional
    Reference *GitRepositoryRef `json:"ref,omitempty"`

    // Verification specifies the configuration to verify the Git commit
    // signature(s).
    // +optional
    Verification *GitRepositoryVerification `json:"verify,omitempty"`

    // ProxySecretRef specifies the Secret containing the proxy configuration
    // to use while communicating with the Git server.
    // +optional
    ProxySecretRef *meta.LocalObjectReference `json:"proxySecretRef,omitempty"`

    // Ignore overrides the set of excluded patterns in the .sourceignore format
    // (which is the same as .gitignore). If not provided, a default will be used,
    // consult the documentation for your version to find out what those are.
    // +optional
    Ignore *string `json:"ignore,omitempty"`

    // Suspend tells the controller to suspend the reconciliation of this
    // GitRepository.
    // +optional
    Suspend bool `json:"suspend,omitempty"`

    // RecurseSubmodules enables the initialization of all submodules within
    // the GitRepository as cloned from the URL, using their default settings.
    // +optional
    RecurseSubmodules bool `json:"recurseSubmodules,omitempty"`

    // Include specifies a list of GitRepository resources whose Artifacts
    // should be included in the Artifact produced for this GitRepository.
    // +optional
    Include []GitRepositoryInclude `json:"include,omitempty"`

    // SparseCheckout specifies a list of directories to checkout when cloning
    // the repository. If specified, only these directories are included in the
    // Artifact produced for this GitRepository.
    // +optional
    SparseCheckout []string `json:"sparseCheckout,omitempty"`
}

// GitRepositoryInclude specifies a local reference to a GitRepository whose
// Artifact (sub-)contents must be included, and where they should be placed.
type GitRepositoryInclude struct {
    // GitRepositoryRef specifies the GitRepository which Artifact contents
    // must be included.
    // +required
    GitRepositoryRef meta.LocalObjectReference `json:"repository"`

    // FromPath specifies the path to copy contents from, defaults to the root
    // of the Artifact.
    // +optional
    FromPath string `json:"fromPath,omitempty"`

    // ToPath specifies the path to copy contents to, defaults to the name of
    // the GitRepositoryRef.
    // +optional
    ToPath string `json:"toPath,omitempty"`
}

// GetFromPath returns the specified FromPath.
func (in *GitRepositoryInclude) GetFromPath() string {
    return in.FromPath
}

// GetToPath returns the specified ToPath, falling back to the name of the
// GitRepositoryRef.
func (in *GitRepositoryInclude) GetToPath() string {
    if in.ToPath == "" {
        return in.GitRepositoryRef.Name
    }
    return in.ToPath
}

// GitRepositoryRef specifies the Git reference to resolve and checkout.
type GitRepositoryRef struct {
    // Branch to check out, defaults to 'master' if no other field is defined.
    // +optional
    Branch string `json:"branch,omitempty"`

    // Tag to check out, takes precedence over Branch.
    // +optional
    Tag string `json:"tag,omitempty"`

    // SemVer tag expression to check out, takes precedence over Tag.
    // +optional
    SemVer string `json:"semver,omitempty"`

    // Name of the reference to check out; takes precedence over Branch, Tag and SemVer.
    //
    // It must be a valid Git reference: https://git-scm.com/docs/git-check-ref-format#_description
    // Examples: "refs/heads/main", "refs/tags/v0.1.0", "refs/pull/420/head", "refs/merge-requests/1/head"
    // +optional
    Name string `json:"name,omitempty"`

    // Commit SHA to check out, takes precedence over all reference fields.
    //
    // This can be combined with Branch to shallow clone the branch, in which
    // case the commit is expected to exist.
    // +optional
    Commit string `json:"commit,omitempty"`
}

// GitRepositoryVerification specifies the Git commit signature verification
// strategy.
type GitRepositoryVerification struct {
    // Mode specifies which Git object(s) should be verified.
    //
    // The variants "head" and "HEAD" both imply the same thing, i.e. verify
    // the commit that the HEAD of the Git repository points to. The variant
    // "head" solely exists to ensure backwards compatibility.
    // +kubebuilder:validation:Enum=head;HEAD;Tag;TagAndHEAD
    // +optional
    // +kubebuilder:default:=HEAD
    Mode GitVerificationMode `json:"mode,omitempty"`

    // SecretRef specifies the Secret containing the public keys of trusted Git
    // authors.
    // +required
    SecretRef meta.LocalObjectReference `json:"secretRef"`
}

// GitRepositoryStatus records the observed state of a Git repository.
type GitRepositoryStatus struct {
    // ObservedGeneration is the last observed generation of the GitRepository
    // object.
    // +optional
    ObservedGeneration int64 `json:"observedGeneration,omitempty"`

    // Conditions holds the conditions for the GitRepository.
    // +optional
    Conditions []metav1.Condition `json:"conditions,omitempty"`

    // Artifact represents the last successful GitRepository reconciliation.
    // +optional
    Artifact *Artifact `json:"artifact,omitempty"`

    // IncludedArtifacts contains a list of the last successfully included
    // Artifacts as instructed by GitRepositorySpec.Include.
    // +optional
    IncludedArtifacts []*Artifact `json:"includedArtifacts,omitempty"`

    // ObservedIgnore is the observed exclusion patterns used for constructing
    // the source artifact.
    // +optional
    ObservedIgnore *string `json:"observedIgnore,omitempty"`

    // ObservedRecurseSubmodules is the observed resource submodules
    // configuration used to produce the current Artifact.
    // +optional
    ObservedRecurseSubmodules bool `json:"observedRecurseSubmodules,omitempty"`

    // ObservedInclude is the observed list of GitRepository resources used to
    // produce the current Artifact.
    // +optional
    ObservedInclude []GitRepositoryInclude `json:"observedInclude,omitempty"`

    // ObservedSparseCheckout is the observed list of directories used to
    // produce the current Artifact.
    // +optional
    ObservedSparseCheckout []string `json:"observedSparseCheckout,omitempty"`

    // SourceVerificationMode is the last used verification mode indicating
    // which Git object(s) have been verified.
    // +optional
    SourceVerificationMode *GitVerificationMode `json:"sourceVerificationMode,omitempty"`

    meta.ReconcileRequestStatus `json:",inline"`
}

const (
    // GitOperationSucceedReason signals that a Git operation (e.g. clone,
    // checkout, etc.) succeeded.
    GitOperationSucceedReason string = "GitOperationSucceeded"

    // GitOperationFailedReason signals that a Git operation (e.g. clone,
    // checkout, etc.) failed.
    GitOperationFailedReason string = "GitOperationFailed"
)

// GetConditions returns the status conditions of the object.
func (in GitRepository) GetConditions() []metav1.Condition {
    return in.Status.Conditions
}

// SetConditions sets the status conditions on the object.
func (in *GitRepository) SetConditions(conditions []metav1.Condition) {
    in.Status.Conditions = conditions
}

// GetRequeueAfter returns the duration after which the GitRepository must be
// reconciled again.
func (in GitRepository) GetRequeueAfter() time.Duration {
    return in.Spec.Interval.Duration
}

// GetArtifact returns the latest Artifact from the GitRepository if present in
// the status sub-resource.
func (in *GitRepository) GetArtifact() *Artifact {
    return in.Status.Artifact
}

// GetProvider returns the Git authentication provider.
func (v *GitRepository) GetProvider() string {
    if v.Spec.Provider == "" {
        return GitProviderGeneric
    }
    return v.Spec.Provider
}

// GetMode returns the declared GitVerificationMode, or a ModeGitHEAD default.
func (v *GitRepositoryVerification) GetMode() GitVerificationMode {
    if v.Mode.Valid() {
        return v.Mode
    }
    return ModeGitHEAD
}

// VerifyHEAD returns if the configured mode instructs verification of the
// Git HEAD.
func (v *GitRepositoryVerification) VerifyHEAD() bool {
    return v.GetMode() == ModeGitHEAD || v.GetMode() == ModeGitTagAndHEAD
}

// VerifyTag returns if the configured mode instructs verification of the
// Git tag.
func (v *GitRepositoryVerification) VerifyTag() bool {
    return v.GetMode() == ModeGitTag || v.GetMode() == ModeGitTagAndHEAD
}

// +genclient
// +kubebuilder:storageversion
// +kubebuilder:object:root=true
// +kubebuilder:resource:shortName=gitrepo
// +kubebuilder:subresource:status
// +kubebuilder:printcolumn:name="URL",type=string,JSONPath=`.spec.url`
// +kubebuilder:printcolumn:name="Age",type="date",JSONPath=".metadata.creationTimestamp",description=""
// +kubebuilder:printcolumn:name="Ready",type="string",JSONPath=".status.conditions[?(@.type==\"Ready\")].status",description=""
// +kubebuilder:printcolumn:name="Status",type="string",JSONPath=".status.conditions[?(@.type==\"Ready\")].message",description=""

// GitRepository is the Schema for the gitrepositories API.
type GitRepository struct {
    metav1.TypeMeta   `json:",inline"`
    metav1.ObjectMeta `json:"metadata,omitempty"`

    Spec GitRepositorySpec `json:"spec,omitempty"`
    // +kubebuilder:default={"observedGeneration":-1}
    Status GitRepositoryStatus `json:"status,omitempty"`
}

// GitRepositoryList contains a list of GitRepository objects.
// +kubebuilder:object:root=true
type GitRepositoryList struct {
    metav1.TypeMeta `json:",inline"`
    metav1.ListMeta `json:"metadata,omitempty"`
    Items           []GitRepository `json:"items"`
}

func init() {
    SchemeBuilder.Register(&GitRepository{}, &GitRepositoryList{})
}
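The reference fields above resolve with a fixed precedence: Commit over Name over SemVer over Tag over Branch. A sketch (not part of the diff) pinning a repository to a tag and verifying the tag object's signature; the URL and secret name are invented:

// Sketch only: a GitRepository pinned to a tag, in the same package as above.
repo := &GitRepository{
    ObjectMeta: metav1.ObjectMeta{Name: "podinfo", Namespace: "flux-system"},
    Spec: GitRepositorySpec{
        URL:      "https://example.com/org/repo", // hypothetical
        Interval: metav1.Duration{Duration: time.Minute},
        Reference: &GitRepositoryRef{
            Branch: "main", // ignored here: Tag takes precedence over Branch
            Tag:    "v1.0.0",
        },
        Verification: &GitRepositoryVerification{
            Mode:      ModeGitTag,
            SecretRef: meta.LocalObjectReference{Name: "pgp-public-keys"}, // hypothetical
        },
    },
}
// VerifyTag reports whether the resolved mode covers the tag object.
_ = repo.Spec.Verification.VerifyTag() // true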
@ -1,33 +0,0 @@
/*
Copyright 2023 The Flux authors

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package v1

import (
    "k8s.io/apimachinery/pkg/runtime/schema"
    "sigs.k8s.io/controller-runtime/pkg/scheme"
)

var (
    // GroupVersion is the group version used to register these objects.
    GroupVersion = schema.GroupVersion{Group: "source.toolkit.fluxcd.io", Version: "v1"}

    // SchemeBuilder is used to add go types to the GroupVersionKind scheme.
    SchemeBuilder = &scheme.Builder{GroupVersion: GroupVersion}

    // AddToScheme adds the types in this group-version to the given scheme.
    AddToScheme = SchemeBuilder.AddToScheme
)
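AddToScheme is how consumers make these types known to a runtime.Scheme before building a client. A sketch (not part of the diff), using the standard controller-runtime packages (client is sigs.k8s.io/controller-runtime/pkg/client, config is sigs.k8s.io/controller-runtime/pkg/client/config, runtime is k8s.io/apimachinery/pkg/runtime):

// Sketch only: building a client that can read and write the v1 source types.
scheme := runtime.NewScheme()
if err := AddToScheme(scheme); err != nil { // registers source.toolkit.fluxcd.io/v1
    panic(err)
}
c, err := client.New(config.GetConfigOrDie(), client.Options{Scheme: scheme})
if err != nil {
    panic(err)
}
// c can now Get/List GitRepository, Bucket, HelmChart, and the other
// types registered by the init() functions in this package.
_ = c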
@ -1,227 +0,0 @@
|
|||
/*
|
||||
Copyright 2024 The Flux authors
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package v1
|
||||
|
||||
import (
|
||||
"time"
|
||||
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
|
||||
"github.com/fluxcd/pkg/apis/meta"
|
||||
)
|
||||
|
||||
// HelmChartKind is the string representation of a HelmChart.
|
||||
const HelmChartKind = "HelmChart"
|
||||
|
||||
// HelmChartSpec specifies the desired state of a Helm chart.
|
||||
type HelmChartSpec struct {
|
||||
// Chart is the name or path the Helm chart is available at in the
|
||||
// SourceRef.
|
||||
// +required
|
||||
Chart string `json:"chart"`
|
||||
|
||||
// Version is the chart version semver expression, ignored for charts from
|
||||
// GitRepository and Bucket sources. Defaults to latest when omitted.
|
||||
// +kubebuilder:default:=*
|
||||
// +optional
|
||||
Version string `json:"version,omitempty"`
|
||||
|
||||
// SourceRef is the reference to the Source the chart is available at.
|
||||
// +required
|
||||
SourceRef LocalHelmChartSourceReference `json:"sourceRef"`
|
||||
|
||||
// Interval at which the HelmChart SourceRef is checked for updates.
|
||||
// This interval is approximate and may be subject to jitter to ensure
|
||||
// efficient use of resources.
|
||||
// +kubebuilder:validation:Type=string
|
||||
// +kubebuilder:validation:Pattern="^([0-9]+(\\.[0-9]+)?(ms|s|m|h))+$"
|
||||
// +required
|
||||
Interval metav1.Duration `json:"interval"`
|
||||
|
||||
// ReconcileStrategy determines what enables the creation of a new artifact.
|
||||
// Valid values are ('ChartVersion', 'Revision').
|
||||
// See the documentation of the values for an explanation on their behavior.
|
||||
// Defaults to ChartVersion when omitted.
|
||||
// +kubebuilder:validation:Enum=ChartVersion;Revision
|
||||
// +kubebuilder:default:=ChartVersion
|
||||
// +optional
|
||||
ReconcileStrategy string `json:"reconcileStrategy,omitempty"`
|
||||
|
||||
// ValuesFiles is an alternative list of values files to use as the chart
|
||||
// values (values.yaml is not included by default), expected to be a
|
||||
// relative path in the SourceRef.
|
||||
// Values files are merged in the order of this list with the last file
|
||||
// overriding the first. Ignored when omitted.
|
||||
// +optional
|
||||
ValuesFiles []string `json:"valuesFiles,omitempty"`
|
||||
|
||||
// IgnoreMissingValuesFiles controls whether to silently ignore missing values
|
||||
// files rather than failing.
|
||||
// +optional
|
||||
IgnoreMissingValuesFiles bool `json:"ignoreMissingValuesFiles,omitempty"`
|
||||
|
||||
// Suspend tells the controller to suspend the reconciliation of this
|
||||
// source.
|
||||
// +optional
|
||||
Suspend bool `json:"suspend,omitempty"`
|
||||
|
||||
// Verify contains the secret name containing the trusted public keys
|
||||
// used to verify the signature and specifies which provider to use to check
|
||||
// whether OCI image is authentic.
|
||||
// This field is only supported when using HelmRepository source with spec.type 'oci'.
|
||||
// Chart dependencies, which are not bundled in the umbrella chart artifact, are not verified.
|
||||
// +optional
|
||||
Verify *OCIRepositoryVerification `json:"verify,omitempty"`
|
||||
}
|
||||
|
||||
const (
|
||||
// ReconcileStrategyChartVersion reconciles when the version of the Helm chart is different.
|
||||
ReconcileStrategyChartVersion string = "ChartVersion"
|
||||
|
||||
// ReconcileStrategyRevision reconciles when the Revision of the source is different.
|
||||
ReconcileStrategyRevision string = "Revision"
|
||||
)
|
||||
|
||||
// LocalHelmChartSourceReference contains enough information to let you locate
|
||||
// the typed referenced object at namespace level.
|
||||
type LocalHelmChartSourceReference struct {
|
||||
// APIVersion of the referent.
|
||||
// +optional
|
||||
APIVersion string `json:"apiVersion,omitempty"`
|
||||
|
||||
// Kind of the referent, valid values are ('HelmRepository', 'GitRepository',
|
||||
// 'Bucket').
|
||||
// +kubebuilder:validation:Enum=HelmRepository;GitRepository;Bucket
|
||||
// +required
|
||||
Kind string `json:"kind"`
|
||||
|
||||
// Name of the referent.
|
||||
// +required
|
||||
Name string `json:"name"`
|
||||
}
|
||||
|
||||
// HelmChartStatus records the observed state of the HelmChart.
|
||||
type HelmChartStatus struct {
|
||||
// ObservedGeneration is the last observed generation of the HelmChart
|
||||
// object.
|
||||
// +optional
|
||||
ObservedGeneration int64 `json:"observedGeneration,omitempty"`
|
||||
|
||||
// ObservedSourceArtifactRevision is the last observed Artifact.Revision
|
||||
// of the HelmChartSpec.SourceRef.
|
||||
// +optional
|
||||
ObservedSourceArtifactRevision string `json:"observedSourceArtifactRevision,omitempty"`
|
||||
|
||||
// ObservedChartName is the last observed chart name as specified by the
|
||||
// resolved chart reference.
|
||||
// +optional
|
||||
ObservedChartName string `json:"observedChartName,omitempty"`
|
||||
|
||||
// ObservedValuesFiles are the observed value files of the last successful
|
||||
// reconciliation.
|
||||
// It matches the chart in the last successfully reconciled artifact.
|
||||
// +optional
|
||||
ObservedValuesFiles []string `json:"observedValuesFiles,omitempty"`
|
||||
|
||||
// Conditions holds the conditions for the HelmChart.
|
||||
// +optional
|
||||
Conditions []metav1.Condition `json:"conditions,omitempty"`
|
||||
|
||||
// URL is the dynamic fetch link for the latest Artifact.
|
||||
// It is provided on a "best effort" basis, and using the precise
|
||||
// BucketStatus.Artifact data is recommended.
|
||||
// +optional
|
||||
URL string `json:"url,omitempty"`
|
||||
|
||||
// Artifact represents the output of the last successful reconciliation.
|
||||
// +optional
|
||||
Artifact *Artifact `json:"artifact,omitempty"`
|
||||
|
||||
meta.ReconcileRequestStatus `json:",inline"`
|
||||
}

const (
	// ChartPullSucceededReason signals that the pull of the Helm chart
	// succeeded.
	ChartPullSucceededReason string = "ChartPullSucceeded"

	// ChartPackageSucceededReason signals that the package of the Helm
	// chart succeeded.
	ChartPackageSucceededReason string = "ChartPackageSucceeded"
)

// GetConditions returns the status conditions of the object.
func (in HelmChart) GetConditions() []metav1.Condition {
	return in.Status.Conditions
}

// SetConditions sets the status conditions on the object.
func (in *HelmChart) SetConditions(conditions []metav1.Condition) {
	in.Status.Conditions = conditions
}

// GetRequeueAfter returns the duration after which the source must be
// reconciled again.
func (in HelmChart) GetRequeueAfter() time.Duration {
	return in.Spec.Interval.Duration
}

// GetArtifact returns the latest artifact from the source if present in the
// status sub-resource.
func (in *HelmChart) GetArtifact() *Artifact {
	return in.Status.Artifact
}

// GetValuesFiles returns the list of HelmChartSpec.ValuesFiles.
func (in *HelmChart) GetValuesFiles() []string {
	return in.Spec.ValuesFiles
}
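
// Example: a minimal sketch of how a reconciler might consume the accessors
// above (the surrounding requeue plumbing is assumed):
//
//	func requeueInterval(hc *HelmChart) time.Duration {
//		if hc.GetArtifact() == nil {
//			return 0 // nothing fetched yet; the retry is driven elsewhere
//		}
//		return hc.GetRequeueAfter()
//	}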

// +genclient
// +kubebuilder:storageversion
// +kubebuilder:object:root=true
// +kubebuilder:resource:shortName=hc
// +kubebuilder:subresource:status
// +kubebuilder:printcolumn:name="Chart",type=string,JSONPath=`.spec.chart`
// +kubebuilder:printcolumn:name="Version",type=string,JSONPath=`.spec.version`
// +kubebuilder:printcolumn:name="Source Kind",type=string,JSONPath=`.spec.sourceRef.kind`
// +kubebuilder:printcolumn:name="Source Name",type=string,JSONPath=`.spec.sourceRef.name`
// +kubebuilder:printcolumn:name="Age",type="date",JSONPath=".metadata.creationTimestamp",description=""
// +kubebuilder:printcolumn:name="Ready",type="string",JSONPath=".status.conditions[?(@.type==\"Ready\")].status",description=""
// +kubebuilder:printcolumn:name="Status",type="string",JSONPath=".status.conditions[?(@.type==\"Ready\")].message",description=""

// HelmChart is the Schema for the helmcharts API.
type HelmChart struct {
	metav1.TypeMeta   `json:",inline"`
	metav1.ObjectMeta `json:"metadata,omitempty"`

	Spec HelmChartSpec `json:"spec,omitempty"`
	// +kubebuilder:default={"observedGeneration":-1}
	Status HelmChartStatus `json:"status,omitempty"`
}

// HelmChartList contains a list of HelmChart objects.
// +kubebuilder:object:root=true
type HelmChartList struct {
	metav1.TypeMeta `json:",inline"`
	metav1.ListMeta `json:"metadata,omitempty"`
	Items           []HelmChart `json:"items"`
}

func init() {
	SchemeBuilder.Register(&HelmChart{}, &HelmChartList{})
}
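
// Example: a minimal sketch of registering this API group into a runtime
// scheme; AddToScheme is the standard companion generated alongside
// SchemeBuilder, and its presence is assumed here:
//
//	scheme := runtime.NewScheme()
//	if err := AddToScheme(scheme); err != nil {
//		panic(err)
//	}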

@@ -1,228 +0,0 @@
/*
Copyright 2024 The Flux authors

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package v1

import (
	"time"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

	"github.com/fluxcd/pkg/apis/acl"
	"github.com/fluxcd/pkg/apis/meta"
)

const (
	// HelmRepositoryKind is the string representation of a HelmRepository.
	HelmRepositoryKind = "HelmRepository"
	// HelmRepositoryURLIndexKey is the key used for indexing HelmRepository
	// objects by their HelmRepositorySpec.URL.
	HelmRepositoryURLIndexKey = ".metadata.helmRepositoryURL"
	// HelmRepositoryTypeDefault is the default HelmRepository type.
	// It is used when no type is specified and corresponds to a Helm repository.
	HelmRepositoryTypeDefault = "default"
	// HelmRepositoryTypeOCI is the type for an OCI repository.
	HelmRepositoryTypeOCI = "oci"
)

// HelmRepositorySpec specifies the required configuration to produce an
// Artifact for a Helm repository index YAML.
type HelmRepositorySpec struct {
	// URL of the Helm repository, a valid URL contains at least a protocol and
	// host.
	// +kubebuilder:validation:Pattern="^(http|https|oci)://.*$"
	// +required
	URL string `json:"url"`

	// SecretRef specifies the Secret containing authentication credentials
	// for the HelmRepository.
	// For HTTP/S basic auth the secret must contain 'username' and 'password'
	// fields.
	// Support for TLS auth using the 'certFile' and 'keyFile', and/or 'caFile'
	// keys is deprecated. Please use `.spec.certSecretRef` instead.
	// +optional
	SecretRef *meta.LocalObjectReference `json:"secretRef,omitempty"`

	// CertSecretRef can be given the name of a Secret containing
	// either or both of
	//
	// - a PEM-encoded client certificate (`tls.crt`) and private
	// key (`tls.key`);
	// - a PEM-encoded CA certificate (`ca.crt`)
	//
	// and whichever are supplied, will be used for connecting to the
	// registry. The client cert and key are useful if you are
	// authenticating with a certificate; the CA cert is useful if
	// you are using a self-signed server certificate. The Secret must
	// be of type `Opaque` or `kubernetes.io/tls`.
	//
	// It takes precedence over the values specified in the Secret referred
	// to by `.spec.secretRef`.
	// +optional
	CertSecretRef *meta.LocalObjectReference `json:"certSecretRef,omitempty"`

	// PassCredentials allows the credentials from the SecretRef to be passed
	// on to a host that does not match the host as defined in URL.
	// This may be required if the host of the advertised chart URLs in the
	// index differs from the defined URL.
	// Enabling this should be done with caution, as it can potentially result
	// in credentials getting stolen in a MITM attack.
	// +optional
	PassCredentials bool `json:"passCredentials,omitempty"`

	// Interval at which the HelmRepository URL is checked for updates.
	// This interval is approximate and may be subject to jitter to ensure
	// efficient use of resources.
	// +kubebuilder:validation:Type=string
	// +kubebuilder:validation:Pattern="^([0-9]+(\\.[0-9]+)?(ms|s|m|h))+$"
	// +optional
	Interval metav1.Duration `json:"interval,omitempty"`

	// Insecure allows connecting to a non-TLS HTTP container registry.
	// This field is only taken into account if the .spec.type field is set to 'oci'.
	// +optional
	Insecure bool `json:"insecure,omitempty"`

	// Timeout is used for the index fetch operation for an HTTPS helm repository,
	// and for remote OCI Repository operations like pulling for an OCI helm
	// chart by the associated HelmChart.
	// Its default value is 60s.
	// +kubebuilder:validation:Type=string
	// +kubebuilder:validation:Pattern="^([0-9]+(\\.[0-9]+)?(ms|s|m))+$"
	// +optional
	Timeout *metav1.Duration `json:"timeout,omitempty"`

	// Suspend tells the controller to suspend the reconciliation of this
	// HelmRepository.
	// +optional
	Suspend bool `json:"suspend,omitempty"`

	// AccessFrom specifies an Access Control List for allowing cross-namespace
	// references to this object.
	// NOTE: Not implemented, provisional as of https://github.com/fluxcd/flux2/pull/2092
	// +optional
	AccessFrom *acl.AccessFrom `json:"accessFrom,omitempty"`

	// Type of the HelmRepository.
	// When this field is set to "oci", the URL field value must be prefixed with "oci://".
	// +kubebuilder:validation:Enum=default;oci
	// +optional
	Type string `json:"type,omitempty"`

	// Provider used for authentication, can be 'aws', 'azure', 'gcp' or 'generic'.
	// This field is optional, and only taken into account if the .spec.type field is set to 'oci'.
	// When not specified, defaults to 'generic'.
	// +kubebuilder:validation:Enum=generic;aws;azure;gcp
	// +kubebuilder:default:=generic
	// +optional
	Provider string `json:"provider,omitempty"`
}
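
// Example: a minimal sketch of an OCI-backed spec (the registry URL is
// illustrative, not part of the API):
//
//	spec := HelmRepositorySpec{
//		URL:      "oci://ghcr.io/example/charts",
//		Type:     HelmRepositoryTypeOCI,
//		Interval: metav1.Duration{Duration: 10 * time.Minute},
//	}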

// HelmRepositoryStatus records the observed state of the HelmRepository.
type HelmRepositoryStatus struct {
	// ObservedGeneration is the last observed generation of the HelmRepository
	// object.
	// +optional
	ObservedGeneration int64 `json:"observedGeneration,omitempty"`

	// Conditions holds the conditions for the HelmRepository.
	// +optional
	Conditions []metav1.Condition `json:"conditions,omitempty"`

	// URL is the dynamic fetch link for the latest Artifact.
	// It is provided on a "best effort" basis, and using the precise
	// HelmRepositoryStatus.Artifact data is recommended.
	// +optional
	URL string `json:"url,omitempty"`

	// Artifact represents the last successful HelmRepository reconciliation.
	// +optional
	Artifact *Artifact `json:"artifact,omitempty"`

	meta.ReconcileRequestStatus `json:",inline"`
}

const (
	// IndexationFailedReason signals that the HelmRepository index fetch
	// failed.
	IndexationFailedReason string = "IndexationFailed"
)

// GetConditions returns the status conditions of the object.
func (in HelmRepository) GetConditions() []metav1.Condition {
	return in.Status.Conditions
}

// SetConditions sets the status conditions on the object.
func (in *HelmRepository) SetConditions(conditions []metav1.Condition) {
	in.Status.Conditions = conditions
}

// GetRequeueAfter returns the duration after which the source must be
// reconciled again.
func (in HelmRepository) GetRequeueAfter() time.Duration {
	if in.Spec.Interval.Duration != 0 {
		return in.Spec.Interval.Duration
	}
	return time.Minute
}

// GetTimeout returns the timeout duration used for various operations related
// to this HelmRepository.
func (in HelmRepository) GetTimeout() time.Duration {
	if in.Spec.Timeout != nil {
		return in.Spec.Timeout.Duration
	}
	return time.Minute
}
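
// Example: both getters fall back to one minute when the spec leaves the
// value unset, so even a zero-valued object is safe to schedule:
//
//	var repo HelmRepository
//	_ = repo.GetRequeueAfter() // time.Minute (spec.interval is zero)
//	_ = repo.GetTimeout()      // time.Minute (spec.timeout is nil)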

// GetArtifact returns the latest artifact from the source if present in the
// status sub-resource.
func (in *HelmRepository) GetArtifact() *Artifact {
	return in.Status.Artifact
}

// +genclient
// +kubebuilder:storageversion
// +kubebuilder:object:root=true
// +kubebuilder:resource:shortName=helmrepo
// +kubebuilder:subresource:status
// +kubebuilder:printcolumn:name="URL",type=string,JSONPath=`.spec.url`
// +kubebuilder:printcolumn:name="Age",type="date",JSONPath=".metadata.creationTimestamp",description=""
// +kubebuilder:printcolumn:name="Ready",type="string",JSONPath=".status.conditions[?(@.type==\"Ready\")].status",description=""
// +kubebuilder:printcolumn:name="Status",type="string",JSONPath=".status.conditions[?(@.type==\"Ready\")].message",description=""

// HelmRepository is the Schema for the helmrepositories API.
type HelmRepository struct {
	metav1.TypeMeta   `json:",inline"`
	metav1.ObjectMeta `json:"metadata,omitempty"`

	Spec HelmRepositorySpec `json:"spec,omitempty"`
	// +kubebuilder:default={"observedGeneration":-1}
	Status HelmRepositoryStatus `json:"status,omitempty"`
}

// HelmRepositoryList contains a list of HelmRepository objects.
// +kubebuilder:object:root=true
type HelmRepositoryList struct {
	metav1.TypeMeta `json:",inline"`
	metav1.ListMeta `json:"metadata,omitempty"`
	Items           []HelmRepository `json:"items"`
}

func init() {
	SchemeBuilder.Register(&HelmRepository{}, &HelmRepositoryList{})
}

@@ -1,296 +0,0 @@
/*
Copyright 2025 The Flux authors

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package v1

import (
	"time"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

	"github.com/fluxcd/pkg/apis/meta"
)

const (
	// OCIRepositoryKind is the string representation of an OCIRepository.
	OCIRepositoryKind = "OCIRepository"

	// OCIRepositoryPrefix is the prefix used for OCIRepository URLs.
	OCIRepositoryPrefix = "oci://"

	// GenericOCIProvider provides support for authentication using static credentials
	// for any OCI-compatible API such as Docker Registry, GitHub Container Registry,
	// Docker Hub, Quay, etc.
	GenericOCIProvider string = "generic"

	// AmazonOCIProvider provides support for OCI authentication using AWS IRSA.
	AmazonOCIProvider string = "aws"

	// GoogleOCIProvider provides support for OCI authentication using GCP workload identity.
	GoogleOCIProvider string = "gcp"

	// AzureOCIProvider provides support for OCI authentication using an Azure Service Principal,
	// Managed Identity or Shared Key.
	AzureOCIProvider string = "azure"

	// OCILayerExtract defines the operation type for extracting the content from an OCI artifact layer.
	OCILayerExtract = "extract"

	// OCILayerCopy defines the operation type for copying the content from an OCI artifact layer.
	OCILayerCopy = "copy"
)

// OCIRepositorySpec defines the desired state of OCIRepository.
type OCIRepositorySpec struct {
	// URL is a reference to an OCI artifact repository hosted
	// on a remote container registry.
	// +kubebuilder:validation:Pattern="^oci://.*$"
	// +required
	URL string `json:"url"`

	// The OCI reference to pull and monitor for changes,
	// defaults to the latest tag.
	// +optional
	Reference *OCIRepositoryRef `json:"ref,omitempty"`

	// LayerSelector specifies which layer should be extracted from the OCI artifact.
	// When not specified, the first layer found in the artifact is selected.
	// +optional
	LayerSelector *OCILayerSelector `json:"layerSelector,omitempty"`

	// The provider used for authentication, can be 'aws', 'azure', 'gcp' or 'generic'.
	// When not specified, defaults to 'generic'.
	// +kubebuilder:validation:Enum=generic;aws;azure;gcp
	// +kubebuilder:default:=generic
	// +optional
	Provider string `json:"provider,omitempty"`

	// SecretRef contains the secret name containing the registry login
	// credentials to resolve image metadata.
	// The secret must be of type kubernetes.io/dockerconfigjson.
	// +optional
	SecretRef *meta.LocalObjectReference `json:"secretRef,omitempty"`

	// Verify contains the secret name containing the trusted public keys
	// used to verify the signature and specifies which provider to use to check
	// whether the OCI image is authentic.
	// +optional
	Verify *OCIRepositoryVerification `json:"verify,omitempty"`

	// ServiceAccountName is the name of the Kubernetes ServiceAccount used to authenticate
	// the image pull if the service account has attached pull secrets. For more information:
	// https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/#add-imagepullsecrets-to-a-service-account
	// +optional
	ServiceAccountName string `json:"serviceAccountName,omitempty"`

	// CertSecretRef can be given the name of a Secret containing
	// either or both of
	//
	// - a PEM-encoded client certificate (`tls.crt`) and private
	// key (`tls.key`);
	// - a PEM-encoded CA certificate (`ca.crt`)
	//
	// and whichever are supplied, will be used for connecting to the
	// registry. The client cert and key are useful if you are
	// authenticating with a certificate; the CA cert is useful if
	// you are using a self-signed server certificate. The Secret must
	// be of type `Opaque` or `kubernetes.io/tls`.
	// +optional
	CertSecretRef *meta.LocalObjectReference `json:"certSecretRef,omitempty"`

	// ProxySecretRef specifies the Secret containing the proxy configuration
	// to use while communicating with the container registry.
	// +optional
	ProxySecretRef *meta.LocalObjectReference `json:"proxySecretRef,omitempty"`

	// Interval at which the OCIRepository URL is checked for updates.
	// This interval is approximate and may be subject to jitter to ensure
	// efficient use of resources.
	// +kubebuilder:validation:Type=string
	// +kubebuilder:validation:Pattern="^([0-9]+(\\.[0-9]+)?(ms|s|m|h))+$"
	// +required
	Interval metav1.Duration `json:"interval"`

	// The timeout for remote OCI Repository operations like pulling, defaults to 60s.
	// +kubebuilder:default="60s"
	// +kubebuilder:validation:Type=string
	// +kubebuilder:validation:Pattern="^([0-9]+(\\.[0-9]+)?(ms|s|m))+$"
	// +optional
	Timeout *metav1.Duration `json:"timeout,omitempty"`

	// Ignore overrides the set of excluded patterns in the .sourceignore format
	// (which is the same as .gitignore). If not provided, a default will be used;
	// consult the documentation for your version to find out what those are.
	// +optional
	Ignore *string `json:"ignore,omitempty"`

	// Insecure allows connecting to a non-TLS HTTP container registry.
	// +optional
	Insecure bool `json:"insecure,omitempty"`

	// This flag tells the controller to suspend the reconciliation of this source.
	// +optional
	Suspend bool `json:"suspend,omitempty"`
}

// OCIRepositoryRef defines the image reference for the OCIRepository's URL.
type OCIRepositoryRef struct {
	// Digest is the image digest to pull, takes precedence over SemVer.
	// The value should be in the format 'sha256:<HASH>'.
	// +optional
	Digest string `json:"digest,omitempty"`

	// SemVer is the range of tags to pull selecting the latest within
	// the range, takes precedence over Tag.
	// +optional
	SemVer string `json:"semver,omitempty"`

	// SemverFilter is a regex pattern to filter the tags within the SemVer range.
	// +optional
	SemverFilter string `json:"semverFilter,omitempty"`

	// Tag is the image tag to pull, defaults to latest.
	// +optional
	Tag string `json:"tag,omitempty"`
}
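
// Example: a minimal sketch of the documented precedence (digest over
// semver over tag); the actual reference resolution lives in the
// controller and is only mimicked here:
//
//	func describeRef(r OCIRepositoryRef) string {
//		switch {
//		case r.Digest != "":
//			return "digest:" + r.Digest
//		case r.SemVer != "":
//			return "semver:" + r.SemVer
//		default:
//			return "tag:" + r.Tag // an empty Tag means 'latest'
//		}
//	}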

// OCILayerSelector specifies which layer should be extracted from an OCI Artifact.
type OCILayerSelector struct {
	// MediaType specifies the OCI media type of the layer
	// which should be extracted from the OCI Artifact. The
	// first layer matching this type is selected.
	// +optional
	MediaType string `json:"mediaType,omitempty"`

	// Operation specifies how the selected layer should be processed.
	// By default, the layer compressed content is extracted to storage.
	// When the operation is set to 'copy', the layer compressed content
	// is persisted to storage as it is.
	// +kubebuilder:validation:Enum=extract;copy
	// +optional
	Operation string `json:"operation,omitempty"`
}

// OCIRepositoryStatus defines the observed state of OCIRepository.
type OCIRepositoryStatus struct {
	// ObservedGeneration is the last observed generation.
	// +optional
	ObservedGeneration int64 `json:"observedGeneration,omitempty"`

	// Conditions holds the conditions for the OCIRepository.
	// +optional
	Conditions []metav1.Condition `json:"conditions,omitempty"`

	// URL is the download link for the artifact output of the last OCI Repository sync.
	// +optional
	URL string `json:"url,omitempty"`

	// Artifact represents the output of the last successful OCI Repository sync.
	// +optional
	Artifact *Artifact `json:"artifact,omitempty"`

	// ObservedIgnore is the observed exclusion patterns used for constructing
	// the source artifact.
	// +optional
	ObservedIgnore *string `json:"observedIgnore,omitempty"`

	// ObservedLayerSelector is the observed layer selector used for constructing
	// the source artifact.
	// +optional
	ObservedLayerSelector *OCILayerSelector `json:"observedLayerSelector,omitempty"`

	meta.ReconcileRequestStatus `json:",inline"`
}

const (
	// OCIPullFailedReason signals that a pull operation failed.
	OCIPullFailedReason string = "OCIArtifactPullFailed"

	// OCILayerOperationFailedReason signals that an OCI layer operation failed.
	OCILayerOperationFailedReason string = "OCIArtifactLayerOperationFailed"
)

// GetConditions returns the status conditions of the object.
func (in OCIRepository) GetConditions() []metav1.Condition {
	return in.Status.Conditions
}

// SetConditions sets the status conditions on the object.
func (in *OCIRepository) SetConditions(conditions []metav1.Condition) {
	in.Status.Conditions = conditions
}

// GetRequeueAfter returns the duration after which the OCIRepository must be
// reconciled again.
func (in OCIRepository) GetRequeueAfter() time.Duration {
	return in.Spec.Interval.Duration
}

// GetArtifact returns the latest Artifact from the OCIRepository if present in
// the status sub-resource.
func (in *OCIRepository) GetArtifact() *Artifact {
	return in.Status.Artifact
}

// GetLayerMediaType returns the media type of the layer selector, if
// specified in the spec.
func (in *OCIRepository) GetLayerMediaType() string {
	if in.Spec.LayerSelector == nil {
		return ""
	}

	return in.Spec.LayerSelector.MediaType
}

// GetLayerOperation returns the layer selector operation (defaults to extract).
func (in *OCIRepository) GetLayerOperation() string {
	if in.Spec.LayerSelector == nil || in.Spec.LayerSelector.Operation == "" {
		return OCILayerExtract
	}

	return in.Spec.LayerSelector.Operation
}
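
// Example: with no layer selector the defaults apply, matching the
// kubebuilder markers documented on OCILayerSelector:
//
//	var repo OCIRepository
//	_ = repo.GetLayerMediaType() // "" (no selector configured)
//	_ = repo.GetLayerOperation() // OCILayerExtract ("extract")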

// +genclient
// +kubebuilder:storageversion
// +kubebuilder:object:root=true
// +kubebuilder:resource:shortName=ocirepo
// +kubebuilder:subresource:status
// +kubebuilder:printcolumn:name="URL",type=string,JSONPath=`.spec.url`
// +kubebuilder:printcolumn:name="Ready",type="string",JSONPath=".status.conditions[?(@.type==\"Ready\")].status",description=""
// +kubebuilder:printcolumn:name="Status",type="string",JSONPath=".status.conditions[?(@.type==\"Ready\")].message",description=""
// +kubebuilder:printcolumn:name="Age",type="date",JSONPath=".metadata.creationTimestamp",description=""

// OCIRepository is the Schema for the ocirepositories API.
type OCIRepository struct {
	metav1.TypeMeta   `json:",inline"`
	metav1.ObjectMeta `json:"metadata,omitempty"`

	Spec OCIRepositorySpec `json:"spec,omitempty"`
	// +kubebuilder:default={"observedGeneration":-1}
	Status OCIRepositoryStatus `json:"status,omitempty"`
}

// OCIRepositoryList contains a list of OCIRepository objects.
// +kubebuilder:object:root=true
type OCIRepositoryList struct {
	metav1.TypeMeta `json:",inline"`
	metav1.ListMeta `json:"metadata,omitempty"`
	Items           []OCIRepository `json:"items"`
}

func init() {
	SchemeBuilder.Register(&OCIRepository{}, &OCIRepositoryList{})
}

@@ -1,56 +0,0 @@
/*
Copyright 2024 The Flux authors

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package v1

import (
	"github.com/fluxcd/pkg/apis/meta"
)

// OCIRepositoryVerification verifies the authenticity of an OCI Artifact.
type OCIRepositoryVerification struct {
	// Provider specifies the technology used to sign the OCI Artifact.
	// +kubebuilder:validation:Enum=cosign;notation
	// +kubebuilder:default:=cosign
	Provider string `json:"provider"`

	// SecretRef specifies the Kubernetes Secret containing the
	// trusted public keys.
	// +optional
	SecretRef *meta.LocalObjectReference `json:"secretRef,omitempty"`

	// MatchOIDCIdentity specifies the identity matching criteria to use
	// while verifying an OCI artifact which was signed using Cosign keyless
	// signing. The artifact's identity is deemed to be verified if any of the
	// specified matchers match against the identity.
	// +optional
	MatchOIDCIdentity []OIDCIdentityMatch `json:"matchOIDCIdentity,omitempty"`
}

// OIDCIdentityMatch specifies options for verifying the certificate identity,
// i.e. the issuer and the subject of the certificate.
type OIDCIdentityMatch struct {
	// Issuer specifies the regex pattern to match against to verify
	// the OIDC issuer in the Fulcio certificate. The pattern must be a
	// valid Go regular expression.
	// +required
	Issuer string `json:"issuer"`
	// Subject specifies the regex pattern to match against to verify
	// the identity subject in the Fulcio certificate. The pattern must
	// be a valid Go regular expression.
	// +required
	Subject string `json:"subject"`
}
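
// Example: a minimal sketch of keyless Cosign verification pinned to a
// CI identity; the issuer and subject patterns are illustrative:
//
//	verify := OCIRepositoryVerification{
//		Provider: "cosign",
//		MatchOIDCIdentity: []OIDCIdentityMatch{{
//			Issuer:  "^https://token\\.actions\\.githubusercontent\\.com$",
//			Subject: "^https://github\\.com/example/repo.*$",
//		}},
//	}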

@@ -1,45 +0,0 @@
/*
Copyright 2023 The Flux authors

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package v1

import (
	"time"

	"k8s.io/apimachinery/pkg/runtime"
)

const (
	// SourceIndexKey is the key used for indexing objects based on their
	// referenced Source.
	SourceIndexKey string = ".metadata.source"
)

// Source is the interface that provides generic access to the Artifact and
// interval. It must be supported by all kinds of the source.toolkit.fluxcd.io
// API group.
//
// +k8s:deepcopy-gen=false
type Source interface {
	runtime.Object
	// GetRequeueAfter returns the duration after which the source must be
	// reconciled again.
	GetRequeueAfter() time.Duration
	// GetArtifact returns the latest artifact from the source if present in
	// the status sub-resource.
	GetArtifact() *Artifact
}
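
// Example: a minimal sketch of code written against the interface rather
// than a concrete kind, so it works for any source in this API group:
//
//	func isReadyForUse(src Source) bool {
//		return src.GetArtifact() != nil
//	}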

@@ -1,26 +0,0 @@
/*
Copyright 2024 The Flux authors

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package v1

const (
	// STSProviderAmazon represents the AWS provider for Security Token Service.
	// Provides support for fetching temporary credentials from an AWS STS endpoint.
	STSProviderAmazon string = "aws"
	// STSProviderLDAP represents the LDAP provider for Security Token Service.
	// Provides support for fetching temporary credentials from an LDAP endpoint.
	STSProviderLDAP string = "ldap"
)

@@ -1,920 +0,0 @@
//go:build !ignore_autogenerated

/*
Copyright 2024 The Flux authors

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

// Code generated by controller-gen. DO NOT EDIT.

package v1

import (
	"github.com/fluxcd/pkg/apis/acl"
	"github.com/fluxcd/pkg/apis/meta"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime"
)

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *Artifact) DeepCopyInto(out *Artifact) {
	*out = *in
	in.LastUpdateTime.DeepCopyInto(&out.LastUpdateTime)
	if in.Size != nil {
		in, out := &in.Size, &out.Size
		*out = new(int64)
		**out = **in
	}
	if in.Metadata != nil {
		in, out := &in.Metadata, &out.Metadata
		*out = make(map[string]string, len(*in))
		for key, val := range *in {
			(*out)[key] = val
		}
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Artifact.
func (in *Artifact) DeepCopy() *Artifact {
	if in == nil {
		return nil
	}
	out := new(Artifact)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *Bucket) DeepCopyInto(out *Bucket) {
	*out = *in
	out.TypeMeta = in.TypeMeta
	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
	in.Spec.DeepCopyInto(&out.Spec)
	in.Status.DeepCopyInto(&out.Status)
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Bucket.
func (in *Bucket) DeepCopy() *Bucket {
	if in == nil {
		return nil
	}
	out := new(Bucket)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *Bucket) DeepCopyObject() runtime.Object {
	if c := in.DeepCopy(); c != nil {
		return c
	}
	return nil
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *BucketList) DeepCopyInto(out *BucketList) {
	*out = *in
	out.TypeMeta = in.TypeMeta
	in.ListMeta.DeepCopyInto(&out.ListMeta)
	if in.Items != nil {
		in, out := &in.Items, &out.Items
		*out = make([]Bucket, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BucketList.
func (in *BucketList) DeepCopy() *BucketList {
	if in == nil {
		return nil
	}
	out := new(BucketList)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *BucketList) DeepCopyObject() runtime.Object {
	if c := in.DeepCopy(); c != nil {
		return c
	}
	return nil
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *BucketSTSSpec) DeepCopyInto(out *BucketSTSSpec) {
	*out = *in
	if in.SecretRef != nil {
		in, out := &in.SecretRef, &out.SecretRef
		*out = new(meta.LocalObjectReference)
		**out = **in
	}
	if in.CertSecretRef != nil {
		in, out := &in.CertSecretRef, &out.CertSecretRef
		*out = new(meta.LocalObjectReference)
		**out = **in
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BucketSTSSpec.
func (in *BucketSTSSpec) DeepCopy() *BucketSTSSpec {
	if in == nil {
		return nil
	}
	out := new(BucketSTSSpec)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *BucketSpec) DeepCopyInto(out *BucketSpec) {
	*out = *in
	if in.STS != nil {
		in, out := &in.STS, &out.STS
		*out = new(BucketSTSSpec)
		(*in).DeepCopyInto(*out)
	}
	if in.SecretRef != nil {
		in, out := &in.SecretRef, &out.SecretRef
		*out = new(meta.LocalObjectReference)
		**out = **in
	}
	if in.CertSecretRef != nil {
		in, out := &in.CertSecretRef, &out.CertSecretRef
		*out = new(meta.LocalObjectReference)
		**out = **in
	}
	if in.ProxySecretRef != nil {
		in, out := &in.ProxySecretRef, &out.ProxySecretRef
		*out = new(meta.LocalObjectReference)
		**out = **in
	}
	out.Interval = in.Interval
	if in.Timeout != nil {
		in, out := &in.Timeout, &out.Timeout
		*out = new(metav1.Duration)
		**out = **in
	}
	if in.Ignore != nil {
		in, out := &in.Ignore, &out.Ignore
		*out = new(string)
		**out = **in
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BucketSpec.
func (in *BucketSpec) DeepCopy() *BucketSpec {
	if in == nil {
		return nil
	}
	out := new(BucketSpec)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *BucketStatus) DeepCopyInto(out *BucketStatus) {
	*out = *in
	if in.Conditions != nil {
		in, out := &in.Conditions, &out.Conditions
		*out = make([]metav1.Condition, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	if in.Artifact != nil {
		in, out := &in.Artifact, &out.Artifact
		*out = new(Artifact)
		(*in).DeepCopyInto(*out)
	}
	if in.ObservedIgnore != nil {
		in, out := &in.ObservedIgnore, &out.ObservedIgnore
		*out = new(string)
		**out = **in
	}
	out.ReconcileRequestStatus = in.ReconcileRequestStatus
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BucketStatus.
func (in *BucketStatus) DeepCopy() *BucketStatus {
	if in == nil {
		return nil
	}
	out := new(BucketStatus)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *GitRepository) DeepCopyInto(out *GitRepository) {
	*out = *in
	out.TypeMeta = in.TypeMeta
	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
	in.Spec.DeepCopyInto(&out.Spec)
	in.Status.DeepCopyInto(&out.Status)
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GitRepository.
func (in *GitRepository) DeepCopy() *GitRepository {
	if in == nil {
		return nil
	}
	out := new(GitRepository)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *GitRepository) DeepCopyObject() runtime.Object {
	if c := in.DeepCopy(); c != nil {
		return c
	}
	return nil
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *GitRepositoryInclude) DeepCopyInto(out *GitRepositoryInclude) {
	*out = *in
	out.GitRepositoryRef = in.GitRepositoryRef
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GitRepositoryInclude.
func (in *GitRepositoryInclude) DeepCopy() *GitRepositoryInclude {
	if in == nil {
		return nil
	}
	out := new(GitRepositoryInclude)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *GitRepositoryList) DeepCopyInto(out *GitRepositoryList) {
	*out = *in
	out.TypeMeta = in.TypeMeta
	in.ListMeta.DeepCopyInto(&out.ListMeta)
	if in.Items != nil {
		in, out := &in.Items, &out.Items
		*out = make([]GitRepository, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GitRepositoryList.
func (in *GitRepositoryList) DeepCopy() *GitRepositoryList {
	if in == nil {
		return nil
	}
	out := new(GitRepositoryList)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *GitRepositoryList) DeepCopyObject() runtime.Object {
	if c := in.DeepCopy(); c != nil {
		return c
	}
	return nil
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *GitRepositoryRef) DeepCopyInto(out *GitRepositoryRef) {
	*out = *in
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GitRepositoryRef.
func (in *GitRepositoryRef) DeepCopy() *GitRepositoryRef {
	if in == nil {
		return nil
	}
	out := new(GitRepositoryRef)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *GitRepositorySpec) DeepCopyInto(out *GitRepositorySpec) {
	*out = *in
	if in.SecretRef != nil {
		in, out := &in.SecretRef, &out.SecretRef
		*out = new(meta.LocalObjectReference)
		**out = **in
	}
	out.Interval = in.Interval
	if in.Timeout != nil {
		in, out := &in.Timeout, &out.Timeout
		*out = new(metav1.Duration)
		**out = **in
	}
	if in.Reference != nil {
		in, out := &in.Reference, &out.Reference
		*out = new(GitRepositoryRef)
		**out = **in
	}
	if in.Verification != nil {
		in, out := &in.Verification, &out.Verification
		*out = new(GitRepositoryVerification)
		**out = **in
	}
	if in.ProxySecretRef != nil {
		in, out := &in.ProxySecretRef, &out.ProxySecretRef
		*out = new(meta.LocalObjectReference)
		**out = **in
	}
	if in.Ignore != nil {
		in, out := &in.Ignore, &out.Ignore
		*out = new(string)
		**out = **in
	}
	if in.Include != nil {
		in, out := &in.Include, &out.Include
		*out = make([]GitRepositoryInclude, len(*in))
		copy(*out, *in)
	}
	if in.SparseCheckout != nil {
		in, out := &in.SparseCheckout, &out.SparseCheckout
		*out = make([]string, len(*in))
		copy(*out, *in)
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GitRepositorySpec.
func (in *GitRepositorySpec) DeepCopy() *GitRepositorySpec {
	if in == nil {
		return nil
	}
	out := new(GitRepositorySpec)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *GitRepositoryStatus) DeepCopyInto(out *GitRepositoryStatus) {
	*out = *in
	if in.Conditions != nil {
		in, out := &in.Conditions, &out.Conditions
		*out = make([]metav1.Condition, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	if in.Artifact != nil {
		in, out := &in.Artifact, &out.Artifact
		*out = new(Artifact)
		(*in).DeepCopyInto(*out)
	}
	if in.IncludedArtifacts != nil {
		in, out := &in.IncludedArtifacts, &out.IncludedArtifacts
		*out = make([]*Artifact, len(*in))
		for i := range *in {
			if (*in)[i] != nil {
				in, out := &(*in)[i], &(*out)[i]
				*out = new(Artifact)
				(*in).DeepCopyInto(*out)
			}
		}
	}
	if in.ObservedIgnore != nil {
		in, out := &in.ObservedIgnore, &out.ObservedIgnore
		*out = new(string)
		**out = **in
	}
	if in.ObservedInclude != nil {
		in, out := &in.ObservedInclude, &out.ObservedInclude
		*out = make([]GitRepositoryInclude, len(*in))
		copy(*out, *in)
	}
	if in.ObservedSparseCheckout != nil {
		in, out := &in.ObservedSparseCheckout, &out.ObservedSparseCheckout
		*out = make([]string, len(*in))
		copy(*out, *in)
	}
	if in.SourceVerificationMode != nil {
		in, out := &in.SourceVerificationMode, &out.SourceVerificationMode
		*out = new(GitVerificationMode)
		**out = **in
	}
	out.ReconcileRequestStatus = in.ReconcileRequestStatus
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GitRepositoryStatus.
func (in *GitRepositoryStatus) DeepCopy() *GitRepositoryStatus {
	if in == nil {
		return nil
	}
	out := new(GitRepositoryStatus)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *GitRepositoryVerification) DeepCopyInto(out *GitRepositoryVerification) {
	*out = *in
	out.SecretRef = in.SecretRef
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GitRepositoryVerification.
func (in *GitRepositoryVerification) DeepCopy() *GitRepositoryVerification {
	if in == nil {
		return nil
	}
	out := new(GitRepositoryVerification)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *HelmChart) DeepCopyInto(out *HelmChart) {
	*out = *in
	out.TypeMeta = in.TypeMeta
	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
	in.Spec.DeepCopyInto(&out.Spec)
	in.Status.DeepCopyInto(&out.Status)
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HelmChart.
func (in *HelmChart) DeepCopy() *HelmChart {
	if in == nil {
		return nil
	}
	out := new(HelmChart)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *HelmChart) DeepCopyObject() runtime.Object {
	if c := in.DeepCopy(); c != nil {
		return c
	}
	return nil
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *HelmChartList) DeepCopyInto(out *HelmChartList) {
	*out = *in
	out.TypeMeta = in.TypeMeta
	in.ListMeta.DeepCopyInto(&out.ListMeta)
	if in.Items != nil {
		in, out := &in.Items, &out.Items
		*out = make([]HelmChart, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HelmChartList.
func (in *HelmChartList) DeepCopy() *HelmChartList {
	if in == nil {
		return nil
	}
	out := new(HelmChartList)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *HelmChartList) DeepCopyObject() runtime.Object {
	if c := in.DeepCopy(); c != nil {
		return c
	}
	return nil
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *HelmChartSpec) DeepCopyInto(out *HelmChartSpec) {
	*out = *in
	out.SourceRef = in.SourceRef
	out.Interval = in.Interval
	if in.ValuesFiles != nil {
		in, out := &in.ValuesFiles, &out.ValuesFiles
		*out = make([]string, len(*in))
		copy(*out, *in)
	}
	if in.Verify != nil {
		in, out := &in.Verify, &out.Verify
		*out = new(OCIRepositoryVerification)
		(*in).DeepCopyInto(*out)
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HelmChartSpec.
func (in *HelmChartSpec) DeepCopy() *HelmChartSpec {
	if in == nil {
		return nil
	}
	out := new(HelmChartSpec)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *HelmChartStatus) DeepCopyInto(out *HelmChartStatus) {
	*out = *in
	if in.ObservedValuesFiles != nil {
		in, out := &in.ObservedValuesFiles, &out.ObservedValuesFiles
		*out = make([]string, len(*in))
		copy(*out, *in)
	}
	if in.Conditions != nil {
		in, out := &in.Conditions, &out.Conditions
		*out = make([]metav1.Condition, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	if in.Artifact != nil {
		in, out := &in.Artifact, &out.Artifact
		*out = new(Artifact)
		(*in).DeepCopyInto(*out)
	}
	out.ReconcileRequestStatus = in.ReconcileRequestStatus
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HelmChartStatus.
func (in *HelmChartStatus) DeepCopy() *HelmChartStatus {
	if in == nil {
		return nil
	}
	out := new(HelmChartStatus)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *HelmRepository) DeepCopyInto(out *HelmRepository) {
	*out = *in
	out.TypeMeta = in.TypeMeta
	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
	in.Spec.DeepCopyInto(&out.Spec)
	in.Status.DeepCopyInto(&out.Status)
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HelmRepository.
func (in *HelmRepository) DeepCopy() *HelmRepository {
	if in == nil {
		return nil
	}
	out := new(HelmRepository)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *HelmRepository) DeepCopyObject() runtime.Object {
	if c := in.DeepCopy(); c != nil {
		return c
	}
	return nil
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *HelmRepositoryList) DeepCopyInto(out *HelmRepositoryList) {
	*out = *in
	out.TypeMeta = in.TypeMeta
	in.ListMeta.DeepCopyInto(&out.ListMeta)
	if in.Items != nil {
		in, out := &in.Items, &out.Items
		*out = make([]HelmRepository, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HelmRepositoryList.
func (in *HelmRepositoryList) DeepCopy() *HelmRepositoryList {
	if in == nil {
		return nil
	}
	out := new(HelmRepositoryList)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *HelmRepositoryList) DeepCopyObject() runtime.Object {
	if c := in.DeepCopy(); c != nil {
		return c
	}
	return nil
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *HelmRepositorySpec) DeepCopyInto(out *HelmRepositorySpec) {
	*out = *in
	if in.SecretRef != nil {
		in, out := &in.SecretRef, &out.SecretRef
		*out = new(meta.LocalObjectReference)
		**out = **in
	}
	if in.CertSecretRef != nil {
		in, out := &in.CertSecretRef, &out.CertSecretRef
		*out = new(meta.LocalObjectReference)
		**out = **in
	}
	out.Interval = in.Interval
	if in.Timeout != nil {
		in, out := &in.Timeout, &out.Timeout
		*out = new(metav1.Duration)
		**out = **in
	}
	if in.AccessFrom != nil {
		in, out := &in.AccessFrom, &out.AccessFrom
		*out = new(acl.AccessFrom)
		(*in).DeepCopyInto(*out)
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HelmRepositorySpec.
func (in *HelmRepositorySpec) DeepCopy() *HelmRepositorySpec {
	if in == nil {
		return nil
	}
	out := new(HelmRepositorySpec)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *HelmRepositoryStatus) DeepCopyInto(out *HelmRepositoryStatus) {
	*out = *in
	if in.Conditions != nil {
		in, out := &in.Conditions, &out.Conditions
		*out = make([]metav1.Condition, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	if in.Artifact != nil {
		in, out := &in.Artifact, &out.Artifact
		*out = new(Artifact)
		(*in).DeepCopyInto(*out)
	}
	out.ReconcileRequestStatus = in.ReconcileRequestStatus
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HelmRepositoryStatus.
func (in *HelmRepositoryStatus) DeepCopy() *HelmRepositoryStatus {
	if in == nil {
		return nil
	}
	out := new(HelmRepositoryStatus)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *LocalHelmChartSourceReference) DeepCopyInto(out *LocalHelmChartSourceReference) {
	*out = *in
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LocalHelmChartSourceReference.
func (in *LocalHelmChartSourceReference) DeepCopy() *LocalHelmChartSourceReference {
	if in == nil {
		return nil
	}
	out := new(LocalHelmChartSourceReference)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *OCILayerSelector) DeepCopyInto(out *OCILayerSelector) {
	*out = *in
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OCILayerSelector.
func (in *OCILayerSelector) DeepCopy() *OCILayerSelector {
	if in == nil {
		return nil
	}
	out := new(OCILayerSelector)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *OCIRepository) DeepCopyInto(out *OCIRepository) {
	*out = *in
	out.TypeMeta = in.TypeMeta
	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
	in.Spec.DeepCopyInto(&out.Spec)
	in.Status.DeepCopyInto(&out.Status)
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OCIRepository.
func (in *OCIRepository) DeepCopy() *OCIRepository {
	if in == nil {
		return nil
	}
	out := new(OCIRepository)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *OCIRepository) DeepCopyObject() runtime.Object {
	if c := in.DeepCopy(); c != nil {
		return c
	}
	return nil
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *OCIRepositoryList) DeepCopyInto(out *OCIRepositoryList) {
	*out = *in
	out.TypeMeta = in.TypeMeta
	in.ListMeta.DeepCopyInto(&out.ListMeta)
	if in.Items != nil {
		in, out := &in.Items, &out.Items
		*out = make([]OCIRepository, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OCIRepositoryList.
func (in *OCIRepositoryList) DeepCopy() *OCIRepositoryList {
	if in == nil {
		return nil
	}
	out := new(OCIRepositoryList)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *OCIRepositoryList) DeepCopyObject() runtime.Object {
	if c := in.DeepCopy(); c != nil {
		return c
	}
	return nil
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *OCIRepositoryRef) DeepCopyInto(out *OCIRepositoryRef) {
	*out = *in
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OCIRepositoryRef.
func (in *OCIRepositoryRef) DeepCopy() *OCIRepositoryRef {
	if in == nil {
		return nil
	}
	out := new(OCIRepositoryRef)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *OCIRepositorySpec) DeepCopyInto(out *OCIRepositorySpec) {
	*out = *in
	if in.Reference != nil {
		in, out := &in.Reference, &out.Reference
		*out = new(OCIRepositoryRef)
		**out = **in
	}
	if in.LayerSelector != nil {
		in, out := &in.LayerSelector, &out.LayerSelector
		*out = new(OCILayerSelector)
		**out = **in
	}
	if in.SecretRef != nil {
		in, out := &in.SecretRef, &out.SecretRef
		*out = new(meta.LocalObjectReference)
		**out = **in
	}
	if in.Verify != nil {
		in, out := &in.Verify, &out.Verify
		*out = new(OCIRepositoryVerification)
		(*in).DeepCopyInto(*out)
	}
	if in.CertSecretRef != nil {
		in, out := &in.CertSecretRef, &out.CertSecretRef
		*out = new(meta.LocalObjectReference)
		**out = **in
	}
	if in.ProxySecretRef != nil {
		in, out := &in.ProxySecretRef, &out.ProxySecretRef
		*out = new(meta.LocalObjectReference)
		**out = **in
	}
	out.Interval = in.Interval
	if in.Timeout != nil {
		in, out := &in.Timeout, &out.Timeout
		*out = new(metav1.Duration)
		**out = **in
	}
	if in.Ignore != nil {
		in, out := &in.Ignore, &out.Ignore
		*out = new(string)
		**out = **in
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OCIRepositorySpec.
func (in *OCIRepositorySpec) DeepCopy() *OCIRepositorySpec {
	if in == nil {
		return nil
	}
	out := new(OCIRepositorySpec)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *OCIRepositoryStatus) DeepCopyInto(out *OCIRepositoryStatus) {
	*out = *in
	if in.Conditions != nil {
		in, out := &in.Conditions, &out.Conditions
		*out = make([]metav1.Condition, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	if in.Artifact != nil {
		in, out := &in.Artifact, &out.Artifact
		*out = new(Artifact)
		(*in).DeepCopyInto(*out)
	}
	if in.ObservedIgnore != nil {
		in, out := &in.ObservedIgnore, &out.ObservedIgnore
		*out = new(string)
|
||||
**out = **in
|
||||
}
|
||||
if in.ObservedLayerSelector != nil {
|
||||
in, out := &in.ObservedLayerSelector, &out.ObservedLayerSelector
|
||||
*out = new(OCILayerSelector)
|
||||
**out = **in
|
||||
}
|
||||
out.ReconcileRequestStatus = in.ReconcileRequestStatus
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OCIRepositoryStatus.
|
||||
func (in *OCIRepositoryStatus) DeepCopy() *OCIRepositoryStatus {
|
||||
if in == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(OCIRepositoryStatus)
|
||||
in.DeepCopyInto(out)
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *OCIRepositoryVerification) DeepCopyInto(out *OCIRepositoryVerification) {
|
||||
*out = *in
|
||||
if in.SecretRef != nil {
|
||||
in, out := &in.SecretRef, &out.SecretRef
|
||||
*out = new(meta.LocalObjectReference)
|
||||
**out = **in
|
||||
}
|
||||
if in.MatchOIDCIdentity != nil {
|
||||
in, out := &in.MatchOIDCIdentity, &out.MatchOIDCIdentity
|
||||
*out = make([]OIDCIdentityMatch, len(*in))
|
||||
copy(*out, *in)
|
||||
}
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OCIRepositoryVerification.
|
||||
func (in *OCIRepositoryVerification) DeepCopy() *OCIRepositoryVerification {
|
||||
if in == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(OCIRepositoryVerification)
|
||||
in.DeepCopyInto(out)
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *OIDCIdentityMatch) DeepCopyInto(out *OIDCIdentityMatch) {
|
||||
*out = *in
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OIDCIdentityMatch.
|
||||
func (in *OIDCIdentityMatch) DeepCopy() *OIDCIdentityMatch {
|
||||
if in == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(OIDCIdentityMatch)
|
||||
in.DeepCopyInto(out)
|
||||
return out
|
||||
}
|
|
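For context (not part of the diff): a minimal, hypothetical sketch of how these generated helpers are typically used, assuming the package that declares OCIRepository above. The URL and interval values are illustrative only.

package v1beta2

import (
	"time"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func exampleDeepCopyUsage() {
	in := &OCIRepository{}
	in.Spec.URL = "oci://ghcr.io/example/app" // hypothetical URL

	// DeepCopy returns an independent copy, so an object read from a shared
	// informer cache can be mutated without corrupting the cache.
	out := in.DeepCopy()
	out.Spec.Interval = metav1.Duration{Duration: 5 * time.Minute}

	// DeepCopyObject is what satisfies runtime.Object and lets the type
	// round-trip through client-go machinery.
	_ = out.DeepCopyObject()
}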
@@ -193,9 +193,9 @@ func (in *Bucket) GetInterval() metav1.Duration {
}

// +genclient
// +genclient:Namespaced
// +kubebuilder:object:root=true
// +kubebuilder:subresource:status
// +kubebuilder:deprecatedversion:warning="v1beta1 Bucket is deprecated, upgrade to v1"
// +kubebuilder:printcolumn:name="Endpoint",type=string,JSONPath=`.spec.endpoint`
// +kubebuilder:printcolumn:name="Ready",type="string",JSONPath=".status.conditions[?(@.type==\"Ready\")].status",description=""
// +kubebuilder:printcolumn:name="Status",type="string",JSONPath=".status.conditions[?(@.type==\"Ready\")].message",description=""

@@ -265,10 +265,10 @@ func (in *GitRepository) GetInterval() metav1.Duration {
}

// +genclient
// +genclient:Namespaced
// +kubebuilder:object:root=true
// +kubebuilder:resource:shortName=gitrepo
// +kubebuilder:subresource:status
// +kubebuilder:deprecatedversion:warning="v1beta1 GitRepository is deprecated, upgrade to v1"
// +kubebuilder:printcolumn:name="URL",type=string,JSONPath=`.spec.url`
// +kubebuilder:printcolumn:name="Ready",type="string",JSONPath=".status.conditions[?(@.type==\"Ready\")].status",description=""
// +kubebuilder:printcolumn:name="Status",type="string",JSONPath=".status.conditions[?(@.type==\"Ready\")].message",description=""

@@ -231,10 +231,10 @@ func (in *HelmChart) GetValuesFiles() []string {
}

// +genclient
// +genclient:Namespaced
// +kubebuilder:object:root=true
// +kubebuilder:resource:shortName=hc
// +kubebuilder:subresource:status
// +kubebuilder:deprecatedversion:warning="v1beta1 HelmChart is deprecated, upgrade to v1"
// +kubebuilder:printcolumn:name="Chart",type=string,JSONPath=`.spec.chart`
// +kubebuilder:printcolumn:name="Version",type=string,JSONPath=`.spec.version`
// +kubebuilder:printcolumn:name="Source Kind",type=string,JSONPath=`.spec.sourceRef.kind`

@@ -43,7 +43,7 @@ type HelmRepositorySpec struct {
	// For HTTP/S basic auth the secret must contain username and
	// password fields.
	// For TLS the secret must contain a certFile and keyFile, and/or
	// caFile fields.
	// caCert fields.
	// +optional
	SecretRef *meta.LocalObjectReference `json:"secretRef,omitempty"`

@@ -181,10 +181,10 @@ func (in *HelmRepository) GetInterval() metav1.Duration {
}

// +genclient
// +genclient:Namespaced
// +kubebuilder:object:root=true
// +kubebuilder:resource:shortName=helmrepo
// +kubebuilder:subresource:status
// +kubebuilder:deprecatedversion:warning="v1beta1 HelmRepository is deprecated, upgrade to v1"
// +kubebuilder:printcolumn:name="URL",type=string,JSONPath=`.spec.url`
// +kubebuilder:printcolumn:name="Ready",type="string",JSONPath=".status.conditions[?(@.type==\"Ready\")].status",description=""
// +kubebuilder:printcolumn:name="Status",type="string",JSONPath=".status.conditions[?(@.type==\"Ready\")].message",description=""

@@ -1,7 +1,8 @@
//go:build !ignore_autogenerated
// +build !ignore_autogenerated

/*
Copyright 2024 The Flux authors
Copyright 2022 The Flux authors

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.

@@ -18,16 +18,12 @@ package v1beta2

import (
	"path"
	"regexp"
	"strings"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// Artifact represents the output of a Source reconciliation.
//
// Deprecated: use Artifact from api/v1 instead. This type will be removed in
// a future release.
type Artifact struct {
	// Path is the relative file path of the Artifact. It can be used to locate
	// the file in the root of the Artifact storage on the local file system of

@@ -47,14 +43,8 @@ type Artifact struct {
	Revision string `json:"revision"`

	// Checksum is the SHA256 checksum of the Artifact file.
	// Deprecated: use Artifact.Digest instead.
	// +optional
	Checksum string `json:"checksum,omitempty"`

	// Digest is the digest of the file in the form of '<algorithm>:<checksum>'.
	// +optional
	// +kubebuilder:validation:Pattern="^[a-z0-9]+(?:[.+_-][a-z0-9]+)*:[a-zA-Z0-9=_-]+$"
	Digest string `json:"digest,omitempty"`
	Checksum string `json:"checksum"`

	// LastUpdateTime is the timestamp corresponding to the last update of the
	// Artifact.

@@ -76,7 +66,7 @@ func (in *Artifact) HasRevision(revision string) bool {
	if in == nil {
		return false
	}
	return TransformLegacyRevision(in.Revision) == TransformLegacyRevision(revision)
	return in.Revision == revision
}

// HasChecksum returns if the given checksum matches the current Checksum of

@@ -100,60 +90,3 @@ func ArtifactDir(kind, namespace, name string) string {
func ArtifactPath(kind, namespace, name, filename string) string {
	return path.Join(ArtifactDir(kind, namespace, name), filename)
}

// TransformLegacyRevision transforms a "legacy" revision string into a "new"
// revision string. It accepts the following formats:
//
// - main/5394cb7f48332b2de7c17dd8b8384bbc84b7e738
// - feature/branch/5394cb7f48332b2de7c17dd8b8384bbc84b7e738
// - HEAD/5394cb7f48332b2de7c17dd8b8384bbc84b7e738
// - tag/55609ff9d959589ed917ce32e6bc0f0a36809565f308602c15c3668965979edc
// - d52bde83c5b2bd0fa7910264e0afc3ac9cfe9b6636ca29c05c09742f01d5a4bd
//
// Which are transformed into the following formats respectively:
//
// - main@sha1:5394cb7f48332b2de7c17dd8b8384bbc84b7e738
// - feature/branch@sha1:5394cb7f48332b2de7c17dd8b8384bbc84b7e738
// - sha1:5394cb7f48332b2de7c17dd8b8384bbc84b7e738
// - tag@sha256:55609ff9d959589ed917ce32e6bc0f0a36809565f308602c15c3668965979edc
// - sha256:d52bde83c5b2bd0fa7910264e0afc3ac9cfe9b6636ca29c05c09742f01d5a4bd
//
// Deprecated, this function exists for backwards compatibility with existing
// resources, and to provide a transition period. Will be removed in a future
// release.
func TransformLegacyRevision(rev string) string {
	if rev != "" && strings.LastIndex(rev, ":") == -1 {
		if i := strings.LastIndex(rev, "/"); i >= 0 {
			sha := rev[i+1:]
			if algo := determineSHAType(sha); algo != "" {
				if name := rev[:i]; name != "HEAD" {
					return name + "@" + algo + ":" + sha
				}
				return algo + ":" + sha
			}
		}
		if algo := determineSHAType(rev); algo != "" {
			return algo + ":" + rev
		}
	}
	return rev
}

// isAlphaNumHex returns true if the given string only contains 0-9 and a-f
// characters.
var isAlphaNumHex = regexp.MustCompile(`^[0-9a-f]+$`).MatchString

// determineSHAType returns the SHA algorithm used to compute the provided hex.
// The determination is heuristic and based on the length of the hex string. If
// the size is not recognized, an empty string is returned.
func determineSHAType(hex string) string {
	if isAlphaNumHex(hex) {
		switch len(hex) {
		case 40:
			return "sha1"
		case 64:
			return "sha256"
		}
	}
	return ""
}

@@ -1,78 +0,0 @@
/*
Copyright 2023 The Flux authors

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

	http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package v1beta2

import "testing"

func TestTransformLegacyRevision(t *testing.T) {
	tests := []struct {
		rev  string
		want string
	}{
		{
			rev:  "HEAD/5394cb7f48332b2de7c17dd8b8384bbc84b7e738",
			want: "sha1:5394cb7f48332b2de7c17dd8b8384bbc84b7e738",
		},
		{
			rev:  "main/5394cb7f48332b2de7c17dd8b8384bbc84b7e738",
			want: "main@sha1:5394cb7f48332b2de7c17dd8b8384bbc84b7e738",
		},
		{
			rev:  "main@sha1:5394cb7f48332b2de7c17dd8b8384bbc84b7e738",
			want: "main@sha1:5394cb7f48332b2de7c17dd8b8384bbc84b7e738",
		},
		{
			rev:  "feature/branch/5394cb7f48332b2de7c17dd8b8384bbc84b7e738",
			want: "feature/branch@sha1:5394cb7f48332b2de7c17dd8b8384bbc84b7e738",
		},
		{
			rev:  "feature/branch@sha1:5394cb7f48332b2de7c17dd8b8384bbc84b7e738",
			want: "feature/branch@sha1:5394cb7f48332b2de7c17dd8b8384bbc84b7e738",
		},
		{
			rev:  "5ac85ca617f3774baff4ae0a420b810b2546dbc9af9f346b1d55c5ed9873c55c",
			want: "sha256:5ac85ca617f3774baff4ae0a420b810b2546dbc9af9f346b1d55c5ed9873c55c",
		},
		{
			rev:  "v1.0.0",
			want: "v1.0.0",
		},
		{
			rev:  "v1.0.0-rc1",
			want: "v1.0.0-rc1",
		},
		{
			rev:  "v1.0.0-rc1+metadata",
			want: "v1.0.0-rc1+metadata",
		},
		{
			rev:  "arbitrary/revision",
			want: "arbitrary/revision",
		},
		{
			rev:  "5394cb7f48332b2de7c17dd8b8384bbc84b7xxxx",
			want: "5394cb7f48332b2de7c17dd8b8384bbc84b7xxxx",
		},
	}
	for _, tt := range tests {
		t.Run(tt.rev, func(t *testing.T) {
			if got := TransformLegacyRevision(tt.rev); got != tt.want {
				t.Errorf("TransformLegacyRevision() = %v, want %v", got, tt.want)
			}
		})
	}
}
@@ -23,8 +23,6 @@ import (

	"github.com/fluxcd/pkg/apis/acl"
	"github.com/fluxcd/pkg/apis/meta"

	apiv1 "github.com/fluxcd/source-controller/api/v1"
)

const (

@@ -33,48 +31,22 @@ const (
)

const (
	// BucketProviderGeneric for any S3 API compatible storage Bucket.
	BucketProviderGeneric string = apiv1.BucketProviderGeneric
	// BucketProviderAmazon for an AWS S3 object storage Bucket.
	// Provides support for retrieving credentials from the AWS EC2 service.
	BucketProviderAmazon string = apiv1.BucketProviderAmazon
	// BucketProviderGoogle for a Google Cloud Storage Bucket.
	// Provides support for authentication using a workload identity.
	BucketProviderGoogle string = apiv1.BucketProviderGoogle
	// BucketProviderAzure for an Azure Blob Storage Bucket.
	// Provides support for authentication using a Service Principal,
	// Managed Identity or Shared Key.
	BucketProviderAzure string = apiv1.BucketProviderAzure

	// GenericBucketProvider for any S3 API compatible storage Bucket.
	//
	// Deprecated: use BucketProviderGeneric.
	GenericBucketProvider string = apiv1.BucketProviderGeneric
	GenericBucketProvider string = "generic"
	// AmazonBucketProvider for an AWS S3 object storage Bucket.
	// Provides support for retrieving credentials from the AWS EC2 service.
	//
	// Deprecated: use BucketProviderAmazon.
	AmazonBucketProvider string = apiv1.BucketProviderAmazon
	AmazonBucketProvider string = "aws"
	// GoogleBucketProvider for a Google Cloud Storage Bucket.
	// Provides support for authentication using a workload identity.
	//
	// Deprecated: use BucketProviderGoogle.
	GoogleBucketProvider string = apiv1.BucketProviderGoogle
	GoogleBucketProvider string = "gcp"
	// AzureBucketProvider for an Azure Blob Storage Bucket.
	// Provides support for authentication using a Service Principal,
	// Managed Identity or Shared Key.
	//
	// Deprecated: use BucketProviderAzure.
	AzureBucketProvider string = apiv1.BucketProviderAzure
	AzureBucketProvider string = "azure"
)
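For context (not part of the diff): on the main side the deprecated aliases resolve to the same provider strings as the new constants, so code switching on spec.provider keeps working through the transition. A minimal sketch, assuming the main-branch v1beta2 package:

package v1beta2

// providerSupportsSTS mirrors the first CEL rule on BucketSpec below:
// STS configuration is only accepted for the 'aws' and 'generic' providers.
func providerSupportsSTS(provider string) bool {
	switch provider {
	case BucketProviderAmazon, BucketProviderGeneric:
		return true
	default:
		return false
	}
}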
// BucketSpec specifies the required configuration to produce an Artifact for
// an object storage bucket.
// +kubebuilder:validation:XValidation:rule="self.provider == 'aws' || self.provider == 'generic' || !has(self.sts)", message="STS configuration is only supported for the 'aws' and 'generic' Bucket providers"
// +kubebuilder:validation:XValidation:rule="self.provider != 'aws' || !has(self.sts) || self.sts.provider == 'aws'", message="'aws' is the only supported STS provider for the 'aws' Bucket provider"
// +kubebuilder:validation:XValidation:rule="self.provider != 'generic' || !has(self.sts) || self.sts.provider == 'ldap'", message="'ldap' is the only supported STS provider for the 'generic' Bucket provider"
// +kubebuilder:validation:XValidation:rule="!has(self.sts) || self.sts.provider != 'aws' || !has(self.sts.secretRef)", message="spec.sts.secretRef is not required for the 'aws' STS provider"
// +kubebuilder:validation:XValidation:rule="!has(self.sts) || self.sts.provider != 'aws' || !has(self.sts.certSecretRef)", message="spec.sts.certSecretRef is not required for the 'aws' STS provider"
type BucketSpec struct {
	// Provider of the object storage bucket.
	// Defaults to 'generic', which expects an S3 (API) compatible object

@@ -92,14 +64,6 @@ type BucketSpec struct {
	// +required
	Endpoint string `json:"endpoint"`

	// STS specifies the required configuration to use a Security Token
	// Service for fetching temporary credentials to authenticate in a
	// Bucket provider.
	//
	// This field is only supported for the `aws` and `generic` providers.
	// +optional
	STS *BucketSTSSpec `json:"sts,omitempty"`

	// Insecure allows connecting to a non-TLS HTTP Endpoint.
	// +optional
	Insecure bool `json:"insecure,omitempty"`

@@ -108,40 +72,12 @@ type BucketSpec struct {
	// +optional
	Region string `json:"region,omitempty"`

	// Prefix to use for server-side filtering of files in the Bucket.
	// +optional
	Prefix string `json:"prefix,omitempty"`

	// SecretRef specifies the Secret containing authentication credentials
	// for the Bucket.
	// +optional
	SecretRef *meta.LocalObjectReference `json:"secretRef,omitempty"`

	// CertSecretRef can be given the name of a Secret containing
	// either or both of
	//
	// - a PEM-encoded client certificate (`tls.crt`) and private
	// key (`tls.key`);
	// - a PEM-encoded CA certificate (`ca.crt`)
	//
	// and whichever are supplied, will be used for connecting to the
	// bucket. The client cert and key are useful if you are
	// authenticating with a certificate; the CA cert is useful if
	// you are using a self-signed server certificate. The Secret must
	// be of type `Opaque` or `kubernetes.io/tls`.
	//
	// This field is only supported for the `generic` provider.
	// +optional
	CertSecretRef *meta.LocalObjectReference `json:"certSecretRef,omitempty"`

	// ProxySecretRef specifies the Secret containing the proxy configuration
	// to use while communicating with the Bucket server.
	// +optional
	ProxySecretRef *meta.LocalObjectReference `json:"proxySecretRef,omitempty"`

	// Interval at which the Bucket Endpoint is checked for updates.
	// This interval is approximate and may be subject to jitter to ensure
	// efficient use of resources.
	// Interval at which to check the Endpoint for updates.
	// +kubebuilder:validation:Type=string
	// +kubebuilder:validation:Pattern="^([0-9]+(\\.[0-9]+)?(ms|s|m|h))+$"
	// +required

@@ -172,45 +108,6 @@ type BucketSpec struct {
	AccessFrom *acl.AccessFrom `json:"accessFrom,omitempty"`
}

// BucketSTSSpec specifies the required configuration to use a Security Token
// Service for fetching temporary credentials to authenticate in a Bucket
// provider.
type BucketSTSSpec struct {
	// Provider of the Security Token Service.
	// +kubebuilder:validation:Enum=aws;ldap
	// +required
	Provider string `json:"provider"`

	// Endpoint is the HTTP/S endpoint of the Security Token Service from
	// where temporary credentials will be fetched.
	// +required
	// +kubebuilder:validation:Pattern="^(http|https)://.*$"
	Endpoint string `json:"endpoint"`

	// SecretRef specifies the Secret containing authentication credentials
	// for the STS endpoint. This Secret must contain the fields `username`
	// and `password` and is supported only for the `ldap` provider.
	// +optional
	SecretRef *meta.LocalObjectReference `json:"secretRef,omitempty"`

	// CertSecretRef can be given the name of a Secret containing
	// either or both of
	//
	// - a PEM-encoded client certificate (`tls.crt`) and private
	// key (`tls.key`);
	// - a PEM-encoded CA certificate (`ca.crt`)
	//
	// and whichever are supplied, will be used for connecting to the
	// STS endpoint. The client cert and key are useful if you are
	// authenticating with a certificate; the CA cert is useful if
	// you are using a self-signed server certificate. The Secret must
	// be of type `Opaque` or `kubernetes.io/tls`.
	//
	// This field is only supported for the `ldap` provider.
	// +optional
	CertSecretRef *meta.LocalObjectReference `json:"certSecretRef,omitempty"`
}
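For context (not part of the diff): a minimal sketch of an STS configuration that satisfies the XValidation rules above; the endpoint values are hypothetical.

package v1beta2

func exampleBucketWithSTS() BucketSpec {
	return BucketSpec{
		Provider: BucketProviderAmazon, // STS is only allowed for 'aws' and 'generic'
		Endpoint: "s3.amazonaws.com",   // hypothetical endpoint
		STS: &BucketSTSSpec{
			Provider: "aws",
			Endpoint: "https://sts.amazonaws.com", // hypothetical endpoint
			// SecretRef and CertSecretRef stay nil: the CEL rules above
			// reject both for the 'aws' STS provider.
		},
	}
}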
// BucketStatus records the observed state of a Bucket.
type BucketStatus struct {
	// ObservedGeneration is the last observed generation of the Bucket object.

@@ -229,7 +126,7 @@ type BucketStatus struct {

	// Artifact represents the last successful Bucket reconciliation.
	// +optional
	Artifact *apiv1.Artifact `json:"artifact,omitempty"`
	Artifact *Artifact `json:"artifact,omitempty"`

	// ObservedIgnore is the observed exclusion patterns used for constructing
	// the source artifact.

@@ -265,14 +162,15 @@ func (in Bucket) GetRequeueAfter() time.Duration {
}

// GetArtifact returns the latest artifact from the source if present in the status sub-resource.
func (in *Bucket) GetArtifact() *apiv1.Artifact {
func (in *Bucket) GetArtifact() *Artifact {
	return in.Status.Artifact
}

// +genclient
// +genclient:Namespaced
// +kubebuilder:storageversion
// +kubebuilder:object:root=true
// +kubebuilder:subresource:status
// +kubebuilder:deprecatedversion:warning="v1beta2 Bucket is deprecated, upgrade to v1"
// +kubebuilder:printcolumn:name="Endpoint",type=string,JSONPath=`.spec.endpoint`
// +kubebuilder:printcolumn:name="Age",type="date",JSONPath=".metadata.creationTimestamp",description=""
// +kubebuilder:printcolumn:name="Ready",type="string",JSONPath=".status.conditions[?(@.type==\"Ready\")].status",description=""

@@ -23,8 +23,6 @@ import (

	"github.com/fluxcd/pkg/apis/acl"
	"github.com/fluxcd/pkg/apis/meta"

	apiv1 "github.com/fluxcd/source-controller/api/v1"
)

const (

@@ -57,7 +55,7 @@ type GitRepositorySpec struct {
	// SecretRef specifies the Secret containing authentication credentials for
	// the GitRepository.
	// For HTTPS repositories the Secret must contain 'username' and 'password'
	// fields for basic auth or 'bearerToken' field for token auth.
	// fields.
	// For SSH repositories the Secret must contain 'identity'
	// and 'known_hosts' fields.
	// +optional

@@ -99,8 +97,6 @@ type GitRepositorySpec struct {

	// GitImplementation specifies which Git client library implementation to
	// use. Defaults to 'go-git', valid values are ('go-git', 'libgit2').
	// Deprecated: gitImplementation is deprecated now that 'go-git' is the
	// only supported implementation.
	// +kubebuilder:validation:Enum=go-git;libgit2
	// +kubebuilder:default:=go-git
	// +optional

@@ -108,6 +104,7 @@ type GitRepositorySpec struct {

	// RecurseSubmodules enables the initialization of all submodules within
	// the GitRepository as cloned from the URL, using their default settings.
	// This option is available only when using the 'go-git' GitImplementation.
	// +optional
	RecurseSubmodules bool `json:"recurseSubmodules,omitempty"`

@@ -157,6 +154,9 @@ func (in *GitRepositoryInclude) GetToPath() string {
// GitRepositoryRef specifies the Git reference to resolve and checkout.
type GitRepositoryRef struct {
	// Branch to check out, defaults to 'master' if no other field is defined.
	//
	// When GitRepositorySpec.GitImplementation is set to 'go-git', a shallow
	// clone of the specified branch is performed.
	// +optional
	Branch string `json:"branch,omitempty"`

@@ -168,17 +168,11 @@ type GitRepositoryRef struct {
	// +optional
	SemVer string `json:"semver,omitempty"`

	// Name of the reference to check out; takes precedence over Branch, Tag and SemVer.
	//
	// It must be a valid Git reference: https://git-scm.com/docs/git-check-ref-format#_description
	// Examples: "refs/heads/main", "refs/tags/v0.1.0", "refs/pull/420/head", "refs/merge-requests/1/head"
	// +optional
	Name string `json:"name,omitempty"`

	// Commit SHA to check out, takes precedence over all reference fields.
	//
	// This can be combined with Branch to shallow clone the branch, in which
	// the commit is expected to exist.
	// When GitRepositorySpec.GitImplementation is set to 'go-git', this can be
	// combined with Branch to shallow clone the branch, in which the commit is
	// expected to exist.
	// +optional
	Commit string `json:"commit,omitempty"`
}
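For context (not part of the diff): a minimal sketch of the Branch/Commit combination the field comments above describe; the SHA is reused from the examples elsewhere in this diff.

package v1beta2

func exampleGitRef() GitRepositoryRef {
	return GitRepositoryRef{
		// With both fields set, the branch is shallow-cloned and the commit,
		// which takes precedence over all other reference fields, is
		// expected to exist in it.
		Branch: "main",
		Commit: "5394cb7f48332b2de7c17dd8b8384bbc84b7e738",
	}
}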
@@ -192,7 +186,7 @@ type GitRepositoryVerification struct {

	// SecretRef specifies the Secret containing the public keys of trusted Git
	// authors.
	SecretRef meta.LocalObjectReference `json:"secretRef"`
	SecretRef meta.LocalObjectReference `json:"secretRef,omitempty"`
}

// GitRepositoryStatus records the observed state of a Git repository.

@@ -214,12 +208,12 @@ type GitRepositoryStatus struct {

	// Artifact represents the last successful GitRepository reconciliation.
	// +optional
	Artifact *apiv1.Artifact `json:"artifact,omitempty"`
	Artifact *Artifact `json:"artifact,omitempty"`

	// IncludedArtifacts contains a list of the last successfully included
	// Artifacts as instructed by GitRepositorySpec.Include.
	// +optional
	IncludedArtifacts []*apiv1.Artifact `json:"includedArtifacts,omitempty"`
	IncludedArtifacts []*Artifact `json:"includedArtifacts,omitempty"`

	// ContentConfigChecksum is a checksum of all the configurations related to
	// the content of the source artifact:

@@ -282,15 +276,16 @@ func (in GitRepository) GetRequeueAfter() time.Duration {

// GetArtifact returns the latest Artifact from the GitRepository if present in
// the status sub-resource.
func (in *GitRepository) GetArtifact() *apiv1.Artifact {
func (in *GitRepository) GetArtifact() *Artifact {
	return in.Status.Artifact
}

// +genclient
// +genclient:Namespaced
// +kubebuilder:storageversion
// +kubebuilder:object:root=true
// +kubebuilder:resource:shortName=gitrepo
// +kubebuilder:subresource:status
// +kubebuilder:deprecatedversion:warning="v1beta2 GitRepository is deprecated, upgrade to v1"
// +kubebuilder:printcolumn:name="URL",type=string,JSONPath=`.spec.url`
// +kubebuilder:printcolumn:name="Age",type="date",JSONPath=".metadata.creationTimestamp",description=""
// +kubebuilder:printcolumn:name="Ready",type="string",JSONPath=".status.conditions[?(@.type==\"Ready\")].status",description=""

@@ -23,8 +23,6 @@ import (

	"github.com/fluxcd/pkg/apis/acl"
	"github.com/fluxcd/pkg/apis/meta"

	apiv1 "github.com/fluxcd/source-controller/api/v1"
)

// HelmChartKind is the string representation of a HelmChart.

@@ -47,9 +45,7 @@ type HelmChartSpec struct {
	// +required
	SourceRef LocalHelmChartSourceReference `json:"sourceRef"`

	// Interval at which the HelmChart SourceRef is checked for updates.
	// This interval is approximate and may be subject to jitter to ensure
	// efficient use of resources.
	// Interval is the interval at which to check the Source for updates.
	// +kubebuilder:validation:Type=string
	// +kubebuilder:validation:Pattern="^([0-9]+(\\.[0-9]+)?(ms|s|m|h))+$"
	// +required

@@ -80,11 +76,6 @@ type HelmChartSpec struct {
	// +deprecated
	ValuesFile string `json:"valuesFile,omitempty"`

	// IgnoreMissingValuesFiles controls whether to silently ignore missing values
	// files rather than failing.
	// +optional
	IgnoreMissingValuesFiles bool `json:"ignoreMissingValuesFiles,omitempty"`

	// Suspend tells the controller to suspend the reconciliation of this
	// source.
	// +optional

@@ -102,7 +93,7 @@ type HelmChartSpec struct {
	// This field is only supported when using HelmRepository source with spec.type 'oci'.
	// Chart dependencies, which are not bundled in the umbrella chart artifact, are not verified.
	// +optional
	Verify *apiv1.OCIRepositoryVerification `json:"verify,omitempty"`
	Verify *OCIRepositoryVerification `json:"verify,omitempty"`
}

const (

@@ -148,12 +139,6 @@ type HelmChartStatus struct {
	// +optional
	ObservedChartName string `json:"observedChartName,omitempty"`

	// ObservedValuesFiles are the observed value files of the last successful
	// reconciliation.
	// It matches the chart in the last successfully reconciled artifact.
	// +optional
	ObservedValuesFiles []string `json:"observedValuesFiles,omitempty"`

	// Conditions holds the conditions for the HelmChart.
	// +optional
	Conditions []metav1.Condition `json:"conditions,omitempty"`

@@ -166,7 +151,7 @@ type HelmChartStatus struct {

	// Artifact represents the output of the last successful reconciliation.
	// +optional
	Artifact *apiv1.Artifact `json:"artifact,omitempty"`
	Artifact *Artifact `json:"artifact,omitempty"`

	meta.ReconcileRequestStatus `json:",inline"`
}

@@ -199,7 +184,7 @@ func (in HelmChart) GetRequeueAfter() time.Duration {

// GetArtifact returns the latest artifact from the source if present in the
// status sub-resource.
func (in *HelmChart) GetArtifact() *apiv1.Artifact {
func (in *HelmChart) GetArtifact() *Artifact {
	return in.Status.Artifact
}

@@ -215,10 +200,11 @@ func (in *HelmChart) GetValuesFiles() []string {
}

// +genclient
// +genclient:Namespaced
// +kubebuilder:storageversion
// +kubebuilder:object:root=true
// +kubebuilder:resource:shortName=hc
// +kubebuilder:subresource:status
// +kubebuilder:deprecatedversion:warning="v1beta2 HelmChart is deprecated, upgrade to v1"
// +kubebuilder:printcolumn:name="Chart",type=string,JSONPath=`.spec.chart`
// +kubebuilder:printcolumn:name="Version",type=string,JSONPath=`.spec.version`
// +kubebuilder:printcolumn:name="Source Kind",type=string,JSONPath=`.spec.sourceRef.kind`

@@ -23,8 +23,6 @@ import (

	"github.com/fluxcd/pkg/apis/acl"
	"github.com/fluxcd/pkg/apis/meta"

	apiv1 "github.com/fluxcd/source-controller/api/v1"
)

const (

@@ -45,7 +43,6 @@ const (
type HelmRepositorySpec struct {
	// URL of the Helm repository, a valid URL contains at least a protocol and
	// host.
	// +kubebuilder:validation:Pattern="^(http|https|oci)://.*$"
	// +required
	URL string `json:"url"`

@@ -53,29 +50,11 @@ type HelmRepositorySpec struct {
	// for the HelmRepository.
	// For HTTP/S basic auth the secret must contain 'username' and 'password'
	// fields.
	// Support for TLS auth using the 'certFile' and 'keyFile', and/or 'caFile'
	// keys is deprecated. Please use `.spec.certSecretRef` instead.
	// For TLS the secret must contain a 'certFile' and 'keyFile', and/or
	// 'caCert' fields.
	// +optional
	SecretRef *meta.LocalObjectReference `json:"secretRef,omitempty"`

	// CertSecretRef can be given the name of a Secret containing
	// either or both of
	//
	// - a PEM-encoded client certificate (`tls.crt`) and private
	// key (`tls.key`);
	// - a PEM-encoded CA certificate (`ca.crt`)
	//
	// and whichever are supplied, will be used for connecting to the
	// registry. The client cert and key are useful if you are
	// authenticating with a certificate; the CA cert is useful if
	// you are using a self-signed server certificate. The Secret must
	// be of type `Opaque` or `kubernetes.io/tls`.
	//
	// It takes precedence over the values specified in the Secret referred
	// to by `.spec.secretRef`.
	// +optional
	CertSecretRef *meta.LocalObjectReference `json:"certSecretRef,omitempty"`

	// PassCredentials allows the credentials from the SecretRef to be passed
	// on to a host that does not match the host as defined in URL.
	// This may be required if the host of the advertised chart URLs in the

@@ -85,23 +64,16 @@ type HelmRepositorySpec struct {
	// +optional
	PassCredentials bool `json:"passCredentials,omitempty"`

	// Interval at which the HelmRepository URL is checked for updates.
	// This interval is approximate and may be subject to jitter to ensure
	// efficient use of resources.
	// Interval at which to check the URL for updates.
	// +kubebuilder:validation:Type=string
	// +kubebuilder:validation:Pattern="^([0-9]+(\\.[0-9]+)?(ms|s|m|h))+$"
	// +optional
	Interval metav1.Duration `json:"interval,omitempty"`

	// Insecure allows connecting to a non-TLS HTTP container registry.
	// This field is only taken into account if the .spec.type field is set to 'oci'.
	// +optional
	Insecure bool `json:"insecure,omitempty"`
	// +required
	Interval metav1.Duration `json:"interval"`

	// Timeout is used for the index fetch operation for an HTTPS helm repository,
	// and for remote OCI Repository operations like pulling for an OCI helm
	// chart by the associated HelmChart.
	// and for remote OCI Repository operations like pulling for an OCI helm repository.
	// Its default value is 60s.
	// +kubebuilder:default:="60s"
	// +kubebuilder:validation:Type=string
	// +kubebuilder:validation:Pattern="^([0-9]+(\\.[0-9]+)?(ms|s|m))+$"
	// +optional

@@ -152,7 +124,7 @@ type HelmRepositoryStatus struct {

	// Artifact represents the last successful HelmRepository reconciliation.
	// +optional
	Artifact *apiv1.Artifact `json:"artifact,omitempty"`
	Artifact *Artifact `json:"artifact,omitempty"`

	meta.ReconcileRequestStatus `json:",inline"`
}

@@ -176,32 +148,21 @@ func (in *HelmRepository) SetConditions(conditions []metav1.Condition) {
// GetRequeueAfter returns the duration after which the source must be
// reconciled again.
func (in HelmRepository) GetRequeueAfter() time.Duration {
	if in.Spec.Interval.Duration != 0 {
		return in.Spec.Interval.Duration
	}
	return time.Minute
}

// GetTimeout returns the timeout duration used for various operations related
// to this HelmRepository.
func (in HelmRepository) GetTimeout() time.Duration {
	if in.Spec.Timeout != nil {
		return in.Spec.Timeout.Duration
	}
	return time.Minute
	return in.Spec.Interval.Duration
}
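For context (not part of the diff): on the main side both accessors fall back to one minute when their field is unset, which pairs with spec.interval becoming +optional. A minimal sketch, assuming the main-branch v1beta2 package:

package v1beta2

import "time"

func exampleDurations(repo HelmRepository) (requeue, timeout time.Duration) {
	// GetRequeueAfter returns spec.interval, or one minute when unset;
	// GetTimeout returns spec.timeout, or the same one-minute default.
	return repo.GetRequeueAfter(), repo.GetTimeout()
}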
// GetArtifact returns the latest artifact from the source if present in the
// status sub-resource.
func (in *HelmRepository) GetArtifact() *apiv1.Artifact {
func (in *HelmRepository) GetArtifact() *Artifact {
	return in.Status.Artifact
}

// +genclient
// +genclient:Namespaced
// +kubebuilder:storageversion
// +kubebuilder:object:root=true
// +kubebuilder:resource:shortName=helmrepo
// +kubebuilder:subresource:status
// +kubebuilder:deprecatedversion:warning="v1beta2 HelmRepository is deprecated, upgrade to v1"
// +kubebuilder:printcolumn:name="URL",type=string,JSONPath=`.spec.url`
// +kubebuilder:printcolumn:name="Age",type="date",JSONPath=".metadata.creationTimestamp",description=""
// +kubebuilder:printcolumn:name="Ready",type="string",JSONPath=".status.conditions[?(@.type==\"Ready\")].status",description=""

@@ -22,8 +22,6 @@ import (
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

	"github.com/fluxcd/pkg/apis/meta"

	apiv1 "github.com/fluxcd/source-controller/api/v1"
)

const (

@@ -90,7 +88,7 @@ type OCIRepositorySpec struct {
	// used to verify the signature and specifies which provider to use to check
	// whether OCI image is authentic.
	// +optional
	Verify *apiv1.OCIRepositoryVerification `json:"verify,omitempty"`
	Verify *OCIRepositoryVerification `json:"verify,omitempty"`

	// ServiceAccountName is the name of the Kubernetes ServiceAccount used to authenticate
	// the image pull if the service account has attached pull secrets. For more information:

@@ -98,32 +96,21 @@ type OCIRepositorySpec struct {
	// +optional
	ServiceAccountName string `json:"serviceAccountName,omitempty"`

	// CertSecretRef can be given the name of a Secret containing
	// CertSecretRef can be given the name of a secret containing
	// either or both of
	//
	// - a PEM-encoded client certificate (`tls.crt`) and private
	// key (`tls.key`);
	// - a PEM-encoded CA certificate (`ca.crt`)
	// - a PEM-encoded client certificate (`certFile`) and private
	// key (`keyFile`);
	// - a PEM-encoded CA certificate (`caFile`)
	//
	// and whichever are supplied, will be used for connecting to the
	// registry. The client cert and key are useful if you are
	// authenticating with a certificate; the CA cert is useful if
	// you are using a self-signed server certificate. The Secret must
	// be of type `Opaque` or `kubernetes.io/tls`.
	//
	// Note: Support for the `caFile`, `certFile` and `keyFile` keys have
	// been deprecated.
	// and whichever are supplied, will be used for connecting to the
	// registry. The client cert and key are useful if you are
	// authenticating with a certificate; the CA cert is useful if
	// you are using a self-signed server certificate.
	// +optional
	CertSecretRef *meta.LocalObjectReference `json:"certSecretRef,omitempty"`

	// ProxySecretRef specifies the Secret containing the proxy configuration
	// to use while communicating with the container registry.
	// +optional
	ProxySecretRef *meta.LocalObjectReference `json:"proxySecretRef,omitempty"`

	// Interval at which the OCIRepository URL is checked for updates.
	// This interval is approximate and may be subject to jitter to ensure
	// efficient use of resources.
	// The interval at which to check for image updates.
	// +kubebuilder:validation:Type=string
	// +kubebuilder:validation:Pattern="^([0-9]+(\\.[0-9]+)?(ms|s|m|h))+$"
	// +required

@@ -163,10 +150,6 @@ type OCIRepositoryRef struct {
	// +optional
	SemVer string `json:"semver,omitempty"`

	// SemverFilter is a regex pattern to filter the tags within the SemVer range.
	// +optional
	SemverFilter string `json:"semverFilter,omitempty"`

	// Tag is the image tag to pull, defaults to latest.
	// +optional
	Tag string `json:"tag,omitempty"`

@@ -189,6 +172,19 @@ type OCILayerSelector struct {
	Operation string `json:"operation,omitempty"`
}

// OCIRepositoryVerification verifies the authenticity of an OCI Artifact
type OCIRepositoryVerification struct {
	// Provider specifies the technology used to sign the OCI Artifact.
	// +kubebuilder:validation:Enum=cosign
	// +kubebuilder:default:=cosign
	Provider string `json:"provider"`

	// SecretRef specifies the Kubernetes Secret containing the
	// trusted public keys.
	// +optional
	SecretRef *meta.LocalObjectReference `json:"secretRef,omitempty"`
}
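For context (not part of the diff): a minimal sketch of a cosign verification block using the v0.32.1-side type defined above; the Secret name is hypothetical.

package v1beta2

import "github.com/fluxcd/pkg/apis/meta"

func exampleVerification() *OCIRepositoryVerification {
	return &OCIRepositoryVerification{
		Provider: "cosign", // the only value the enum above admits
		SecretRef: &meta.LocalObjectReference{
			Name: "cosign-public-keys", // hypothetical Secret holding trusted keys
		},
	}
}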
// OCIRepositoryStatus defines the observed state of OCIRepository
type OCIRepositoryStatus struct {
	// ObservedGeneration is the last observed generation.

@@ -205,7 +201,7 @@ type OCIRepositoryStatus struct {

	// Artifact represents the output of the last successful OCI Repository sync.
	// +optional
	Artifact *apiv1.Artifact `json:"artifact,omitempty"`
	Artifact *Artifact `json:"artifact,omitempty"`

	// ContentConfigChecksum is a checksum of all the configurations related to
	// the content of the source artifact:

@@ -260,7 +256,7 @@ func (in OCIRepository) GetRequeueAfter() time.Duration {

// GetArtifact returns the latest Artifact from the OCIRepository if present in
// the status sub-resource.
func (in *OCIRepository) GetArtifact() *apiv1.Artifact {
func (in *OCIRepository) GetArtifact() *Artifact {
	return in.Status.Artifact
}

@@ -283,10 +279,11 @@ func (in *OCIRepository) GetLayerOperation() string {
}

// +genclient
// +genclient:Namespaced
// +kubebuilder:storageversion
// +kubebuilder:object:root=true
// +kubebuilder:resource:shortName=ocirepo
// +kubebuilder:subresource:status
// +kubebuilder:deprecatedversion:warning="v1beta2 OCIRepository is deprecated, upgrade to v1"
// +kubebuilder:printcolumn:name="URL",type=string,JSONPath=`.spec.url`
// +kubebuilder:printcolumn:name="Ready",type="string",JSONPath=".status.conditions[?(@.type==\"Ready\")].status",description=""
// +kubebuilder:printcolumn:name="Status",type="string",JSONPath=".status.conditions[?(@.type==\"Ready\")].message",description=""

@@ -33,9 +33,6 @@ const (
// interval. It must be supported by all kinds of the source.toolkit.fluxcd.io
// API group.
//
// Deprecated: use the Source interface from api/v1 instead. This type will be
// removed in a future release.
//
// +k8s:deepcopy-gen=false
type Source interface {
	runtime.Object

@@ -1,26 +0,0 @@
/*
Copyright 2024 The Flux authors

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

	http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package v1beta2

const (
	// STSProviderAmazon represents the AWS provider for Security Token Service.
	// Provides support for fetching temporary credentials from an AWS STS endpoint.
	STSProviderAmazon string = "aws"
	// STSProviderLDAP represents the LDAP provider for Security Token Service.
	// Provides support for fetching temporary credentials from an LDAP endpoint.
	STSProviderLDAP string = "ldap"
)

@@ -1,7 +1,8 @@
//go:build !ignore_autogenerated
// +build !ignore_autogenerated

/*
Copyright 2024 The Flux authors
Copyright 2022 The Flux authors

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.

@@ -23,7 +24,6 @@ package v1beta2
import (
	"github.com/fluxcd/pkg/apis/acl"
	"github.com/fluxcd/pkg/apis/meta"
	apiv1 "github.com/fluxcd/source-controller/api/v1"
	"k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime"
)

@@ -115,54 +115,14 @@ func (in *BucketList) DeepCopyObject() runtime.Object {
	return nil
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *BucketSTSSpec) DeepCopyInto(out *BucketSTSSpec) {
	*out = *in
	if in.SecretRef != nil {
		in, out := &in.SecretRef, &out.SecretRef
		*out = new(meta.LocalObjectReference)
		**out = **in
	}
	if in.CertSecretRef != nil {
		in, out := &in.CertSecretRef, &out.CertSecretRef
		*out = new(meta.LocalObjectReference)
		**out = **in
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BucketSTSSpec.
func (in *BucketSTSSpec) DeepCopy() *BucketSTSSpec {
	if in == nil {
		return nil
	}
	out := new(BucketSTSSpec)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *BucketSpec) DeepCopyInto(out *BucketSpec) {
	*out = *in
	if in.STS != nil {
		in, out := &in.STS, &out.STS
		*out = new(BucketSTSSpec)
		(*in).DeepCopyInto(*out)
	}
	if in.SecretRef != nil {
		in, out := &in.SecretRef, &out.SecretRef
		*out = new(meta.LocalObjectReference)
		**out = **in
	}
	if in.CertSecretRef != nil {
		in, out := &in.CertSecretRef, &out.CertSecretRef
		*out = new(meta.LocalObjectReference)
		**out = **in
	}
	if in.ProxySecretRef != nil {
		in, out := &in.ProxySecretRef, &out.ProxySecretRef
		*out = new(meta.LocalObjectReference)
		**out = **in
	}
	out.Interval = in.Interval
	if in.Timeout != nil {
		in, out := &in.Timeout, &out.Timeout

@@ -203,7 +163,7 @@ func (in *BucketStatus) DeepCopyInto(out *BucketStatus) {
	}
	if in.Artifact != nil {
		in, out := &in.Artifact, &out.Artifact
		*out = new(apiv1.Artifact)
		*out = new(Artifact)
		(*in).DeepCopyInto(*out)
	}
	if in.ObservedIgnore != nil {

@@ -377,16 +337,16 @@ func (in *GitRepositoryStatus) DeepCopyInto(out *GitRepositoryStatus) {
	}
	if in.Artifact != nil {
		in, out := &in.Artifact, &out.Artifact
		*out = new(apiv1.Artifact)
		*out = new(Artifact)
		(*in).DeepCopyInto(*out)
	}
	if in.IncludedArtifacts != nil {
		in, out := &in.IncludedArtifacts, &out.IncludedArtifacts
		*out = make([]*apiv1.Artifact, len(*in))
		*out = make([]*Artifact, len(*in))
		for i := range *in {
			if (*in)[i] != nil {
				in, out := &(*in)[i], &(*out)[i]
				*out = new(apiv1.Artifact)
				*out = new(Artifact)
				(*in).DeepCopyInto(*out)
			}
		}

@@ -506,7 +466,7 @@ func (in *HelmChartSpec) DeepCopyInto(out *HelmChartSpec) {
	}
	if in.Verify != nil {
		in, out := &in.Verify, &out.Verify
		*out = new(apiv1.OCIRepositoryVerification)
		*out = new(OCIRepositoryVerification)
		(*in).DeepCopyInto(*out)
	}
}

@@ -524,11 +484,6 @@ func (in *HelmChartSpec) DeepCopy() *HelmChartSpec {
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *HelmChartStatus) DeepCopyInto(out *HelmChartStatus) {
	*out = *in
	if in.ObservedValuesFiles != nil {
		in, out := &in.ObservedValuesFiles, &out.ObservedValuesFiles
		*out = make([]string, len(*in))
		copy(*out, *in)
	}
	if in.Conditions != nil {
		in, out := &in.Conditions, &out.Conditions
		*out = make([]v1.Condition, len(*in))

@@ -538,7 +493,7 @@ func (in *HelmChartStatus) DeepCopyInto(out *HelmChartStatus) {
	}
	if in.Artifact != nil {
		in, out := &in.Artifact, &out.Artifact
		*out = new(apiv1.Artifact)
		*out = new(Artifact)
		(*in).DeepCopyInto(*out)
	}
	out.ReconcileRequestStatus = in.ReconcileRequestStatus

@@ -621,11 +576,6 @@ func (in *HelmRepositorySpec) DeepCopyInto(out *HelmRepositorySpec) {
		*out = new(meta.LocalObjectReference)
		**out = **in
	}
	if in.CertSecretRef != nil {
		in, out := &in.CertSecretRef, &out.CertSecretRef
		*out = new(meta.LocalObjectReference)
		**out = **in
	}
	out.Interval = in.Interval
	if in.Timeout != nil {
		in, out := &in.Timeout, &out.Timeout

@@ -661,7 +611,7 @@ func (in *HelmRepositoryStatus) DeepCopyInto(out *HelmRepositoryStatus) {
	}
	if in.Artifact != nil {
		in, out := &in.Artifact, &out.Artifact
		*out = new(apiv1.Artifact)
		*out = new(Artifact)
		(*in).DeepCopyInto(*out)
	}
	out.ReconcileRequestStatus = in.ReconcileRequestStatus

@@ -801,7 +751,7 @@ func (in *OCIRepositorySpec) DeepCopyInto(out *OCIRepositorySpec) {
	}
	if in.Verify != nil {
		in, out := &in.Verify, &out.Verify
		*out = new(apiv1.OCIRepositoryVerification)
		*out = new(OCIRepositoryVerification)
		(*in).DeepCopyInto(*out)
	}
	if in.CertSecretRef != nil {

@@ -809,11 +759,6 @@ func (in *OCIRepositorySpec) DeepCopyInto(out *OCIRepositorySpec) {
		*out = new(meta.LocalObjectReference)
		**out = **in
	}
	if in.ProxySecretRef != nil {
		in, out := &in.ProxySecretRef, &out.ProxySecretRef
		*out = new(meta.LocalObjectReference)
		**out = **in
	}
	out.Interval = in.Interval
	if in.Timeout != nil {
		in, out := &in.Timeout, &out.Timeout

@@ -849,7 +794,7 @@ func (in *OCIRepositoryStatus) DeepCopyInto(out *OCIRepositoryStatus) {
	}
	if in.Artifact != nil {
		in, out := &in.Artifact, &out.Artifact
		*out = new(apiv1.Artifact)
		*out = new(Artifact)
		(*in).DeepCopyInto(*out)
	}
	if in.ObservedIgnore != nil {

@@ -874,3 +819,23 @@ func (in *OCIRepositoryStatus) DeepCopy() *OCIRepositoryStatus {
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *OCIRepositoryVerification) DeepCopyInto(out *OCIRepositoryVerification) {
	*out = *in
	if in.SecretRef != nil {
		in, out := &in.SecretRef, &out.SecretRef
		*out = new(meta.LocalObjectReference)
		**out = **in
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OCIRepositoryVerification.
func (in *OCIRepositoryVerification) DeepCopy() *OCIRepositoryVerification {
	if in == nil {
		return nil
	}
	out := new(OCIRepositoryVerification)
	in.DeepCopyInto(out)
	return out
}
File diff suppressed because it is too large
File diff suppressed because it is too large
File diff suppressed because it is too large
@@ -3,7 +3,8 @@ apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
  annotations:
    controller-gen.kubebuilder.io/version: v0.16.1
    controller-gen.kubebuilder.io/version: v0.8.0
  creationTimestamp: null
  name: helmrepositories.source.toolkit.fluxcd.io
spec:
  group: source.toolkit.fluxcd.io

@@ -16,308 +17,6 @@ spec:
    singular: helmrepository
  scope: Namespaced
  versions:
  - additionalPrinterColumns:
    - jsonPath: .spec.url
      name: URL
      type: string
    - jsonPath: .metadata.creationTimestamp
      name: Age
      type: date
    - jsonPath: .status.conditions[?(@.type=="Ready")].status
      name: Ready
      type: string
    - jsonPath: .status.conditions[?(@.type=="Ready")].message
      name: Status
      type: string
    name: v1
    schema:
      openAPIV3Schema:
        description: HelmRepository is the Schema for the helmrepositories API.
        properties:
          apiVersion:
            description: |-
              APIVersion defines the versioned schema of this representation of an object.
              Servers should convert recognized schemas to the latest internal value, and
              may reject unrecognized values.
              More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources
            type: string
          kind:
            description: |-
              Kind is a string value representing the REST resource this object represents.
              Servers may infer this from the endpoint the client submits requests to.
              Cannot be updated.
              In CamelCase.
              More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
            type: string
          metadata:
            type: object
          spec:
            description: |-
              HelmRepositorySpec specifies the required configuration to produce an
              Artifact for a Helm repository index YAML.
            properties:
              accessFrom:
                description: |-
                  AccessFrom specifies an Access Control List for allowing cross-namespace
                  references to this object.
                  NOTE: Not implemented, provisional as of https://github.com/fluxcd/flux2/pull/2092
                properties:
                  namespaceSelectors:
                    description: |-
                      NamespaceSelectors is the list of namespace selectors to which this ACL applies.
                      Items in this list are evaluated using a logical OR operation.
                    items:
                      description: |-
                        NamespaceSelector selects the namespaces to which this ACL applies.
                        An empty map of MatchLabels matches all namespaces in a cluster.
                      properties:
                        matchLabels:
                          additionalProperties:
                            type: string
                          description: |-
                            MatchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels
                            map is equivalent to an element of matchExpressions, whose key field is "key", the
                            operator is "In", and the values array contains only "value". The requirements are ANDed.
                          type: object
                      type: object
                    type: array
                required:
                - namespaceSelectors
                type: object
              certSecretRef:
                description: |-
                  CertSecretRef can be given the name of a Secret containing
                  either or both of

                  - a PEM-encoded client certificate (`tls.crt`) and private
                    key (`tls.key`);
                  - a PEM-encoded CA certificate (`ca.crt`)

                  and whichever are supplied, will be used for connecting to the
                  registry. The client cert and key are useful if you are
                  authenticating with a certificate; the CA cert is useful if
                  you are using a self-signed server certificate. The Secret must
                  be of type `Opaque` or `kubernetes.io/tls`.

                  It takes precedence over the values specified in the Secret referred
                  to by `.spec.secretRef`.
                properties:
                  name:
                    description: Name of the referent.
                    type: string
                required:
                - name
                type: object
              insecure:
                description: |-
                  Insecure allows connecting to a non-TLS HTTP container registry.
                  This field is only taken into account if the .spec.type field is set to 'oci'.
                type: boolean
              interval:
                description: |-
                  Interval at which the HelmRepository URL is checked for updates.
                  This interval is approximate and may be subject to jitter to ensure
                  efficient use of resources.
                pattern: ^([0-9]+(\.[0-9]+)?(ms|s|m|h))+$
                type: string
              passCredentials:
                description: |-
                  PassCredentials allows the credentials from the SecretRef to be passed
                  on to a host that does not match the host as defined in URL.
                  This may be required if the host of the advertised chart URLs in the
                  index differ from the defined URL.
                  Enabling this should be done with caution, as it can potentially result
                  in credentials getting stolen in a MITM-attack.
                type: boolean
              provider:
                default: generic
                description: |-
                  Provider used for authentication, can be 'aws', 'azure', 'gcp' or 'generic'.
|
||||
This field is optional, and only taken into account if the .spec.type field is set to 'oci'.
|
||||
When not specified, defaults to 'generic'.
|
||||
enum:
|
||||
- generic
|
||||
- aws
|
||||
- azure
|
||||
- gcp
|
||||
type: string
|
||||
secretRef:
|
||||
description: |-
|
||||
SecretRef specifies the Secret containing authentication credentials
|
||||
for the HelmRepository.
|
||||
For HTTP/S basic auth the secret must contain 'username' and 'password'
|
||||
fields.
|
||||
Support for TLS auth using the 'certFile' and 'keyFile', and/or 'caFile'
|
||||
keys is deprecated. Please use `.spec.certSecretRef` instead.
|
||||
properties:
|
||||
name:
|
||||
description: Name of the referent.
|
||||
type: string
|
||||
required:
|
||||
- name
|
||||
type: object
|
||||
suspend:
|
||||
description: |-
|
||||
Suspend tells the controller to suspend the reconciliation of this
|
||||
HelmRepository.
|
||||
type: boolean
|
||||
timeout:
|
||||
description: |-
|
||||
Timeout is used for the index fetch operation for an HTTPS helm repository,
|
||||
and for remote OCI Repository operations like pulling for an OCI helm
|
||||
chart by the associated HelmChart.
|
||||
Its default value is 60s.
|
||||
pattern: ^([0-9]+(\.[0-9]+)?(ms|s|m))+$
|
||||
type: string
|
||||
type:
|
||||
description: |-
|
||||
Type of the HelmRepository.
|
||||
When this field is set to "oci", the URL field value must be prefixed with "oci://".
|
||||
enum:
|
||||
- default
|
||||
- oci
|
||||
type: string
|
||||
url:
|
||||
description: |-
|
||||
URL of the Helm repository, a valid URL contains at least a protocol and
|
||||
host.
|
||||
pattern: ^(http|https|oci)://.*$
|
||||
type: string
|
||||
required:
|
||||
- url
|
||||
type: object
|
||||
status:
|
||||
default:
|
||||
observedGeneration: -1
|
||||
description: HelmRepositoryStatus records the observed state of the HelmRepository.
|
||||
properties:
|
||||
artifact:
|
||||
description: Artifact represents the last successful HelmRepository
|
||||
reconciliation.
|
||||
properties:
|
||||
digest:
|
||||
description: Digest is the digest of the file in the form of '<algorithm>:<checksum>'.
|
||||
pattern: ^[a-z0-9]+(?:[.+_-][a-z0-9]+)*:[a-zA-Z0-9=_-]+$
|
||||
type: string
|
||||
lastUpdateTime:
|
||||
description: |-
|
||||
LastUpdateTime is the timestamp corresponding to the last update of the
|
||||
Artifact.
|
||||
format: date-time
|
||||
type: string
|
||||
metadata:
|
||||
additionalProperties:
|
||||
type: string
|
||||
description: Metadata holds upstream information such as OCI annotations.
|
||||
type: object
|
||||
path:
|
||||
description: |-
|
||||
Path is the relative file path of the Artifact. It can be used to locate
|
||||
the file in the root of the Artifact storage on the local file system of
|
||||
the controller managing the Source.
|
||||
type: string
|
||||
revision:
|
||||
description: |-
|
||||
Revision is a human-readable identifier traceable in the origin source
|
||||
system. It can be a Git commit SHA, Git tag, a Helm chart version, etc.
|
||||
type: string
|
||||
size:
|
||||
description: Size is the number of bytes in the file.
|
||||
format: int64
|
||||
type: integer
|
||||
url:
|
||||
description: |-
|
||||
URL is the HTTP address of the Artifact as exposed by the controller
|
||||
managing the Source. It can be used to retrieve the Artifact for
|
||||
consumption, e.g. by another controller applying the Artifact contents.
|
||||
type: string
|
||||
required:
|
||||
- lastUpdateTime
|
||||
- path
|
||||
- revision
|
||||
- url
|
||||
type: object
|
||||
conditions:
|
||||
description: Conditions holds the conditions for the HelmRepository.
|
||||
items:
|
||||
description: Condition contains details for one aspect of the current
|
||||
state of this API Resource.
|
||||
properties:
|
||||
lastTransitionTime:
|
||||
description: |-
|
||||
lastTransitionTime is the last time the condition transitioned from one status to another.
|
||||
This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable.
|
||||
format: date-time
|
||||
type: string
|
||||
message:
|
||||
description: |-
|
||||
message is a human readable message indicating details about the transition.
|
||||
This may be an empty string.
|
||||
maxLength: 32768
|
||||
type: string
|
||||
observedGeneration:
|
||||
description: |-
|
||||
observedGeneration represents the .metadata.generation that the condition was set based upon.
|
||||
For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date
|
||||
with respect to the current state of the instance.
|
||||
format: int64
|
||||
minimum: 0
|
||||
type: integer
|
||||
reason:
|
||||
description: |-
|
||||
reason contains a programmatic identifier indicating the reason for the condition's last transition.
|
||||
Producers of specific condition types may define expected values and meanings for this field,
|
||||
and whether the values are considered a guaranteed API.
|
||||
The value should be a CamelCase string.
|
||||
This field may not be empty.
|
||||
maxLength: 1024
|
||||
minLength: 1
|
||||
pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$
|
||||
type: string
|
||||
status:
|
||||
description: status of the condition, one of True, False, Unknown.
|
||||
enum:
|
||||
- "True"
|
||||
- "False"
|
||||
- Unknown
|
||||
type: string
|
||||
type:
|
||||
description: type of condition in CamelCase or in foo.example.com/CamelCase.
|
||||
maxLength: 316
|
||||
pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$
|
||||
type: string
|
||||
required:
|
||||
- lastTransitionTime
|
||||
- message
|
||||
- reason
|
||||
- status
|
||||
- type
|
||||
type: object
|
||||
type: array
|
||||
lastHandledReconcileAt:
|
||||
description: |-
|
||||
LastHandledReconcileAt holds the value of the most recent
|
||||
reconcile request value, so a change of the annotation value
|
||||
can be detected.
|
||||
type: string
|
||||
observedGeneration:
|
||||
description: |-
|
||||
ObservedGeneration is the last observed generation of the HelmRepository
|
||||
object.
|
||||
format: int64
|
||||
type: integer
|
||||
url:
|
||||
description: |-
|
||||
URL is the dynamic fetch link for the latest Artifact.
|
||||
It is provided on a "best effort" basis, and using the precise
|
||||
HelmRepositoryStatus.Artifact data is recommended.
|
||||
type: string
|
||||
type: object
|
||||
type: object
|
||||
served: true
|
||||
storage: true
|
||||
subresources:
|
||||
status: {}
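As a sketch of the v1 schema above (all names hypothetical, not taken from this diff), a HelmRepository that authenticates with a client certificate would pair the resource with a kubernetes.io/tls Secret carrying the tls.crt/tls.key pair and an optional ca.crt, per the certSecretRef contract:

apiVersion: source.toolkit.fluxcd.io/v1
kind: HelmRepository
metadata:
  name: example-repo                # hypothetical name
  namespace: default
spec:
  url: https://charts.example.com   # hypothetical host
  interval: 10m                     # must match ^([0-9]+(\.[0-9]+)?(ms|s|m|h))+$
  certSecretRef:
    name: example-repo-tls          # Secret of type kubernetes.io/tls, defined below
---
apiVersion: v1
kind: Secret
metadata:
  name: example-repo-tls            # hypothetical name
  namespace: default
type: kubernetes.io/tls
stringData:
  tls.crt: |                        # PEM-encoded client certificate
    -----BEGIN CERTIFICATE-----
    ...
  tls.key: |                        # PEM-encoded private key
    -----BEGIN PRIVATE KEY-----
    ...
  ca.crt: |                         # optional PEM-encoded CA certificate
    -----BEGIN CERTIFICATE-----
    ...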
- additionalPrinterColumns:
- jsonPath: .spec.url
name: URL
@@ -331,27 +30,20 @@ spec:
- jsonPath: .metadata.creationTimestamp
name: Age
type: date
deprecated: true
deprecationWarning: v1beta1 HelmRepository is deprecated, upgrade to v1
name: v1beta1
schema:
openAPIV3Schema:
description: HelmRepository is the Schema for the helmrepositories API
properties:
apiVersion:
description: |-
APIVersion defines the versioned schema of this representation of an object.
Servers should convert recognized schemas to the latest internal value, and
may reject unrecognized values.
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources
description: 'APIVersion defines the versioned schema of this representation
of an object. Servers should convert recognized schemas to the latest
internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
type: string
kind:
description: |-
Kind is a string value representing the REST resource this object represents.
Servers may infer this from the endpoint the client submits requests to.
Cannot be updated.
In CamelCase.
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
description: 'Kind is a string value representing the REST resource this
object represents. Servers may infer this from the endpoint the client
submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
type: string
metadata:
type: object
@@ -363,21 +55,22 @@ spec:
cross-namespace references to this object.
properties:
namespaceSelectors:
description: |-
NamespaceSelectors is the list of namespace selectors to which this ACL applies.
Items in this list are evaluated using a logical OR operation.
description: NamespaceSelectors is the list of namespace selectors
to which this ACL applies. Items in this list are evaluated
using a logical OR operation.
items:
description: |-
NamespaceSelector selects the namespaces to which this ACL applies.
An empty map of MatchLabels matches all namespaces in a cluster.
description: NamespaceSelector selects the namespaces to which
this ACL applies. An empty map of MatchLabels matches all
namespaces in a cluster.
properties:
matchLabels:
additionalProperties:
type: string
description: |-
MatchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels
map is equivalent to an element of matchExpressions, whose key field is "key", the
operator is "In", and the values array contains only "value". The requirements are ANDed.
description: MatchLabels is a map of {key,value} pairs.
A single {key,value} in the matchLabels map is equivalent
to an element of matchExpressions, whose key field is
"key", the operator is "In", and the values array contains
only "value". The requirements are ANDed.
type: object
type: object
type: array
@@ -388,22 +81,18 @@ spec:
description: The interval at which to check the upstream for updates.
type: string
passCredentials:
description: |-
PassCredentials allows the credentials from the SecretRef to be passed on to
a host that does not match the host as defined in URL.
This may be required if the host of the advertised chart URLs in the index
differ from the defined URL.
Enabling this should be done with caution, as it can potentially result in
credentials getting stolen in a MITM-attack.
description: PassCredentials allows the credentials from the SecretRef
to be passed on to a host that does not match the host as defined
in URL. This may be required if the host of the advertised chart
URLs in the index differ from the defined URL. Enabling this should
be done with caution, as it can potentially result in credentials
getting stolen in a MITM-attack.
type: boolean
secretRef:
description: |-
The name of the secret containing authentication credentials for the Helm
repository.
For HTTP/S basic auth the secret must contain username and
password fields.
For TLS the secret must contain a certFile and keyFile, and/or
caFile fields.
description: The name of the secret containing authentication credentials
for the Helm repository. For HTTP/S basic auth the secret must contain
username and password fields. For TLS the secret must contain a
certFile and keyFile, and/or caCert fields.
properties:
name:
description: Name of the referent.
@@ -440,60 +129,65 @@ spec:
description: Checksum is the SHA256 checksum of the artifact.
type: string
lastUpdateTime:
description: |-
LastUpdateTime is the timestamp corresponding to the last update of this
artifact.
description: LastUpdateTime is the timestamp corresponding to
the last update of this artifact.
format: date-time
type: string
path:
description: Path is the relative file path of this artifact.
type: string
revision:
description: |-
Revision is a human readable identifier traceable in the origin source
system. It can be a Git commit SHA, Git tag, a Helm index timestamp, a Helm
chart version, etc.
description: Revision is a human readable identifier traceable
in the origin source system. It can be a Git commit SHA, Git
tag, a Helm index timestamp, a Helm chart version, etc.
type: string
url:
description: URL is the HTTP address of this artifact.
type: string
required:
- lastUpdateTime
- path
- url
type: object
conditions:
description: Conditions holds the conditions for the HelmRepository.
items:
description: Condition contains details for one aspect of the current
state of this API Resource.
description: "Condition contains details for one aspect of the current
state of this API Resource. --- This struct is intended for direct
use as an array at the field path .status.conditions. For example,
\n type FooStatus struct{ // Represents the observations of a
foo's current state. // Known .status.conditions.type are: \"Available\",
\"Progressing\", and \"Degraded\" // +patchMergeKey=type // +patchStrategy=merge
// +listType=map // +listMapKey=type Conditions []metav1.Condition
`json:\"conditions,omitempty\" patchStrategy:\"merge\" patchMergeKey:\"type\"
protobuf:\"bytes,1,rep,name=conditions\"` \n // other fields }"
properties:
lastTransitionTime:
description: |-
lastTransitionTime is the last time the condition transitioned from one status to another.
This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable.
description: lastTransitionTime is the last time the condition
transitioned from one status to another. This should be when
the underlying condition changed. If that is not known, then
using the time when the API field changed is acceptable.
format: date-time
type: string
message:
description: |-
message is a human readable message indicating details about the transition.
This may be an empty string.
description: message is a human readable message indicating
details about the transition. This may be an empty string.
maxLength: 32768
type: string
observedGeneration:
description: |-
observedGeneration represents the .metadata.generation that the condition was set based upon.
For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date
with respect to the current state of the instance.
description: observedGeneration represents the .metadata.generation
that the condition was set based upon. For instance, if .metadata.generation
is currently 12, but the .status.conditions[x].observedGeneration
is 9, the condition is out of date with respect to the current
state of the instance.
format: int64
minimum: 0
type: integer
reason:
description: |-
reason contains a programmatic identifier indicating the reason for the condition's last transition.
Producers of specific condition types may define expected values and meanings for this field,
and whether the values are considered a guaranteed API.
The value should be a CamelCase string.
description: reason contains a programmatic identifier indicating
the reason for the condition's last transition. Producers
of specific condition types may define expected values and
meanings for this field, and whether the values are considered
a guaranteed API. The value should be a CamelCase string.
This field may not be empty.
maxLength: 1024
minLength: 1
@@ -508,6 +202,10 @@ spec:
type: string
type:
description: type of condition in CamelCase or in foo.example.com/CamelCase.
--- Many .condition.type values are consistent across resources
like Available, but because arbitrary conditions can be useful
(see .node.status.conditions), the ability to deconflict is
important. The regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt)
maxLength: 316
pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$
type: string
@@ -520,10 +218,9 @@ spec:
type: object
type: array
lastHandledReconcileAt:
description: |-
LastHandledReconcileAt holds the value of the most recent
reconcile request value, so a change of the annotation value
can be detected.
description: LastHandledReconcileAt holds the value of the most recent
reconcile request value, so a change of the annotation value can
be detected.
type: string
observedGeneration:
description: ObservedGeneration is the last observed generation.
@@ -551,114 +248,73 @@ spec:
- jsonPath: .status.conditions[?(@.type=="Ready")].message
name: Status
type: string
deprecated: true
deprecationWarning: v1beta2 HelmRepository is deprecated, upgrade to v1
name: v1beta2
schema:
openAPIV3Schema:
description: HelmRepository is the Schema for the helmrepositories API.
properties:
apiVersion:
description: |-
APIVersion defines the versioned schema of this representation of an object.
Servers should convert recognized schemas to the latest internal value, and
may reject unrecognized values.
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources
description: 'APIVersion defines the versioned schema of this representation
of an object. Servers should convert recognized schemas to the latest
internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
type: string
kind:
description: |-
Kind is a string value representing the REST resource this object represents.
Servers may infer this from the endpoint the client submits requests to.
Cannot be updated.
In CamelCase.
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
description: 'Kind is a string value representing the REST resource this
object represents. Servers may infer this from the endpoint the client
submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
type: string
metadata:
type: object
spec:
description: |-
HelmRepositorySpec specifies the required configuration to produce an
Artifact for a Helm repository index YAML.
description: HelmRepositorySpec specifies the required configuration to
produce an Artifact for a Helm repository index YAML.
properties:
accessFrom:
description: |-
AccessFrom specifies an Access Control List for allowing cross-namespace
references to this object.
NOTE: Not implemented, provisional as of https://github.com/fluxcd/flux2/pull/2092
description: 'AccessFrom specifies an Access Control List for allowing
cross-namespace references to this object. NOTE: Not implemented,
provisional as of https://github.com/fluxcd/flux2/pull/2092'
properties:
namespaceSelectors:
description: |-
NamespaceSelectors is the list of namespace selectors to which this ACL applies.
Items in this list are evaluated using a logical OR operation.
description: NamespaceSelectors is the list of namespace selectors
to which this ACL applies. Items in this list are evaluated
using a logical OR operation.
items:
description: |-
NamespaceSelector selects the namespaces to which this ACL applies.
An empty map of MatchLabels matches all namespaces in a cluster.
description: NamespaceSelector selects the namespaces to which
this ACL applies. An empty map of MatchLabels matches all
namespaces in a cluster.
properties:
matchLabels:
additionalProperties:
type: string
description: |-
MatchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels
map is equivalent to an element of matchExpressions, whose key field is "key", the
operator is "In", and the values array contains only "value". The requirements are ANDed.
description: MatchLabels is a map of {key,value} pairs.
A single {key,value} in the matchLabels map is equivalent
to an element of matchExpressions, whose key field is
"key", the operator is "In", and the values array contains
only "value". The requirements are ANDed.
type: object
type: object
type: array
required:
- namespaceSelectors
type: object
certSecretRef:
description: |-
CertSecretRef can be given the name of a Secret containing
either or both of

- a PEM-encoded client certificate (`tls.crt`) and private
key (`tls.key`);
- a PEM-encoded CA certificate (`ca.crt`)

and whichever are supplied, will be used for connecting to the
registry. The client cert and key are useful if you are
authenticating with a certificate; the CA cert is useful if
you are using a self-signed server certificate. The Secret must
be of type `Opaque` or `kubernetes.io/tls`.

It takes precedence over the values specified in the Secret referred
to by `.spec.secretRef`.
properties:
name:
description: Name of the referent.
type: string
required:
- name
type: object
insecure:
description: |-
Insecure allows connecting to a non-TLS HTTP container registry.
This field is only taken into account if the .spec.type field is set to 'oci'.
type: boolean
interval:
description: |-
Interval at which the HelmRepository URL is checked for updates.
This interval is approximate and may be subject to jitter to ensure
efficient use of resources.
description: Interval at which to check the URL for updates.
pattern: ^([0-9]+(\.[0-9]+)?(ms|s|m|h))+$
type: string
passCredentials:
description: |-
PassCredentials allows the credentials from the SecretRef to be passed
on to a host that does not match the host as defined in URL.
This may be required if the host of the advertised chart URLs in the
index differ from the defined URL.
Enabling this should be done with caution, as it can potentially result
in credentials getting stolen in a MITM-attack.
description: PassCredentials allows the credentials from the SecretRef
to be passed on to a host that does not match the host as defined
in URL. This may be required if the host of the advertised chart
URLs in the index differ from the defined URL. Enabling this should
be done with caution, as it can potentially result in credentials
getting stolen in a MITM-attack.
type: boolean
provider:
default: generic
description: |-
Provider used for authentication, can be 'aws', 'azure', 'gcp' or 'generic'.
This field is optional, and only taken into account if the .spec.type field is set to 'oci'.
When not specified, defaults to 'generic'.
description: Provider used for authentication, can be 'aws', 'azure',
'gcp' or 'generic'. This field is optional, and only taken into
account if the .spec.type field is set to 'oci'. When not specified,
defaults to 'generic'.
enum:
- generic
- aws
@@ -666,13 +322,10 @@ spec:
- gcp
type: string
secretRef:
description: |-
SecretRef specifies the Secret containing authentication credentials
for the HelmRepository.
For HTTP/S basic auth the secret must contain 'username' and 'password'
fields.
Support for TLS auth using the 'certFile' and 'keyFile', and/or 'caFile'
keys is deprecated. Please use `.spec.certSecretRef` instead.
description: SecretRef specifies the Secret containing authentication
credentials for the HelmRepository. For HTTP/S basic auth the secret
must contain 'username' and 'password' fields. For TLS the secret
must contain a 'certFile' and 'keyFile', and/or 'caCert' fields.
properties:
name:
description: Name of the referent.
@@ -681,33 +334,29 @@ spec:
- name
type: object
suspend:
description: |-
Suspend tells the controller to suspend the reconciliation of this
HelmRepository.
description: Suspend tells the controller to suspend the reconciliation
of this HelmRepository.
type: boolean
timeout:
description: |-
Timeout is used for the index fetch operation for an HTTPS helm repository,
and for remote OCI Repository operations like pulling for an OCI helm
chart by the associated HelmChart.
Its default value is 60s.
default: 60s
description: Timeout is used for the index fetch operation for an
HTTPS helm repository, and for remote OCI Repository operations
like pulling for an OCI helm repository. Its default value is 60s.
pattern: ^([0-9]+(\.[0-9]+)?(ms|s|m))+$
type: string
type:
description: |-
Type of the HelmRepository.
When this field is set to "oci", the URL field value must be prefixed with "oci://".
description: Type of the HelmRepository. When this field is set to "oci",
the URL field value must be prefixed with "oci://".
enum:
- default
- oci
type: string
url:
description: |-
URL of the Helm repository, a valid URL contains at least a protocol and
host.
pattern: ^(http|https|oci)://.*$
description: URL of the Helm repository, a valid URL contains at least
a protocol and host.
type: string
required:
- interval
- url
type: object
status:
@@ -719,14 +368,12 @@ spec:
description: Artifact represents the last successful HelmRepository
reconciliation.
properties:
digest:
description: Digest is the digest of the file in the form of '<algorithm>:<checksum>'.
pattern: ^[a-z0-9]+(?:[.+_-][a-z0-9]+)*:[a-zA-Z0-9=_-]+$
checksum:
description: Checksum is the SHA256 checksum of the Artifact file.
type: string
lastUpdateTime:
description: |-
LastUpdateTime is the timestamp corresponding to the last update of the
Artifact.
description: LastUpdateTime is the timestamp corresponding to
the last update of the Artifact.
format: date-time
type: string
metadata:
@@ -735,64 +382,69 @@ spec:
description: Metadata holds upstream information such as OCI annotations.
type: object
path:
description: |-
Path is the relative file path of the Artifact. It can be used to locate
the file in the root of the Artifact storage on the local file system of
the controller managing the Source.
description: Path is the relative file path of the Artifact. It
can be used to locate the file in the root of the Artifact storage
on the local file system of the controller managing the Source.
type: string
revision:
description: |-
Revision is a human-readable identifier traceable in the origin source
system. It can be a Git commit SHA, Git tag, a Helm chart version, etc.
description: Revision is a human-readable identifier traceable
in the origin source system. It can be a Git commit SHA, Git
tag, a Helm chart version, etc.
type: string
size:
description: Size is the number of bytes in the file.
format: int64
type: integer
url:
description: |-
URL is the HTTP address of the Artifact as exposed by the controller
managing the Source. It can be used to retrieve the Artifact for
consumption, e.g. by another controller applying the Artifact contents.
description: URL is the HTTP address of the Artifact as exposed
by the controller managing the Source. It can be used to retrieve
the Artifact for consumption, e.g. by another controller applying
the Artifact contents.
type: string
required:
- lastUpdateTime
- path
- revision
- url
type: object
conditions:
description: Conditions holds the conditions for the HelmRepository.
items:
description: Condition contains details for one aspect of the current
state of this API Resource.
description: "Condition contains details for one aspect of the current
state of this API Resource. --- This struct is intended for direct
use as an array at the field path .status.conditions. For example,
\n type FooStatus struct{ // Represents the observations of a
foo's current state. // Known .status.conditions.type are: \"Available\",
\"Progressing\", and \"Degraded\" // +patchMergeKey=type // +patchStrategy=merge
// +listType=map // +listMapKey=type Conditions []metav1.Condition
`json:\"conditions,omitempty\" patchStrategy:\"merge\" patchMergeKey:\"type\"
protobuf:\"bytes,1,rep,name=conditions\"` \n // other fields }"
properties:
lastTransitionTime:
description: |-
lastTransitionTime is the last time the condition transitioned from one status to another.
This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable.
description: lastTransitionTime is the last time the condition
transitioned from one status to another. This should be when
the underlying condition changed. If that is not known, then
using the time when the API field changed is acceptable.
format: date-time
type: string
message:
description: |-
message is a human readable message indicating details about the transition.
This may be an empty string.
description: message is a human readable message indicating
details about the transition. This may be an empty string.
maxLength: 32768
type: string
observedGeneration:
description: |-
observedGeneration represents the .metadata.generation that the condition was set based upon.
For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date
with respect to the current state of the instance.
description: observedGeneration represents the .metadata.generation
that the condition was set based upon. For instance, if .metadata.generation
is currently 12, but the .status.conditions[x].observedGeneration
is 9, the condition is out of date with respect to the current
state of the instance.
format: int64
minimum: 0
type: integer
reason:
description: |-
reason contains a programmatic identifier indicating the reason for the condition's last transition.
Producers of specific condition types may define expected values and meanings for this field,
and whether the values are considered a guaranteed API.
The value should be a CamelCase string.
description: reason contains a programmatic identifier indicating
the reason for the condition's last transition. Producers
of specific condition types may define expected values and
meanings for this field, and whether the values are considered
a guaranteed API. The value should be a CamelCase string.
This field may not be empty.
maxLength: 1024
minLength: 1
@@ -807,6 +459,10 @@ spec:
type: string
type:
description: type of condition in CamelCase or in foo.example.com/CamelCase.
--- Many .condition.type values are consistent across resources
like Available, but because arbitrary conditions can be useful
(see .node.status.conditions), the ability to deconflict is
important. The regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt)
maxLength: 316
pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$
type: string
@@ -819,26 +475,29 @@ spec:
type: object
type: array
lastHandledReconcileAt:
description: |-
LastHandledReconcileAt holds the value of the most recent
reconcile request value, so a change of the annotation value
can be detected.
description: LastHandledReconcileAt holds the value of the most recent
reconcile request value, so a change of the annotation value can
be detected.
type: string
observedGeneration:
description: |-
ObservedGeneration is the last observed generation of the HelmRepository
object.
description: ObservedGeneration is the last observed generation of
the HelmRepository object.
format: int64
type: integer
url:
description: |-
URL is the dynamic fetch link for the latest Artifact.
It is provided on a "best effort" basis, and using the precise
HelmRepositoryStatus.Artifact data is recommended.
description: URL is the dynamic fetch link for the latest Artifact.
It is provided on a "best effort" basis, and using the precise HelmRepositoryStatus.Artifact
data is recommended.
type: string
type: object
type: object
served: true
storage: false
storage: true
subresources:
status: {}
status:
acceptedNames:
kind: ""
plural: ""
conditions: []
storedVersions: []
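To make the type/url pairing above concrete: when .spec.type is 'oci', the URL must carry the oci:// prefix. A hedged example; the registry path is invented:

apiVersion: source.toolkit.fluxcd.io/v1
kind: HelmRepository
metadata:
  name: example-oci-charts            # hypothetical name
spec:
  type: oci                           # per the enum above: 'default' or 'oci'
  url: oci://ghcr.io/example/charts   # the 'oci' type requires the oci:// prefix
  interval: 1h                        # must match ^([0-9]+(\.[0-9]+)?(ms|s|m|h))+$
  provider: generic                   # only taken into account when type is 'oci'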
|
||||
|
|
|
@ -3,7 +3,8 @@ apiVersion: apiextensions.k8s.io/v1
|
|||
kind: CustomResourceDefinition
|
||||
metadata:
|
||||
annotations:
|
||||
controller-gen.kubebuilder.io/version: v0.16.1
|
||||
controller-gen.kubebuilder.io/version: v0.8.0
|
||||
creationTimestamp: null
|
||||
name: ocirepositories.source.toolkit.fluxcd.io
|
||||
spec:
|
||||
group: source.toolkit.fluxcd.io
|
||||
|
@ -29,25 +30,20 @@ spec:
|
|||
- jsonPath: .metadata.creationTimestamp
|
||||
name: Age
|
||||
type: date
|
||||
name: v1
|
||||
name: v1beta2
|
||||
schema:
|
||||
openAPIV3Schema:
|
||||
description: OCIRepository is the Schema for the ocirepositories API
|
||||
properties:
|
||||
apiVersion:
|
||||
description: |-
|
||||
APIVersion defines the versioned schema of this representation of an object.
|
||||
Servers should convert recognized schemas to the latest internal value, and
|
||||
may reject unrecognized values.
|
||||
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources
|
||||
description: 'APIVersion defines the versioned schema of this representation
|
||||
of an object. Servers should convert recognized schemas to the latest
|
||||
internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
|
||||
type: string
|
||||
kind:
|
||||
description: |-
|
||||
Kind is a string value representing the REST resource this object represents.
|
||||
Servers may infer this from the endpoint the client submits requests to.
|
||||
Cannot be updated.
|
||||
In CamelCase.
|
||||
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
|
||||
description: 'Kind is a string value representing the REST resource this
|
||||
object represents. Servers may infer this from the endpoint the client
|
||||
submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
|
||||
type: string
|
||||
metadata:
|
||||
type: object
|
||||
|
@ -55,19 +51,13 @@ spec:
|
|||
description: OCIRepositorySpec defines the desired state of OCIRepository
|
||||
properties:
|
||||
certSecretRef:
|
||||
description: |-
|
||||
CertSecretRef can be given the name of a Secret containing
|
||||
either or both of
|
||||
|
||||
- a PEM-encoded client certificate (`tls.crt`) and private
|
||||
key (`tls.key`);
|
||||
- a PEM-encoded CA certificate (`ca.crt`)
|
||||
|
||||
and whichever are supplied, will be used for connecting to the
|
||||
registry. The client cert and key are useful if you are
|
||||
authenticating with a certificate; the CA cert is useful if
|
||||
you are using a self-signed server certificate. The Secret must
|
||||
be of type `Opaque` or `kubernetes.io/tls`.
|
||||
description: "CertSecretRef can be given the name of a secret containing
|
||||
either or both of \n - a PEM-encoded client certificate (`certFile`)
|
||||
and private key (`keyFile`); - a PEM-encoded CA certificate (`caFile`)
|
||||
\n and whichever are supplied, will be used for connecting to the
|
||||
registry. The client cert and key are useful if you are authenticating
|
||||
with a certificate; the CA cert is useful if you are using a self-signed
|
||||
server certificate."
|
||||
properties:
|
||||
name:
|
||||
description: Name of the referent.
|
||||
|
@ -76,39 +66,34 @@ spec:
|
|||
- name
|
||||
type: object
|
||||
ignore:
|
||||
description: |-
|
||||
Ignore overrides the set of excluded patterns in the .sourceignore format
|
||||
(which is the same as .gitignore). If not provided, a default will be used,
|
||||
consult the documentation for your version to find out what those are.
|
||||
description: Ignore overrides the set of excluded patterns in the
|
||||
.sourceignore format (which is the same as .gitignore). If not provided,
|
||||
a default will be used, consult the documentation for your version
|
||||
to find out what those are.
|
||||
type: string
|
||||
insecure:
|
||||
description: Insecure allows connecting to a non-TLS HTTP container
|
||||
registry.
|
||||
type: boolean
|
||||
interval:
|
||||
description: |-
|
||||
Interval at which the OCIRepository URL is checked for updates.
|
||||
This interval is approximate and may be subject to jitter to ensure
|
||||
efficient use of resources.
|
||||
description: The interval at which to check for image updates.
|
||||
pattern: ^([0-9]+(\.[0-9]+)?(ms|s|m|h))+$
|
||||
type: string
|
||||
layerSelector:
|
||||
description: |-
|
||||
LayerSelector specifies which layer should be extracted from the OCI artifact.
|
||||
When not specified, the first layer found in the artifact is selected.
|
||||
description: LayerSelector specifies which layer should be extracted
|
||||
from the OCI artifact. When not specified, the first layer found
|
||||
in the artifact is selected.
|
||||
properties:
|
||||
mediaType:
|
||||
description: |-
|
||||
MediaType specifies the OCI media type of the layer
|
||||
which should be extracted from the OCI Artifact. The
|
||||
first layer matching this type is selected.
|
||||
description: MediaType specifies the OCI media type of the layer
|
||||
which should be extracted from the OCI Artifact. The first layer
|
||||
matching this type is selected.
|
||||
type: string
|
||||
operation:
|
||||
description: |-
|
||||
Operation specifies how the selected layer should be processed.
|
||||
By default, the layer compressed content is extracted to storage.
|
||||
When the operation is set to 'copy', the layer compressed content
|
||||
is persisted to storage as it is.
|
||||
description: Operation specifies how the selected layer should
|
||||
be processed. By default, the layer compressed content is extracted
|
||||
to storage. When the operation is set to 'copy', the layer compressed
|
||||
content is persisted to storage as it is.
|
||||
enum:
|
||||
- extract
|
||||
- copy
|
||||
|
@ -116,54 +101,34 @@ spec:
|
|||
type: object
|
||||
provider:
|
||||
default: generic
|
||||
description: |-
|
||||
The provider used for authentication, can be 'aws', 'azure', 'gcp' or 'generic'.
|
||||
When not specified, defaults to 'generic'.
|
||||
description: The provider used for authentication, can be 'aws', 'azure',
|
||||
'gcp' or 'generic'. When not specified, defaults to 'generic'.
|
||||
enum:
|
||||
- generic
|
||||
- aws
|
||||
- azure
|
||||
- gcp
|
||||
type: string
|
||||
proxySecretRef:
|
||||
description: |-
|
||||
ProxySecretRef specifies the Secret containing the proxy configuration
|
||||
to use while communicating with the container registry.
|
||||
properties:
|
||||
name:
|
||||
description: Name of the referent.
|
||||
type: string
|
||||
required:
|
||||
- name
|
||||
type: object
|
||||
ref:
|
||||
description: |-
|
||||
The OCI reference to pull and monitor for changes,
|
||||
defaults to the latest tag.
|
||||
description: The OCI reference to pull and monitor for changes, defaults
|
||||
to the latest tag.
|
||||
properties:
|
||||
digest:
|
||||
description: |-
|
||||
Digest is the image digest to pull, takes precedence over SemVer.
|
||||
The value should be in the format 'sha256:<HASH>'.
|
||||
description: Digest is the image digest to pull, takes precedence
|
||||
over SemVer. The value should be in the format 'sha256:<HASH>'.
|
||||
type: string
|
||||
semver:
|
||||
description: |-
|
||||
SemVer is the range of tags to pull selecting the latest within
|
||||
the range, takes precedence over Tag.
|
||||
type: string
|
||||
semverFilter:
|
||||
description: SemverFilter is a regex pattern to filter the tags
|
||||
within the SemVer range.
|
||||
description: SemVer is the range of tags to pull selecting the
|
||||
latest within the range, takes precedence over Tag.
|
||||
type: string
|
||||
tag:
|
||||
description: Tag is the image tag to pull, defaults to latest.
|
||||
type: string
|
||||
type: object
|
||||
secretRef:
|
||||
description: |-
|
||||
SecretRef contains the secret name containing the registry login
|
||||
credentials to resolve image metadata.
|
||||
The secret must be of type kubernetes.io/dockerconfigjson.
|
||||
description: SecretRef contains the secret name containing the registry
|
||||
login credentials to resolve image metadata. The secret must be
|
||||
of type kubernetes.io/dockerconfigjson.
|
||||
properties:
|
||||
name:
|
||||
description: Name of the referent.
|
||||
|
@ -172,10 +137,9 @@ spec:
|
|||
- name
|
||||
type: object
|
||||
serviceAccountName:
|
||||
description: |-
|
||||
ServiceAccountName is the name of the Kubernetes ServiceAccount used to authenticate
|
||||
the image pull if the service account has attached pull secrets. For more information:
|
||||
https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/#add-imagepullsecrets-to-a-service-account
|
||||
description: 'ServiceAccountName is the name of the Kubernetes ServiceAccount
|
||||
used to authenticate the image pull if the service account has attached
|
||||
pull secrets. For more information: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/#add-imagepullsecrets-to-a-service-account'
|
||||
type: string
|
||||
suspend:
|
||||
description: This flag tells the controller to suspend the reconciliation
|
||||
|
@ -188,57 +152,25 @@ spec:
|
|||
pattern: ^([0-9]+(\.[0-9]+)?(ms|s|m))+$
|
||||
type: string
|
||||
url:
|
||||
description: |-
|
||||
URL is a reference to an OCI artifact repository hosted
|
||||
description: URL is a reference to an OCI artifact repository hosted
|
||||
on a remote container registry.
|
||||
pattern: ^oci://.*$
|
||||
type: string
|
||||
verify:
|
||||
description: |-
|
||||
Verify contains the secret name containing the trusted public keys
|
||||
used to verify the signature and specifies which provider to use to check
|
||||
whether OCI image is authentic.
|
||||
description: Verify contains the secret name containing the trusted
|
||||
public keys used to verify the signature and specifies which provider
|
||||
to use to check whether OCI image is authentic.
|
||||
properties:
|
||||
matchOIDCIdentity:
|
||||
description: |-
|
||||
MatchOIDCIdentity specifies the identity matching criteria to use
|
||||
while verifying an OCI artifact which was signed using Cosign keyless
|
||||
signing. The artifact's identity is deemed to be verified if any of the
|
||||
specified matchers match against the identity.
|
||||
items:
|
||||
description: |-
|
||||
OIDCIdentityMatch specifies options for verifying the certificate identity,
|
||||
i.e. the issuer and the subject of the certificate.
|
||||
properties:
|
||||
issuer:
|
||||
description: |-
|
||||
Issuer specifies the regex pattern to match against to verify
|
||||
the OIDC issuer in the Fulcio certificate. The pattern must be a
|
||||
valid Go regular expression.
|
||||
type: string
|
||||
subject:
|
||||
description: |-
|
||||
Subject specifies the regex pattern to match against to verify
|
||||
the identity subject in the Fulcio certificate. The pattern must
|
||||
be a valid Go regular expression.
|
||||
type: string
|
||||
required:
|
||||
- issuer
|
||||
- subject
|
||||
type: object
|
||||
type: array
|
||||
provider:
|
||||
default: cosign
|
||||
description: Provider specifies the technology used to sign the
|
||||
OCI Artifact.
|
||||
enum:
|
||||
- cosign
|
||||
- notation
|
||||
type: string
|
||||
secretRef:
|
||||
description: |-
|
||||
SecretRef specifies the Kubernetes Secret containing the
|
||||
trusted public keys.
|
||||
description: SecretRef specifies the Kubernetes Secret containing
|
||||
the trusted public keys.
|
||||
properties:
|
||||
name:
|
||||
description: Name of the referent.
|
||||
|
@ -262,14 +194,12 @@ spec:
|
|||
description: Artifact represents the output of the last successful
|
||||
OCI Repository sync.
|
||||
properties:
|
||||
digest:
|
||||
description: Digest is the digest of the file in the form of '<algorithm>:<checksum>'.
|
||||
pattern: ^[a-z0-9]+(?:[.+_-][a-z0-9]+)*:[a-zA-Z0-9=_-]+$
|
||||
checksum:
|
||||
description: Checksum is the SHA256 checksum of the Artifact file.
|
||||
type: string
|
||||
lastUpdateTime:
|
||||
description: |-
|
||||
LastUpdateTime is the timestamp corresponding to the last update of the
|
||||
Artifact.
|
||||
description: LastUpdateTime is the timestamp corresponding to
|
||||
the last update of the Artifact.
|
||||
format: date-time
|
||||
type: string
|
||||
metadata:
|
||||
|
@ -278,64 +208,69 @@ spec:
|
|||
description: Metadata holds upstream information such as OCI annotations.
|
||||
type: object
|
||||
path:
|
||||
description: |-
|
||||
Path is the relative file path of the Artifact. It can be used to locate
|
||||
the file in the root of the Artifact storage on the local file system of
|
||||
the controller managing the Source.
|
||||
description: Path is the relative file path of the Artifact. It
|
||||
can be used to locate the file in the root of the Artifact storage
|
||||
on the local file system of the controller managing the Source.
|
||||
type: string
|
||||
revision:
|
||||
description: |-
|
||||
Revision is a human-readable identifier traceable in the origin source
|
||||
system. It can be a Git commit SHA, Git tag, a Helm chart version, etc.
|
||||
description: Revision is a human-readable identifier traceable
|
||||
in the origin source system. It can be a Git commit SHA, Git
|
||||
tag, a Helm chart version, etc.
|
||||
type: string
|
||||
size:
|
||||
description: Size is the number of bytes in the file.
|
||||
format: int64
|
||||
type: integer
|
||||
url:
|
||||
description: |-
|
||||
URL is the HTTP address of the Artifact as exposed by the controller
|
||||
managing the Source. It can be used to retrieve the Artifact for
|
||||
consumption, e.g. by another controller applying the Artifact contents.
|
||||
description: URL is the HTTP address of the Artifact as exposed
|
||||
by the controller managing the Source. It can be used to retrieve
|
||||
the Artifact for consumption, e.g. by another controller applying
|
||||
the Artifact contents.
|
||||
type: string
|
||||
required:
|
||||
- lastUpdateTime
|
||||
- path
|
||||
- revision
|
||||
- url
|
||||
type: object
|
||||
conditions:
|
||||
description: Conditions holds the conditions for the OCIRepository.
|
||||
items:
|
||||
description: Condition contains details for one aspect of the current
|
||||
state of this API Resource.
|
||||
description: "Condition contains details for one aspect of the current
|
||||
state of this API Resource. --- This struct is intended for direct
|
||||
use as an array at the field path .status.conditions. For example,
|
||||
\n type FooStatus struct{ // Represents the observations of a
|
||||
foo's current state. // Known .status.conditions.type are: \"Available\",
|
||||
\"Progressing\", and \"Degraded\" // +patchMergeKey=type // +patchStrategy=merge
|
||||
// +listType=map // +listMapKey=type Conditions []metav1.Condition
|
||||
`json:\"conditions,omitempty\" patchStrategy:\"merge\" patchMergeKey:\"type\"
|
||||
protobuf:\"bytes,1,rep,name=conditions\"` \n // other fields }"
|
||||
properties:
|
||||
lastTransitionTime:
|
||||
description: |-
|
||||
lastTransitionTime is the last time the condition transitioned from one status to another.
|
||||
This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable.
|
||||
description: lastTransitionTime is the last time the condition
|
||||
transitioned from one status to another. This should be when
|
||||
the underlying condition changed. If that is not known, then
|
||||
using the time when the API field changed is acceptable.
|
||||
format: date-time
|
||||
type: string
|
||||
message:
|
||||
description: |-
|
||||
message is a human readable message indicating details about the transition.
|
||||
This may be an empty string.
|
||||
description: message is a human readable message indicating
|
||||
details about the transition. This may be an empty string.
|
||||
maxLength: 32768
|
||||
type: string
|
||||
observedGeneration:
|
||||
description: |-
|
||||
observedGeneration represents the .metadata.generation that the condition was set based upon.
|
||||
For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date
|
||||
with respect to the current state of the instance.
|
||||
description: observedGeneration represents the .metadata.generation
|
||||
that the condition was set based upon. For instance, if .metadata.generation
|
||||
is currently 12, but the .status.conditions[x].observedGeneration
|
||||
is 9, the condition is out of date with respect to the current
|
||||
state of the instance.
|
||||
format: int64
|
||||
minimum: 0
|
||||
type: integer
|
||||
reason:
|
||||
description: |-
|
||||
reason contains a programmatic identifier indicating the reason for the condition's last transition.
|
||||
Producers of specific condition types may define expected values and meanings for this field,
|
||||
and whether the values are considered a guaranteed API.
|
||||
The value should be a CamelCase string.
|
||||
description: reason contains a programmatic identifier indicating
|
||||
the reason for the condition's last transition. Producers
|
||||
of specific condition types may define expected values and
|
||||
meanings for this field, and whether the values are considered
|
||||
a guaranteed API. The value should be a CamelCase string.
|
||||
This field may not be empty.
|
||||
maxLength: 1024
|
||||
minLength: 1
|
||||
|
@@ -350,6 +285,10 @@ spec:
type: string
type:
description: type of condition in CamelCase or in foo.example.com/CamelCase.
--- Many .condition.type values are consistent across resources
like Available, but because arbitrary conditions can be useful
(see .node.status.conditions), the ability to deconflict is
important. The regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt)
maxLength: 316
pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$
type: string
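An aside for readers hand-crafting status conditions: the `type` pattern quoted just above (and the `reason` pattern that appears further down in this CRD) can be checked locally before the API server ever sees the object. A minimal Go sketch, using only the regular expressions copied from the schema; the sample values are made up:

```go
package main

import (
	"fmt"
	"regexp"
)

// Patterns copied verbatim from the Condition schema in this CRD.
var (
	conditionType   = regexp.MustCompile(`^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$`)
	conditionReason = regexp.MustCompile(`^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$`)
)

func main() {
	// "Ready" and a domain-prefixed type are both valid; a leading dash is not.
	for _, t := range []string{"Ready", "foo.example.com/Available", "-Bad"} {
		fmt.Printf("type %-28q valid=%v\n", t, conditionType.MatchString(t) && len(t) <= 316)
	}
	// Reasons must be CamelCase-ish and non-empty (minLength: 1).
	for _, r := range []string{"NewGeneration", ""} {
		fmt.Printf("reason %-26q valid=%v\n", r, conditionReason.MatchString(r) && len(r) >= 1)
	}
}
```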
@@ -361,38 +300,43 @@ spec:
- type
type: object
type: array
contentConfigChecksum:
description: "ContentConfigChecksum is a checksum of all the configurations
related to the content of the source artifact: - .spec.ignore -
.spec.layerSelector observed in .status.observedGeneration version
of the object. This can be used to determine if the content configuration
has changed and the artifact needs to be rebuilt. It has the format
of `<algo>:<checksum>`, for example: `sha256:<checksum>`. \n Deprecated:
Replaced with explicit fields for observed artifact content config
in the status."
type: string
lastHandledReconcileAt:
description: |-
LastHandledReconcileAt holds the value of the most recent
reconcile request value, so a change of the annotation value
can be detected.
description: LastHandledReconcileAt holds the value of the most recent
reconcile request value, so a change of the annotation value can
be detected.
type: string
observedGeneration:
description: ObservedGeneration is the last observed generation.
format: int64
type: integer
observedIgnore:
description: |-
ObservedIgnore is the observed exclusion patterns used for constructing
the source artifact.
description: ObservedIgnore is the observed exclusion patterns used
for constructing the source artifact.
type: string
observedLayerSelector:
description: |-
ObservedLayerSelector is the observed layer selector used for constructing
the source artifact.
description: ObservedLayerSelector is the observed layer selector
used for constructing the source artifact.
properties:
mediaType:
description: |-
MediaType specifies the OCI media type of the layer
which should be extracted from the OCI Artifact. The
first layer matching this type is selected.
description: MediaType specifies the OCI media type of the layer
which should be extracted from the OCI Artifact. The first layer
matching this type is selected.
type: string
operation:
description: |-
Operation specifies how the selected layer should be processed.
By default, the layer compressed content is extracted to storage.
When the operation is set to 'copy', the layer compressed content
is persisted to storage as it is.
description: Operation specifies how the selected layer should
be processed. By default, the layer compressed content is extracted
to storage. When the operation is set to 'copy', the layer compressed
content is persisted to storage as it is.
enum:
- extract
- copy
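The mediaType/operation semantics described above (first matching layer wins; 'extract' unpacks, 'copy' persists the compressed blob as-is) can be mimicked outside the controller. A hedged sketch using github.com/google/go-containerregistry — the reference and media type are placeholders, and this is not the controller's actual code path:

```go
package main

import (
	"fmt"
	"io"
	"os"

	"github.com/google/go-containerregistry/pkg/name"
	v1 "github.com/google/go-containerregistry/pkg/v1"
	"github.com/google/go-containerregistry/pkg/v1/remote"
)

// selectLayer returns the first layer whose media type matches want,
// mirroring the "first layer matching this type is selected" rule above.
func selectLayer(img v1.Image, want string) (v1.Layer, error) {
	layers, err := img.Layers()
	if err != nil {
		return nil, err
	}
	for _, l := range layers {
		mt, err := l.MediaType()
		if err != nil {
			return nil, err
		}
		if string(mt) == want {
			return l, nil
		}
	}
	return nil, fmt.Errorf("no layer with media type %q", want)
}

func main() {
	ref, err := name.ParseReference("ghcr.io/example/artifact:latest") // placeholder reference
	if err != nil {
		panic(err)
	}
	img, err := remote.Image(ref)
	if err != nil {
		panic(err)
	}
	layer, err := selectLayer(img, "application/vnd.cncf.helm.chart.content.v1.tar+gzip")
	if err != nil {
		panic(err)
	}
	// operation: copy -> persist the compressed stream as-is;
	// operation: extract would read layer.Uncompressed() and unpack it instead.
	rc, err := layer.Compressed()
	if err != nil {
		panic(err)
	}
	defer rc.Close()
	_, _ = io.Copy(os.Stdout, rc)
}
```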
@@ -408,414 +352,9 @@ spec:
storage: true
subresources:
status: {}
- additionalPrinterColumns:
- jsonPath: .spec.url
name: URL
type: string
- jsonPath: .status.conditions[?(@.type=="Ready")].status
name: Ready
type: string
- jsonPath: .status.conditions[?(@.type=="Ready")].message
name: Status
type: string
- jsonPath: .metadata.creationTimestamp
name: Age
type: date
deprecated: true
deprecationWarning: v1beta2 OCIRepository is deprecated, upgrade to v1
name: v1beta2
schema:
openAPIV3Schema:
description: OCIRepository is the Schema for the ocirepositories API
properties:
apiVersion:
description: |-
APIVersion defines the versioned schema of this representation of an object.
Servers should convert recognized schemas to the latest internal value, and
may reject unrecognized values.
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources
type: string
kind:
description: |-
Kind is a string value representing the REST resource this object represents.
Servers may infer this from the endpoint the client submits requests to.
Cannot be updated.
In CamelCase.
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
type: string
metadata:
type: object
spec:
description: OCIRepositorySpec defines the desired state of OCIRepository
properties:
certSecretRef:
description: |-
CertSecretRef can be given the name of a Secret containing
either or both of

- a PEM-encoded client certificate (`tls.crt`) and private
key (`tls.key`);
- a PEM-encoded CA certificate (`ca.crt`)

and whichever are supplied, will be used for connecting to the
registry. The client cert and key are useful if you are
authenticating with a certificate; the CA cert is useful if
you are using a self-signed server certificate. The Secret must
be of type `Opaque` or `kubernetes.io/tls`.

Note: Support for the `caFile`, `certFile` and `keyFile` keys have
been deprecated.
properties:
name:
description: Name of the referent.
type: string
required:
- name
type: object
ignore:
description: |-
Ignore overrides the set of excluded patterns in the .sourceignore format
(which is the same as .gitignore). If not provided, a default will be used,
consult the documentation for your version to find out what those are.
type: string
insecure:
description: Insecure allows connecting to a non-TLS HTTP container
registry.
type: boolean
interval:
description: |-
Interval at which the OCIRepository URL is checked for updates.
This interval is approximate and may be subject to jitter to ensure
efficient use of resources.
pattern: ^([0-9]+(\.[0-9]+)?(ms|s|m|h))+$
type: string
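For what it's worth, the interval pattern above is a strict subset of what Go's time.ParseDuration accepts (no 'us' or 'ns' units, for instance), so a candidate value can be pre-checked the same way the API server would, then parsed. A small sketch with invented sample values:

```go
package main

import (
	"fmt"
	"regexp"
	"time"
)

// Pattern copied from the interval field above; it permits only ms, s, m and h.
var interval = regexp.MustCompile(`^([0-9]+(\.[0-9]+)?(ms|s|m|h))+$`)

func main() {
	for _, v := range []string{"5m", "1h30m", "10s", "100us", "1d"} {
		if !interval.MatchString(v) {
			fmt.Printf("%-6s rejected by CRD pattern\n", v)
			continue
		}
		d, err := time.ParseDuration(v)
		fmt.Printf("%-6s ok, parses to %v (err=%v)\n", v, d, err)
	}
}
```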
layerSelector:
description: |-
LayerSelector specifies which layer should be extracted from the OCI artifact.
When not specified, the first layer found in the artifact is selected.
properties:
mediaType:
description: |-
MediaType specifies the OCI media type of the layer
which should be extracted from the OCI Artifact. The
first layer matching this type is selected.
type: string
operation:
description: |-
Operation specifies how the selected layer should be processed.
By default, the layer compressed content is extracted to storage.
When the operation is set to 'copy', the layer compressed content
is persisted to storage as it is.
enum:
- extract
- copy
type: string
type: object
provider:
default: generic
description: |-
The provider used for authentication, can be 'aws', 'azure', 'gcp' or 'generic'.
When not specified, defaults to 'generic'.
enum:
- generic
- aws
- azure
- gcp
type: string
proxySecretRef:
description: |-
ProxySecretRef specifies the Secret containing the proxy configuration
to use while communicating with the container registry.
properties:
name:
description: Name of the referent.
type: string
required:
- name
type: object
ref:
description: |-
The OCI reference to pull and monitor for changes,
defaults to the latest tag.
properties:
digest:
description: |-
Digest is the image digest to pull, takes precedence over SemVer.
The value should be in the format 'sha256:<HASH>'.
type: string
semver:
description: |-
SemVer is the range of tags to pull selecting the latest within
the range, takes precedence over Tag.
type: string
semverFilter:
description: SemverFilter is a regex pattern to filter the tags
within the SemVer range.
type: string
tag:
description: Tag is the image tag to pull, defaults to latest.
type: string
type: object
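Reading the ref precedence above (digest over semver over tag), the semver path amounts to constraint filtering plus picking the highest match. A sketch with github.com/Masterminds/semver/v3, using made-up tags; the controller's real implementation may differ:

```go
package main

import (
	"fmt"
	"sort"

	"github.com/Masterminds/semver/v3"
)

func main() {
	tags := []string{"6.0.0", "6.1.4", "6.2.0-rc.1", "5.9.9"} // hypothetical registry tags
	c, err := semver.NewConstraint(">=6.0.0 <6.2.0")
	if err != nil {
		panic(err)
	}

	var matches []*semver.Version
	for _, t := range tags {
		v, err := semver.NewVersion(t)
		if err != nil {
			continue // skip non-semver tags
		}
		if c.Check(v) {
			matches = append(matches, v)
		}
	}
	if len(matches) == 0 {
		panic("no tag in range")
	}
	// Highest version within the range wins, matching "selecting the latest
	// within the range" in the field description.
	sort.Sort(semver.Collection(matches))
	fmt.Println("selected tag:", matches[len(matches)-1].Original())
}
```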
secretRef:
description: |-
SecretRef contains the secret name containing the registry login
credentials to resolve image metadata.
The secret must be of type kubernetes.io/dockerconfigjson.
properties:
name:
description: Name of the referent.
type: string
required:
- name
type: object
serviceAccountName:
description: |-
ServiceAccountName is the name of the Kubernetes ServiceAccount used to authenticate
the image pull if the service account has attached pull secrets. For more information:
https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/#add-imagepullsecrets-to-a-service-account
type: string
suspend:
description: This flag tells the controller to suspend the reconciliation
of this source.
type: boolean
timeout:
default: 60s
description: The timeout for remote OCI Repository operations like
pulling, defaults to 60s.
pattern: ^([0-9]+(\.[0-9]+)?(ms|s|m))+$
type: string
url:
description: |-
URL is a reference to an OCI artifact repository hosted
on a remote container registry.
pattern: ^oci://.*$
type: string
verify:
description: |-
Verify contains the secret name containing the trusted public keys
used to verify the signature and specifies which provider to use to check
whether OCI image is authentic.
properties:
matchOIDCIdentity:
description: |-
MatchOIDCIdentity specifies the identity matching criteria to use
while verifying an OCI artifact which was signed using Cosign keyless
signing. The artifact's identity is deemed to be verified if any of the
specified matchers match against the identity.
items:
description: |-
OIDCIdentityMatch specifies options for verifying the certificate identity,
i.e. the issuer and the subject of the certificate.
properties:
issuer:
description: |-
Issuer specifies the regex pattern to match against to verify
the OIDC issuer in the Fulcio certificate. The pattern must be a
valid Go regular expression.
type: string
subject:
description: |-
Subject specifies the regex pattern to match against to verify
the identity subject in the Fulcio certificate. The pattern must
be a valid Go regular expression.
type: string
required:
- issuer
- subject
type: object
type: array
provider:
default: cosign
description: Provider specifies the technology used to sign the
OCI Artifact.
enum:
- cosign
- notation
type: string
secretRef:
description: |-
SecretRef specifies the Kubernetes Secret containing the
trusted public keys.
properties:
name:
description: Name of the referent.
type: string
required:
- name
type: object
required:
- provider
type: object
required:
- interval
- url
type: object
status:
default:
observedGeneration: -1
description: OCIRepositoryStatus defines the observed state of OCIRepository
properties:
artifact:
description: Artifact represents the output of the last successful
OCI Repository sync.
properties:
digest:
description: Digest is the digest of the file in the form of '<algorithm>:<checksum>'.
pattern: ^[a-z0-9]+(?:[.+_-][a-z0-9]+)*:[a-zA-Z0-9=_-]+$
type: string
lastUpdateTime:
description: |-
LastUpdateTime is the timestamp corresponding to the last update of the
Artifact.
format: date-time
type: string
metadata:
additionalProperties:
type: string
description: Metadata holds upstream information such as OCI annotations.
type: object
path:
description: |-
Path is the relative file path of the Artifact. It can be used to locate
the file in the root of the Artifact storage on the local file system of
the controller managing the Source.
type: string
revision:
description: |-
Revision is a human-readable identifier traceable in the origin source
system. It can be a Git commit SHA, Git tag, a Helm chart version, etc.
type: string
size:
description: Size is the number of bytes in the file.
format: int64
type: integer
url:
description: |-
URL is the HTTP address of the Artifact as exposed by the controller
managing the Source. It can be used to retrieve the Artifact for
consumption, e.g. by another controller applying the Artifact contents.
type: string
required:
- lastUpdateTime
- path
- revision
- url
type: object
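Since the artifact object above advertises both a url and a digest in '<algorithm>:<checksum>' form, a consumer can verify a download end-to-end. A hedged sketch with github.com/opencontainers/go-digest; the digest value here is the sha256 of empty input, purely illustrative:

```go
package main

import (
	"fmt"
	"strings"

	"github.com/opencontainers/go-digest"
)

func main() {
	// Advertised digest, as it would appear in .status.artifact.digest.
	want, err := digest.Parse("sha256:e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855")
	if err != nil {
		panic(err)
	}

	// Pretend this reader is the body of a GET against .status.artifact.url.
	body := strings.NewReader("")

	// Hash the stream with the same algorithm the digest names, then compare.
	got, err := want.Algorithm().FromReader(body)
	if err != nil {
		panic(err)
	}
	fmt.Println("verified:", got == want) // true only if the content matches the checksum
}
```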
conditions:
description: Conditions holds the conditions for the OCIRepository.
items:
description: Condition contains details for one aspect of the current
state of this API Resource.
properties:
lastTransitionTime:
description: |-
lastTransitionTime is the last time the condition transitioned from one status to another.
This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable.
format: date-time
type: string
message:
description: |-
message is a human readable message indicating details about the transition.
This may be an empty string.
maxLength: 32768
type: string
observedGeneration:
description: |-
observedGeneration represents the .metadata.generation that the condition was set based upon.
For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date
with respect to the current state of the instance.
format: int64
minimum: 0
type: integer
reason:
description: |-
reason contains a programmatic identifier indicating the reason for the condition's last transition.
Producers of specific condition types may define expected values and meanings for this field,
and whether the values are considered a guaranteed API.
The value should be a CamelCase string.
This field may not be empty.
maxLength: 1024
minLength: 1
pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$
type: string
status:
description: status of the condition, one of True, False, Unknown.
enum:
- "True"
- "False"
- Unknown
type: string
type:
description: type of condition in CamelCase or in foo.example.com/CamelCase.
maxLength: 316
pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$
type: string
required:
- lastTransitionTime
- message
- reason
- status
- type
type: object
type: array
contentConfigChecksum:
description: |-
ContentConfigChecksum is a checksum of all the configurations related to
the content of the source artifact:
- .spec.ignore
- .spec.layerSelector
observed in .status.observedGeneration version of the object. This can
be used to determine if the content configuration has changed and the
artifact needs to be rebuilt.
It has the format of `<algo>:<checksum>`, for example: `sha256:<checksum>`.

Deprecated: Replaced with explicit fields for observed artifact content
config in the status.
type: string
lastHandledReconcileAt:
description: |-
LastHandledReconcileAt holds the value of the most recent
reconcile request value, so a change of the annotation value
can be detected.
type: string
observedGeneration:
description: ObservedGeneration is the last observed generation.
format: int64
type: integer
observedIgnore:
description: |-
ObservedIgnore is the observed exclusion patterns used for constructing
the source artifact.
type: string
observedLayerSelector:
description: |-
ObservedLayerSelector is the observed layer selector used for constructing
the source artifact.
properties:
mediaType:
description: |-
MediaType specifies the OCI media type of the layer
which should be extracted from the OCI Artifact. The
first layer matching this type is selected.
type: string
operation:
description: |-
Operation specifies how the selected layer should be processed.
By default, the layer compressed content is extracted to storage.
When the operation is set to 'copy', the layer compressed content
is persisted to storage as it is.
enum:
- extract
- copy
type: string
type: object
url:
description: URL is the download link for the artifact output of the
last OCI Repository sync.
type: string
type: object
type: object
served: true
storage: false
subresources:
status: {}
status:
acceptedNames:
kind: ""
plural: ""
conditions: []
storedVersions: []
@@ -6,4 +6,4 @@ resources:
images:
- name: fluxcd/source-controller
newName: fluxcd/source-controller
newTag: v1.6.0
newTag: v0.32.1
@@ -2,6 +2,7 @@
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
creationTimestamp: null
name: manager-role
rules:
- apiGroups:

@@ -19,20 +20,10 @@ rules:
- get
- list
- watch
- apiGroups:
- ""
resources:
- serviceaccounts/token
verbs:
- create
- apiGroups:
- source.toolkit.fluxcd.io
resources:
- buckets
- gitrepositories
- helmcharts
- helmrepositories
- ocirepositories
verbs:
- create
- delete

@@ -45,10 +36,6 @@ rules:
- source.toolkit.fluxcd.io
resources:
- buckets/finalizers
- gitrepositories/finalizers
- helmcharts/finalizers
- helmrepositories/finalizers
- ocirepositories/finalizers
verbs:
- create
- delete
@@ -59,9 +46,125 @@ rules:
- source.toolkit.fluxcd.io
resources:
- buckets/status
verbs:
- get
- patch
- update
- apiGroups:
- source.toolkit.fluxcd.io
resources:
- gitrepositories
verbs:
- create
- delete
- get
- list
- patch
- update
- watch
- apiGroups:
- source.toolkit.fluxcd.io
resources:
- gitrepositories/finalizers
verbs:
- create
- delete
- get
- patch
- update
- apiGroups:
- source.toolkit.fluxcd.io
resources:
- gitrepositories/status
verbs:
- get
- patch
- update
- apiGroups:
- source.toolkit.fluxcd.io
resources:
- helmcharts
verbs:
- create
- delete
- get
- list
- patch
- update
- watch
- apiGroups:
- source.toolkit.fluxcd.io
resources:
- helmcharts/finalizers
verbs:
- create
- delete
- get
- patch
- update
- apiGroups:
- source.toolkit.fluxcd.io
resources:
- helmcharts/status
verbs:
- get
- patch
- update
- apiGroups:
- source.toolkit.fluxcd.io
resources:
- helmrepositories
verbs:
- create
- delete
- get
- list
- patch
- update
- watch
- apiGroups:
- source.toolkit.fluxcd.io
resources:
- helmrepositories/finalizers
verbs:
- create
- delete
- get
- patch
- update
- apiGroups:
- source.toolkit.fluxcd.io
resources:
- helmrepositories/status
verbs:
- get
- patch
- update
- apiGroups:
- source.toolkit.fluxcd.io
resources:
- ocirepositories
verbs:
- create
- delete
- get
- list
- patch
- update
- watch
- apiGroups:
- source.toolkit.fluxcd.io
resources:
- ocirepositories/finalizers
verbs:
- create
- delete
- get
- patch
- update
- apiGroups:
- source.toolkit.fluxcd.io
resources:
- ocirepositories/status
verbs:
- get
@@ -1,4 +1,4 @@
apiVersion: source.toolkit.fluxcd.io/v1
apiVersion: source.toolkit.fluxcd.io/v1beta2
kind: Bucket
metadata:
name: bucket-sample

@@ -1,4 +1,4 @@
apiVersion: source.toolkit.fluxcd.io/v1
apiVersion: source.toolkit.fluxcd.io/v1beta2
kind: GitRepository
metadata:
name: gitrepository-sample

@@ -1,4 +1,4 @@
apiVersion: source.toolkit.fluxcd.io/v1
apiVersion: source.toolkit.fluxcd.io/v1beta2
kind: HelmChart
metadata:
name: helmchart-git-sample

@@ -1,4 +1,4 @@
apiVersion: source.toolkit.fluxcd.io/v1
apiVersion: source.toolkit.fluxcd.io/v1beta2
kind: HelmChart
metadata:
name: helmchart-sample-oci

@@ -1,12 +1,11 @@
apiVersion: source.toolkit.fluxcd.io/v1
apiVersion: source.toolkit.fluxcd.io/v1beta2
kind: HelmChart
metadata:
name: helmchart-sample
spec:
chart: podinfo
version: '6.x'
version: '>=2.0.0 <3.0.0'
sourceRef:
kind: HelmRepository
name: helmrepository-sample
interval: 1m
ignoreMissingValuesFiles: true

@@ -1,4 +1,4 @@
apiVersion: source.toolkit.fluxcd.io/v1
apiVersion: source.toolkit.fluxcd.io/v1beta2
kind: HelmRepository
metadata:
name: helmrepository-sample-oci

@@ -1,4 +1,4 @@
apiVersion: source.toolkit.fluxcd.io/v1
apiVersion: source.toolkit.fluxcd.io/v1beta2
kind: HelmRepository
metadata:
name: helmrepository-sample

@@ -1,4 +1,4 @@
apiVersion: source.toolkit.fluxcd.io/v1
apiVersion: source.toolkit.fluxcd.io/v1beta2
kind: OCIRepository
metadata:
name: ocirepository-sample

@@ -1,5 +1,5 @@
---
apiVersion: source.toolkit.fluxcd.io/v1
apiVersion: source.toolkit.fluxcd.io/v1beta1
kind: Bucket
metadata:
name: podinfo
@@ -1,10 +1,29 @@
apiVersion: source.toolkit.fluxcd.io/v1
apiVersion: source.toolkit.fluxcd.io/v1beta1
kind: GitRepository
metadata:
name: large-repo
name: large-repo-go-git
spec:
gitImplementation: go-git
interval: 10m
timeout: 2m
url: https://github.com/nodejs/node.git
url: https://github.com/hashgraph/hedera-mirror-node.git
ref:
branch: main
ignore: |
/*
!/charts
---
apiVersion: source.toolkit.fluxcd.io/v1beta1
kind: GitRepository
metadata:
name: large-repo-libgit2
spec:
gitImplementation: libgit2
interval: 10m
timeout: 2m
url: https://github.com/hashgraph/hedera-mirror-node.git
ref:
branch: main
ignore: |
/*
!/charts

@@ -1,5 +1,5 @@
---
apiVersion: source.toolkit.fluxcd.io/v1
apiVersion: source.toolkit.fluxcd.io/v1beta1
kind: Bucket
metadata:
name: charts

@@ -13,7 +13,7 @@ spec:
secretRef:
name: minio-credentials
---
apiVersion: source.toolkit.fluxcd.io/v1
apiVersion: source.toolkit.fluxcd.io/v1beta1
kind: HelmChart
metadata:
name: helmchart-bucket
@@ -1,25 +0,0 @@
---
apiVersion: source.toolkit.fluxcd.io/v1
kind: HelmRepository
metadata:
name: podinfo-notation
spec:
url: oci://ghcr.io/stefanprodan/charts
type: "oci"
interval: 1m
---
apiVersion: source.toolkit.fluxcd.io/v1
kind: HelmChart
metadata:
name: podinfo-notation
spec:
chart: podinfo
sourceRef:
kind: HelmRepository
name: podinfo-notation
version: '6.6.0'
interval: 1m
verify:
provider: notation
secretRef:
name: notation-config

@@ -1,5 +1,5 @@
---
apiVersion: source.toolkit.fluxcd.io/v1
apiVersion: source.toolkit.fluxcd.io/v1beta2
kind: HelmRepository
metadata:
name: podinfo

@@ -8,7 +8,7 @@ spec:
type: "oci"
interval: 1m
---
apiVersion: source.toolkit.fluxcd.io/v1
apiVersion: source.toolkit.fluxcd.io/v1beta2
kind: HelmChart
metadata:
name: podinfo

@@ -20,7 +20,7 @@ spec:
version: '6.1.*'
interval: 1m
---
apiVersion: source.toolkit.fluxcd.io/v1
apiVersion: source.toolkit.fluxcd.io/v1beta2
kind: HelmChart
metadata:
name: podinfo-keyless

@@ -1,5 +1,5 @@
---
apiVersion: source.toolkit.fluxcd.io/v1
apiVersion: source.toolkit.fluxcd.io/v1beta2
kind: OCIRepository
metadata:
name: podinfo-deploy-signed-with-key

@@ -1,5 +1,5 @@
---
apiVersion: source.toolkit.fluxcd.io/v1
apiVersion: source.toolkit.fluxcd.io/v1beta2
kind: OCIRepository
metadata:
name: podinfo-deploy-signed-with-keyless

@@ -1,14 +0,0 @@
---
apiVersion: source.toolkit.fluxcd.io/v1
kind: OCIRepository
metadata:
name: podinfo-deploy-signed-with-notation
spec:
interval: 5m
url: oci://ghcr.io/stefanprodan/podinfo-deploy
ref:
semver: "6.6.x"
verify:
provider: notation
secretRef:
name: notation-config
@@ -14,9 +14,9 @@ See the License for the specific language governing permissions and
limitations under the License.
*/

package controller
package controllers

import sourcev1 "github.com/fluxcd/source-controller/api/v1"
import sourcev1 "github.com/fluxcd/source-controller/api/v1beta2"

type artifactSet []*sourcev1.Artifact
@@ -37,3 +25,25 @@ outer:
}
return false
}

// hasArtifactUpdated returns true if any of the revisions in the current artifacts
// does not match any of the artifacts in the updated artifacts
// NOTE: artifactSet is a replacement for this. Remove this once it's not used
// anywhere.
func hasArtifactUpdated(current []*sourcev1.Artifact, updated []*sourcev1.Artifact) bool {
if len(current) != len(updated) {
return true
}

OUTER:
for _, c := range current {
for _, u := range updated {
if u.HasRevision(c.Revision) {
continue OUTER
}
}
return true
}

return false
}
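The OUTER-labelled loop above implements a bag comparison: same length, and every current revision present somewhere in updated. Re-stated over plain strings so it runs standalone — the flux Artifact type is swapped for a string slice purely for illustration:

```go
package main

import "fmt"

// hasUpdated mirrors hasArtifactUpdated above: true when the lengths differ
// or some current revision has no counterpart in updated.
func hasUpdated(current, updated []string) bool {
	if len(current) != len(updated) {
		return true
	}
outer:
	for _, c := range current {
		for _, u := range updated {
			if u == c {
				continue outer
			}
		}
		return true
	}
	return false
}

func main() {
	fmt.Println(hasUpdated([]string{"rev1", "rev2"}, []string{"rev2", "rev1"})) // false: same set
	fmt.Println(hasUpdated([]string{"rev1", "rev2"}, []string{"rev1", "rev3"})) // true: rev2 gone
}
```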
@@ -14,12 +14,12 @@ See the License for the specific language governing permissions and
limitations under the License.
*/

package controller
package controllers

import (
"fmt"

sourcev1 "github.com/fluxcd/source-controller/api/v1"
sourcev1 "github.com/fluxcd/source-controller/api/v1beta2"
. "github.com/onsi/gomega"
"github.com/onsi/gomega/types"
)
@@ -51,6 +51,9 @@ func (m matchArtifact) Match(actual interface{}) (success bool, err error) {
if ok, err = Equal(m.expected.Revision).Match(actualArtifact.Revision); !ok {
return ok, err
}
if ok, err = Equal(m.expected.Checksum).Match(actualArtifact.Checksum); !ok {
return ok, err
}
if ok, err = Equal(m.expected.Size).Match(actualArtifact.Size); !ok {
return ok, err
}
@@ -14,7 +14,7 @@ See the License for the specific language governing permissions and
limitations under the License.
*/

package controller
package controllers

import (
"testing"
@@ -14,52 +14,46 @@ See the License for the specific language governing permissions and
limitations under the License.
*/

package controller
package controllers

import (
"context"
stdtls "crypto/tls"
"crypto/sha256"
"errors"
"fmt"
"net/url"
"os"
"path/filepath"
"sort"
"strings"
"sync"
"time"

"github.com/opencontainers/go-digest"
"github.com/fluxcd/source-controller/pkg/azure"
"golang.org/x/sync/errgroup"
"golang.org/x/sync/semaphore"
corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/types"
kuberecorder "k8s.io/client-go/tools/record"
"k8s.io/client-go/util/workqueue"
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/controller"
"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
"sigs.k8s.io/controller-runtime/pkg/predicate"
"sigs.k8s.io/controller-runtime/pkg/reconcile"
"sigs.k8s.io/controller-runtime/pkg/ratelimiter"

eventv1 "github.com/fluxcd/pkg/apis/event/v1beta1"
"github.com/fluxcd/pkg/apis/meta"
"github.com/fluxcd/pkg/runtime/conditions"
helper "github.com/fluxcd/pkg/runtime/controller"
"github.com/fluxcd/pkg/runtime/jitter"
"github.com/fluxcd/pkg/runtime/patch"
"github.com/fluxcd/pkg/runtime/predicates"
rreconcile "github.com/fluxcd/pkg/runtime/reconcile"
"github.com/fluxcd/pkg/sourceignore"

sourcev1 "github.com/fluxcd/source-controller/api/v1"
intdigest "github.com/fluxcd/source-controller/internal/digest"
eventv1 "github.com/fluxcd/pkg/apis/event/v1beta1"
"github.com/fluxcd/pkg/sourceignore"
sourcev1 "github.com/fluxcd/source-controller/api/v1beta2"
serror "github.com/fluxcd/source-controller/internal/error"
"github.com/fluxcd/source-controller/internal/index"
sreconcile "github.com/fluxcd/source-controller/internal/reconcile"
"github.com/fluxcd/source-controller/internal/reconcile/summarize"
"github.com/fluxcd/source-controller/internal/tls"
"github.com/fluxcd/source-controller/pkg/azure"
"github.com/fluxcd/source-controller/pkg/gcp"
"github.com/fluxcd/source-controller/pkg/minio"
)
@@ -77,7 +71,7 @@ import (
const maxConcurrentBucketFetches = 100

// bucketReadyCondition contains the information required to summarize a
// v1.Bucket Ready Condition.
// v1beta2.Bucket Ready Condition.
var bucketReadyCondition = summarize.Conditions{
Target: meta.ReadyCondition,
Owned: []string{
@@ -117,7 +111,7 @@ var bucketFailConditions = []string{
// +kubebuilder:rbac:groups=source.toolkit.fluxcd.io,resources=buckets/finalizers,verbs=get;create;update;patch;delete
// +kubebuilder:rbac:groups="",resources=secrets,verbs=get;list;watch

// BucketReconciler reconciles a v1.Bucket object.
// BucketReconciler reconciles a v1beta2.Bucket object.
type BucketReconciler struct {
client.Client
kuberecorder.EventRecorder
@@ -125,12 +119,11 @@ type BucketReconciler struct {

Storage *Storage
ControllerName string

patchOptions []patch.Option
}

type BucketReconcilerOptions struct {
RateLimiter workqueue.TypedRateLimiter[reconcile.Request]
MaxConcurrentReconciles int
RateLimiter ratelimiter.RateLimiter
}

// BucketProvider is an interface for fetching objects from a storage provider
@@ -147,7 +140,7 @@ type BucketProvider interface {
// bucket, calling visit for every item.
// If the underlying client or the visit callback returns an error,
// it returns early.
VisitObjects(ctx context.Context, bucketName string, prefix string, visit func(key, etag string) error) error
VisitObjects(ctx context.Context, bucketName string, visit func(key, etag string) error) error
// ObjectIsNotFound returns true if the given error indicates an object
// could not be found.
ObjectIsNotFound(error) bool
@@ -155,23 +148,99 @@ type BucketProvider interface {
Close(context.Context)
}

// bucketReconcileFunc is the function type for all the v1.Bucket
// bucketReconcileFunc is the function type for all the v1beta2.Bucket
// (sub)reconcile functions. The type implementations are grouped and
// executed serially to perform the complete reconcile of the object.
type bucketReconcileFunc func(ctx context.Context, sp *patch.SerialPatcher, obj *sourcev1.Bucket, index *index.Digester, dir string) (sreconcile.Result, error)
type bucketReconcileFunc func(ctx context.Context, obj *sourcev1.Bucket, index *etagIndex, dir string) (sreconcile.Result, error)

// etagIndex is an index of storage object keys and their Etag values.
type etagIndex struct {
sync.RWMutex
index map[string]string
}

// newEtagIndex returns a new etagIndex with an empty initialized index.
func newEtagIndex() *etagIndex {
return &etagIndex{
index: make(map[string]string),
}
}

func (i *etagIndex) Add(key, etag string) {
i.Lock()
defer i.Unlock()
i.index[key] = etag
}

func (i *etagIndex) Delete(key string) {
i.Lock()
defer i.Unlock()
delete(i.index, key)
}

func (i *etagIndex) Get(key string) string {
i.RLock()
defer i.RUnlock()
return i.index[key]
}

func (i *etagIndex) Has(key string) bool {
i.RLock()
defer i.RUnlock()
_, ok := i.index[key]
return ok
}

func (i *etagIndex) Index() map[string]string {
i.RLock()
defer i.RUnlock()
index := make(map[string]string)
for k, v := range i.index {
index[k] = v
}
return index
}

func (i *etagIndex) Len() int {
i.RLock()
defer i.RUnlock()
return len(i.index)
}

// Revision calculates the SHA256 checksum of the index.
// The keys are stable sorted, and the SHA256 sum is then calculated for the
// string representation of the key/value pairs, each pair written on a newline
// with a space between them. The sum result is returned as a string.
func (i *etagIndex) Revision() (string, error) {
i.RLock()
defer i.RUnlock()
keyIndex := make([]string, 0, len(i.index))
for k := range i.index {
keyIndex = append(keyIndex, k)
}

sort.Strings(keyIndex)
sum := sha256.New()
for _, k := range keyIndex {
if _, err := sum.Write([]byte(fmt.Sprintf("%s %s\n", k, i.index[k]))); err != nil {
return "", err
}
}
return fmt.Sprintf("%x", sum.Sum(nil)), nil
}
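The Revision method above is deterministic because keys are sorted before hashing. A standalone sketch of the same computation, useful for reproducing a Bucket revision from a known key/etag listing; the keys and etags here are invented:

```go
package main

import (
	"crypto/sha256"
	"fmt"
	"sort"
)

// revision re-implements etagIndex.Revision above: sort keys, hash
// "key etag\n" pairs, and hex-encode the sum.
func revision(index map[string]string) string {
	keys := make([]string, 0, len(index))
	for k := range index {
		keys = append(keys, k)
	}
	sort.Strings(keys)

	sum := sha256.New()
	for _, k := range keys {
		fmt.Fprintf(sum, "%s %s\n", k, index[k])
	}
	return fmt.Sprintf("%x", sum.Sum(nil))
}

func main() {
	idx := map[string]string{
		"charts/podinfo.tgz": "etag-abc", // hypothetical object keys and etags
		"manifests/app.yaml": "etag-def",
	}
	fmt.Println(revision(idx)) // stable regardless of map iteration order
}
```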

func (r *BucketReconciler) SetupWithManager(mgr ctrl.Manager) error {
return r.SetupWithManagerAndOptions(mgr, BucketReconcilerOptions{})
}

func (r *BucketReconciler) SetupWithManagerAndOptions(mgr ctrl.Manager, opts BucketReconcilerOptions) error {
r.patchOptions = getPatchOptions(bucketReadyCondition.Owned, r.ControllerName)

return ctrl.NewControllerManagedBy(mgr).
For(&sourcev1.Bucket{}).
WithEventFilter(predicate.Or(predicate.GenerationChangedPredicate{}, predicates.ReconcileRequestedPredicate{})).
WithOptions(controller.Options{
RateLimiter: opts.RateLimiter,
MaxConcurrentReconciles: opts.MaxConcurrentReconciles,
RateLimiter: opts.RateLimiter,
RecoverPanic: true,
}).
Complete(r)
}
@@ -186,8 +255,14 @@ func (r *BucketReconciler) Reconcile(ctx context.Context, req ctrl.Request) (res
return ctrl.Result{}, client.IgnoreNotFound(err)
}

// Record suspended status metric
r.RecordSuspend(ctx, obj, obj.Spec.Suspend)

// Initialize the patch helper with the current version of the object.
serialPatcher := patch.NewSerialPatcher(obj, r.Client)
patchHelper, err := patch.NewHelper(obj, r.Client)
if err != nil {
return ctrl.Result{}, err
}

// recResult stores the abstracted reconcile result.
var recResult sreconcile.Result
@@ -195,43 +270,39 @@ func (r *BucketReconciler) Reconcile(ctx context.Context, req ctrl.Request) (res
// Always attempt to patch the object and status after each reconciliation
// NOTE: The final runtime result and error are set in this block.
defer func() {
summarizeHelper := summarize.NewHelper(r.EventRecorder, serialPatcher)
summarizeHelper := summarize.NewHelper(r.EventRecorder, patchHelper)
summarizeOpts := []summarize.Option{
summarize.WithConditions(bucketReadyCondition),
summarize.WithReconcileResult(recResult),
summarize.WithReconcileError(retErr),
summarize.WithIgnoreNotFound(),
summarize.WithProcessors(
summarize.ErrorActionHandler,
summarize.RecordContextualError,
summarize.RecordReconcileReq,
),
summarize.WithResultBuilder(sreconcile.AlwaysRequeueResultBuilder{
RequeueAfter: jitter.JitteredIntervalDuration(obj.GetRequeueAfter()),
}),
summarize.WithResultBuilder(sreconcile.AlwaysRequeueResultBuilder{RequeueAfter: obj.GetRequeueAfter()}),
summarize.WithPatchFieldOwner(r.ControllerName),
}
result, retErr = summarizeHelper.SummarizeAndPatch(ctx, obj, summarizeOpts...)

// Always record duration metrics.
// Always record readiness and duration metrics
r.Metrics.RecordReadiness(ctx, obj)
r.Metrics.RecordDuration(ctx, obj, start)
}()

// Examine if the object is under deletion.
if !obj.ObjectMeta.DeletionTimestamp.IsZero() {
recResult, retErr = r.reconcileDelete(ctx, obj)
return
}

// Add finalizer first if not exist to avoid the race condition between init
// and delete.
// Note: Finalizers in general can only be added when the deletionTimestamp
// is not set.
// Add finalizer first if not exist to avoid the race condition between init and delete
if !controllerutil.ContainsFinalizer(obj, sourcev1.SourceFinalizer) {
controllerutil.AddFinalizer(obj, sourcev1.SourceFinalizer)
recResult = sreconcile.ResultRequeue
return
}

// Examine if the object is under deletion
if !obj.ObjectMeta.DeletionTimestamp.IsZero() {
recResult, retErr = r.reconcileDelete(ctx, obj)
return
}

// Return if the object is suspended.
if obj.Spec.Suspend {
log.Info("reconciliation is suspended for this object")
@ -245,45 +316,29 @@ func (r *BucketReconciler) Reconcile(ctx context.Context, req ctrl.Request) (res
|
|||
r.reconcileSource,
|
||||
r.reconcileArtifact,
|
||||
}
|
||||
recResult, retErr = r.reconcile(ctx, serialPatcher, obj, reconcilers)
|
||||
recResult, retErr = r.reconcile(ctx, obj, reconcilers)
|
||||
return
|
||||
}
|
||||
|
||||
// reconcile iterates through the bucketReconcileFunc tasks for the
|
||||
// object. It returns early on the first call that returns
|
||||
// reconcile.ResultRequeue, or produces an error.
|
||||
func (r *BucketReconciler) reconcile(ctx context.Context, sp *patch.SerialPatcher, obj *sourcev1.Bucket, reconcilers []bucketReconcileFunc) (sreconcile.Result, error) {
|
||||
func (r *BucketReconciler) reconcile(ctx context.Context, obj *sourcev1.Bucket, reconcilers []bucketReconcileFunc) (sreconcile.Result, error) {
|
||||
oldObj := obj.DeepCopy()
|
||||
|
||||
rreconcile.ProgressiveStatus(false, obj, meta.ProgressingReason, "reconciliation in progress")
|
||||
|
||||
var recAtVal string
|
||||
if v, ok := meta.ReconcileAnnotationValue(obj.GetAnnotations()); ok {
|
||||
recAtVal = v
|
||||
}
|
||||
|
||||
// Persist reconciling if generation differs or reconciliation is requested.
|
||||
switch {
|
||||
case obj.Generation != obj.Status.ObservedGeneration:
|
||||
rreconcile.ProgressiveStatus(false, obj, meta.ProgressingReason,
|
||||
"processing object: new generation %d -> %d", obj.Status.ObservedGeneration, obj.Generation)
|
||||
if err := sp.Patch(ctx, obj, r.patchOptions...); err != nil {
|
||||
return sreconcile.ResultEmpty, serror.NewGeneric(err, sourcev1.PatchOperationFailedReason)
|
||||
}
|
||||
case recAtVal != obj.Status.GetLastHandledReconcileRequest():
|
||||
if err := sp.Patch(ctx, obj, r.patchOptions...); err != nil {
|
||||
return sreconcile.ResultEmpty, serror.NewGeneric(err, sourcev1.PatchOperationFailedReason)
|
||||
}
|
||||
// Mark as reconciling if generation differs.
|
||||
if obj.Generation != obj.Status.ObservedGeneration {
|
||||
conditions.MarkReconciling(obj, "NewGeneration", "reconciling new object generation (%d)", obj.Generation)
|
||||
}
|
||||
|
||||
// Create temp working dir
|
||||
tmpDir, err := os.MkdirTemp("", fmt.Sprintf("%s-%s-%s-", obj.Kind, obj.Namespace, obj.Name))
|
||||
if err != nil {
|
||||
e := serror.NewGeneric(
|
||||
fmt.Errorf("failed to create temporary working directory: %w", err),
|
||||
sourcev1.DirCreationFailedReason,
|
||||
)
|
||||
conditions.MarkTrue(obj, sourcev1.StorageOperationFailedCondition, e.Reason, "%s", e)
|
||||
e := &serror.Event{
|
||||
Err: fmt.Errorf("failed to create temporary working directory: %w", err),
|
||||
Reason: sourcev1.DirCreationFailedReason,
|
||||
}
|
||||
conditions.MarkTrue(obj, sourcev1.StorageOperationFailedCondition, e.Reason, e.Err.Error())
|
||||
return sreconcile.ResultEmpty, e
|
||||
}
|
||||
defer func() {
|
||||
|
@ -297,11 +352,11 @@ func (r *BucketReconciler) reconcile(ctx context.Context, sp *patch.SerialPatche
|
|||
var (
|
||||
res sreconcile.Result
|
||||
resErr error
|
||||
index = index.NewDigester()
|
||||
index = newEtagIndex()
|
||||
)
|
||||
|
||||
for _, rec := range reconcilers {
|
||||
recResult, err := rec(ctx, sp, obj, index, tmpDir)
|
||||
recResult, err := rec(ctx, obj, index, tmpDir)
|
||||
// Exit immediately on ResultRequeue.
|
||||
if recResult == sreconcile.ResultRequeue {
|
||||
return sreconcile.ResultRequeue, nil
|
||||
|
@ -323,19 +378,24 @@ func (r *BucketReconciler) reconcile(ctx context.Context, sp *patch.SerialPatche
|
|||
}
|
||||
|
||||
// notify emits notification related to the reconciliation.
|
||||
func (r *BucketReconciler) notify(ctx context.Context, oldObj, newObj *sourcev1.Bucket, index *index.Digester, res sreconcile.Result, resErr error) {
|
||||
func (r *BucketReconciler) notify(ctx context.Context, oldObj, newObj *sourcev1.Bucket, index *etagIndex, res sreconcile.Result, resErr error) {
|
||||
// Notify successful reconciliation for new artifact and recovery from any
|
||||
// failure.
|
||||
if resErr == nil && res == sreconcile.ResultSuccess && newObj.Status.Artifact != nil {
|
||||
annotations := map[string]string{
|
||||
fmt.Sprintf("%s/%s", sourcev1.GroupVersion.Group, eventv1.MetaRevisionKey): newObj.Status.Artifact.Revision,
|
||||
fmt.Sprintf("%s/%s", sourcev1.GroupVersion.Group, eventv1.MetaDigestKey): newObj.Status.Artifact.Digest,
|
||||
sourcev1.GroupVersion.Group + "/revision": newObj.Status.Artifact.Revision,
|
||||
sourcev1.GroupVersion.Group + "/checksum": newObj.Status.Artifact.Checksum,
|
||||
}
|
||||
|
||||
var oldChecksum string
|
||||
if oldObj.GetArtifact() != nil {
|
||||
oldChecksum = oldObj.GetArtifact().Checksum
|
||||
}
|
||||
|
||||
message := fmt.Sprintf("stored artifact with %d fetched files from '%s' bucket", index.Len(), newObj.Spec.BucketName)
|
||||
|
||||
// Notify on new artifact and failure recovery.
|
||||
if !oldObj.GetArtifact().HasDigest(newObj.GetArtifact().Digest) {
|
||||
if oldChecksum != newObj.GetArtifact().Checksum {
|
||||
r.AnnotatedEventf(newObj, annotations, corev1.EventTypeNormal,
|
||||
"NewArtifact", message)
|
||||
ctrl.LoggerFrom(ctx).Info(message)
|
||||
|
@ -361,49 +421,22 @@ func (r *BucketReconciler) notify(ctx context.Context, oldObj, newObj *sourcev1.
|
|||
// condition is added.
|
||||
// The hostname of any URL in the Status of the object are updated, to ensure
|
||||
// they match the Storage server hostname of current runtime.
|
||||
func (r *BucketReconciler) reconcileStorage(ctx context.Context, sp *patch.SerialPatcher, obj *sourcev1.Bucket, _ *index.Digester, _ string) (sreconcile.Result, error) {
|
||||
func (r *BucketReconciler) reconcileStorage(ctx context.Context, obj *sourcev1.Bucket, _ *etagIndex, _ string) (sreconcile.Result, error) {
|
||||
// Garbage collect previous advertised artifact(s) from storage
|
||||
_ = r.garbageCollect(ctx, obj)
|
||||
|
||||
var artifactMissing bool
|
||||
if artifact := obj.GetArtifact(); artifact != nil {
|
||||
// Determine if the advertised artifact is still in storage
|
||||
if !r.Storage.ArtifactExist(*artifact) {
|
||||
artifactMissing = true
|
||||
}
|
||||
|
||||
// If the artifact is in storage, verify if the advertised digest still
|
||||
// matches the actual artifact
|
||||
if !artifactMissing {
|
||||
if err := r.Storage.VerifyArtifact(*artifact); err != nil {
|
||||
r.Eventf(obj, corev1.EventTypeWarning, "ArtifactVerificationFailed", "failed to verify integrity of artifact: %s", err.Error())
|
||||
|
||||
if err = r.Storage.Remove(*artifact); err != nil {
|
||||
return sreconcile.ResultEmpty, fmt.Errorf("failed to remove artifact after digest mismatch: %w", err)
|
||||
}
|
||||
|
||||
artifactMissing = true
|
||||
}
|
||||
}
|
||||
|
||||
// If the artifact is missing, remove it from the object
|
||||
if artifactMissing {
|
||||
obj.Status.Artifact = nil
|
||||
obj.Status.URL = ""
|
||||
}
|
||||
// Determine if the advertised artifact is still in storage
|
||||
if artifact := obj.GetArtifact(); artifact != nil && !r.Storage.ArtifactExist(*artifact) {
|
||||
obj.Status.Artifact = nil
|
||||
obj.Status.URL = ""
|
||||
// Remove the condition as the artifact doesn't exist.
|
||||
conditions.Delete(obj, sourcev1.ArtifactInStorageCondition)
|
||||
}
|
||||
|
||||
// Record that we do not have an artifact
|
||||
if obj.GetArtifact() == nil {
|
||||
msg := "building artifact"
|
||||
if artifactMissing {
|
||||
msg += ": disappeared from storage"
|
||||
}
|
||||
rreconcile.ProgressiveStatus(true, obj, meta.ProgressingReason, "%s", msg)
|
||||
conditions.MarkReconciling(obj, "NoArtifact", "no artifact for resource in storage")
|
||||
conditions.Delete(obj, sourcev1.ArtifactInStorageCondition)
|
||||
if err := sp.Patch(ctx, obj, r.patchOptions...); err != nil {
|
||||
return sreconcile.ResultEmpty, serror.NewGeneric(err, sourcev1.PatchOperationFailedReason)
|
||||
}
|
||||
return sreconcile.ResultSuccess, nil
|
||||
}
|
||||
|
||||
|
@ -418,164 +451,92 @@ func (r *BucketReconciler) reconcileStorage(ctx context.Context, sp *patch.Seria
|
|||
// reconcileSource fetches the upstream bucket contents with the client for the
|
||||
// given object's Provider, and returns the result.
|
||||
// When a SecretRef is defined, it attempts to fetch the Secret before calling
|
||||
// the provider. If this fails, it records v1.FetchFailedCondition=True on
|
||||
// the provider. If this fails, it records v1beta2.FetchFailedCondition=True on
|
||||
// the object and returns early.
|
||||
func (r *BucketReconciler) reconcileSource(ctx context.Context, sp *patch.SerialPatcher, obj *sourcev1.Bucket, index *index.Digester, dir string) (sreconcile.Result, error) {
|
||||
secret, err := r.getSecret(ctx, obj.Spec.SecretRef, obj.GetNamespace())
|
||||
func (r *BucketReconciler) reconcileSource(ctx context.Context, obj *sourcev1.Bucket, index *etagIndex, dir string) (sreconcile.Result, error) {
|
||||
secret, err := r.getBucketSecret(ctx, obj)
|
||||
if err != nil {
|
||||
e := serror.NewGeneric(err, sourcev1.AuthenticationFailedReason)
|
||||
conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, e.Reason, "%s", e)
|
||||
e := &serror.Event{Err: err, Reason: sourcev1.AuthenticationFailedReason}
|
||||
conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, e.Reason, e.Error())
|
||||
// Return error as the world as observed may change
|
||||
return sreconcile.ResultEmpty, e
|
||||
}
|
||||
proxyURL, err := r.getProxyURL(ctx, obj)
|
||||
if err != nil {
|
||||
e := serror.NewGeneric(err, sourcev1.AuthenticationFailedReason)
|
||||
conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, e.Reason, "%s", e)
|
||||
return sreconcile.ResultEmpty, e
|
||||
}
|
||||
|
||||
// Construct provider client
|
||||
var provider BucketProvider
|
||||
switch obj.Spec.Provider {
|
||||
case sourcev1.BucketProviderGoogle:
|
||||
case sourcev1.GoogleBucketProvider:
|
||||
if err = gcp.ValidateSecret(secret); err != nil {
|
||||
e := serror.NewGeneric(err, sourcev1.AuthenticationFailedReason)
|
||||
conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, e.Reason, "%s", e)
|
||||
e := &serror.Event{Err: err, Reason: sourcev1.AuthenticationFailedReason}
|
||||
conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, e.Reason, e.Error())
|
||||
return sreconcile.ResultEmpty, e
|
||||
}
|
||||
var opts []gcp.Option
|
||||
if secret != nil {
|
||||
opts = append(opts, gcp.WithSecret(secret))
|
||||
}
|
||||
if proxyURL != nil {
|
||||
opts = append(opts, gcp.WithProxyURL(proxyURL))
|
||||
}
|
||||
if provider, err = gcp.NewClient(ctx, opts...); err != nil {
|
||||
e := serror.NewGeneric(err, "ClientError")
|
||||
conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, e.Reason, "%s", e)
|
||||
if provider, err = gcp.NewClient(ctx, secret); err != nil {
|
||||
e := &serror.Event{Err: err, Reason: "ClientError"}
|
||||
conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, e.Reason, e.Error())
|
||||
return sreconcile.ResultEmpty, e
|
||||
}
|
||||
case sourcev1.BucketProviderAzure:
|
||||
case sourcev1.AzureBucketProvider:
|
||||
if err = azure.ValidateSecret(secret); err != nil {
|
||||
e := serror.NewGeneric(err, sourcev1.AuthenticationFailedReason)
|
||||
conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, e.Reason, "%s", e)
|
||||
e := &serror.Event{Err: err, Reason: sourcev1.AuthenticationFailedReason}
|
||||
conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, e.Reason, e.Error())
|
||||
return sreconcile.ResultEmpty, e
|
||||
}
|
||||
var opts []azure.Option
|
||||
if secret != nil {
|
||||
opts = append(opts, azure.WithSecret(secret))
|
||||
}
|
||||
if proxyURL != nil {
|
||||
opts = append(opts, azure.WithProxyURL(proxyURL))
|
||||
}
|
||||
if provider, err = azure.NewClient(obj, opts...); err != nil {
|
||||
e := serror.NewGeneric(err, "ClientError")
|
||||
conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, e.Reason, "%s", e)
|
||||
if provider, err = azure.NewClient(obj, secret); err != nil {
|
||||
e := &serror.Event{Err: err, Reason: "ClientError"}
|
||||
conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, e.Reason, e.Error())
|
||||
return sreconcile.ResultEmpty, e
|
||||
}
|
||||
default:
|
||||
if err = minio.ValidateSecret(secret); err != nil {
|
||||
e := serror.NewGeneric(err, sourcev1.AuthenticationFailedReason)
|
||||
conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, e.Reason, "%s", e)
|
||||
e := &serror.Event{Err: err, Reason: sourcev1.AuthenticationFailedReason}
|
||||
conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, e.Reason, e.Error())
|
||||
return sreconcile.ResultEmpty, e
|
||||
}
|
||||
	tlsConfig, err := r.getTLSConfig(ctx, obj.Spec.CertSecretRef, obj.GetNamespace(), obj.Spec.Endpoint)
	if err != nil {
		e := serror.NewGeneric(err, sourcev1.AuthenticationFailedReason)
		conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, e.Reason, "%s", e)
		return sreconcile.ResultEmpty, e
	}

	stsSecret, err := r.getSTSSecret(ctx, obj)
	if err != nil {
		e := serror.NewGeneric(err, sourcev1.AuthenticationFailedReason)
		conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, e.Reason, "%s", e)
		return sreconcile.ResultEmpty, e
	}

	stsTLSConfig, err := r.getSTSTLSConfig(ctx, obj)
	if err != nil {
		err := fmt.Errorf("failed to get STS TLS config: %w", err)
		e := serror.NewGeneric(err, sourcev1.AuthenticationFailedReason)
		conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, e.Reason, "%s", e)
		return sreconcile.ResultEmpty, e
	}

	if sts := obj.Spec.STS; sts != nil {
		if err := minio.ValidateSTSProvider(obj.Spec.Provider, sts); err != nil {
			e := serror.NewStalling(err, sourcev1.InvalidSTSConfigurationReason)
			conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, e.Reason, "%s", e)
			return sreconcile.ResultEmpty, e
		}
		if _, err := url.Parse(sts.Endpoint); err != nil {
			err := fmt.Errorf("failed to parse STS endpoint '%s': %w", sts.Endpoint, err)
			e := serror.NewStalling(err, sourcev1.URLInvalidReason)
			conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, e.Reason, "%s", e)
			return sreconcile.ResultEmpty, e
		}
		if err := minio.ValidateSTSSecret(sts.Provider, stsSecret); err != nil {
			e := serror.NewGeneric(err, sourcev1.AuthenticationFailedReason)
			conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, e.Reason, "%s", e)
			return sreconcile.ResultEmpty, e
		}
	}

	var opts []minio.Option
	if secret != nil {
		opts = append(opts, minio.WithSecret(secret))
	}
	if tlsConfig != nil {
		opts = append(opts, minio.WithTLSConfig(tlsConfig))
	}
	if proxyURL != nil {
		opts = append(opts, minio.WithProxyURL(proxyURL))
	}
	if stsSecret != nil {
		opts = append(opts, minio.WithSTSSecret(stsSecret))
	}
	if stsTLSConfig != nil {
		opts = append(opts, minio.WithSTSTLSConfig(stsTLSConfig))
	}
	if provider, err = minio.NewClient(obj, opts...); err != nil {
		e := serror.NewGeneric(err, "ClientError")
		conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, e.Reason, "%s", e)
	if provider, err = minio.NewClient(obj, secret); err != nil {
		e := &serror.Event{Err: err, Reason: "ClientError"}
		conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, e.Reason, e.Error())
		return sreconcile.ResultEmpty, e
	}
	}

	// Fetch etag index
	if err = fetchEtagIndex(ctx, provider, obj, index, dir); err != nil {
		e := serror.NewGeneric(err, sourcev1.BucketOperationFailedReason)
		conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, e.Reason, "%s", e)
		e := &serror.Event{Err: err, Reason: sourcev1.BucketOperationFailedReason}
		conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, e.Reason, e.Error())
		return sreconcile.ResultEmpty, e
	}

	// Check if index has changed compared to current Artifact revision.
	var changed bool
	if artifact := obj.Status.Artifact; artifact != nil && artifact.Revision != "" {
		curRev := digest.Digest(artifact.Revision)
		changed = curRev.Validate() != nil || curRev != index.Digest(curRev.Algorithm())
	// Calculate revision
	revision, err := index.Revision()
	if err != nil {
		return sreconcile.ResultEmpty, &serror.Event{
			Err:    fmt.Errorf("failed to calculate revision: %w", err),
			Reason: meta.FailedReason,
		}
	}

	// Fetch the bucket objects if required to.
	if artifact := obj.GetArtifact(); artifact == nil || changed {
		// Mark observations about the revision on the object
		defer func() {
			// As fetchIndexFiles can make last-minute modifications to the etag
			// index, we need to re-calculate the revision at the end
			revision := index.Digest(intdigest.Canonical)
	// Mark observations about the revision on the object
	defer func() {
		// As fetchIndexFiles can make last-minute modifications to the etag
		// index, we need to re-calculate the revision at the end
		revision, err := index.Revision()
		if err != nil {
			ctrl.LoggerFrom(ctx).Error(err, "failed to calculate revision after fetching etag index")
			return
		}

		if !obj.GetArtifact().HasRevision(revision) {
			message := fmt.Sprintf("new upstream revision '%s'", revision)
			if obj.GetArtifact() != nil {
				conditions.MarkTrue(obj, sourcev1.ArtifactOutdatedCondition, "NewRevision", "%s", message)
			}
			rreconcile.ProgressiveStatus(true, obj, meta.ProgressingReason, "building artifact: %s", message)
			if err := sp.Patch(ctx, obj, r.patchOptions...); err != nil {
				ctrl.LoggerFrom(ctx).Error(err, "failed to patch")
				return
			}
		}()
			conditions.MarkTrue(obj, sourcev1.ArtifactOutdatedCondition, "NewRevision", message)
			conditions.MarkReconciling(obj, "NewRevision", message)
		}
	}()

	if !obj.GetArtifact().HasRevision(revision) {
		if err = fetchIndexFiles(ctx, provider, obj, index, dir); err != nil {
			e := serror.NewGeneric(err, sourcev1.BucketOperationFailedReason)
			conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, e.Reason, "%s", e)
			e := &serror.Event{Err: err, Reason: sourcev1.BucketOperationFailedReason}
			conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, e.Reason, e.Error())
			return sreconcile.ResultEmpty, e
		}
	}
@@ -588,81 +549,81 @@ func (r *BucketReconciler) reconcileSource(ctx context.Context, sp *patch.Serial
// (Status) data on the object does not match the given.
//
// The inspection of the given data to the object is deferred, ensuring any
// stale observations like v1.ArtifactOutdatedCondition are removed.
// stale observations like v1beta2.ArtifactOutdatedCondition are removed.
// If the given Artifact does not differ from the object's current, it returns
// early.
// On a successful archive, the Artifact in the Status of the object is set,
// and the symlink in the Storage is updated to its path.
func (r *BucketReconciler) reconcileArtifact(ctx context.Context, sp *patch.SerialPatcher, obj *sourcev1.Bucket, index *index.Digester, dir string) (sreconcile.Result, error) {
func (r *BucketReconciler) reconcileArtifact(ctx context.Context, obj *sourcev1.Bucket, index *etagIndex, dir string) (sreconcile.Result, error) {
	// Calculate revision
	revision := index.Digest(intdigest.Canonical)
	revision, err := index.Revision()
	if err != nil {
		return sreconcile.ResultEmpty, &serror.Event{
			Err:    fmt.Errorf("failed to calculate revision of new artifact: %w", err),
			Reason: meta.FailedReason,
		}
	}

	// Create artifact
	artifact := r.Storage.NewArtifactFor(obj.Kind, obj, revision.String(), fmt.Sprintf("%s.tar.gz", revision.Encoded()))
	artifact := r.Storage.NewArtifactFor(obj.Kind, obj, revision, fmt.Sprintf("%s.tar.gz", revision))

	// Set the ArtifactInStorageCondition if there's no drift.
	defer func() {
		if curArtifact := obj.GetArtifact(); curArtifact != nil && curArtifact.Revision != "" {
			curRev := digest.Digest(curArtifact.Revision)
			if curRev.Validate() == nil && index.Digest(curRev.Algorithm()) == curRev {
				conditions.Delete(obj, sourcev1.ArtifactOutdatedCondition)
				conditions.MarkTrue(obj, sourcev1.ArtifactInStorageCondition, meta.SucceededReason,
					"stored artifact: revision '%s'", artifact.Revision)
			}
		if obj.GetArtifact().HasRevision(artifact.Revision) {
			conditions.Delete(obj, sourcev1.ArtifactOutdatedCondition)
			conditions.MarkTrue(obj, sourcev1.ArtifactInStorageCondition, meta.SucceededReason,
				"stored artifact for revision '%s'", artifact.Revision)
		}
	}()

	// The artifact is up-to-date
	if curArtifact := obj.GetArtifact(); curArtifact != nil && curArtifact.Revision != "" {
		curRev := digest.Digest(curArtifact.Revision)
		if curRev.Validate() == nil && index.Digest(curRev.Algorithm()) == curRev {
			r.eventLogf(ctx, obj, eventv1.EventTypeTrace, sourcev1.ArtifactUpToDateReason, "artifact up-to-date with remote revision: '%s'", artifact.Revision)
			return sreconcile.ResultSuccess, nil
		}
	if obj.GetArtifact().HasRevision(artifact.Revision) {
		r.eventLogf(ctx, obj, eventv1.EventTypeTrace, sourcev1.ArtifactUpToDateReason, "artifact up-to-date with remote revision: '%s'", artifact.Revision)
		return sreconcile.ResultSuccess, nil
	}

	// Ensure target path exists and is a directory
	if f, err := os.Stat(dir); err != nil {
		e := serror.NewGeneric(
			fmt.Errorf("failed to stat source path: %w", err),
			sourcev1.StatOperationFailedReason,
		)
		conditions.MarkTrue(obj, sourcev1.StorageOperationFailedCondition, e.Reason, "%s", e)
		e := &serror.Event{
			Err:    fmt.Errorf("failed to stat source path: %w", err),
			Reason: sourcev1.StatOperationFailedReason,
		}
		conditions.MarkTrue(obj, sourcev1.StorageOperationFailedCondition, e.Reason, e.Err.Error())
		return sreconcile.ResultEmpty, e
	} else if !f.IsDir() {
		e := serror.NewGeneric(
			fmt.Errorf("source path '%s' is not a directory", dir),
			sourcev1.InvalidPathReason,
		)
		conditions.MarkTrue(obj, sourcev1.StorageOperationFailedCondition, e.Reason, "%s", e)
		e := &serror.Event{
			Err:    fmt.Errorf("source path '%s' is not a directory", dir),
			Reason: sourcev1.InvalidPathReason,
		}
		conditions.MarkTrue(obj, sourcev1.StorageOperationFailedCondition, e.Reason, e.Err.Error())
		return sreconcile.ResultEmpty, e
	}

	// Ensure artifact directory exists and acquire lock
	if err := r.Storage.MkdirAll(artifact); err != nil {
		e := serror.NewGeneric(
			fmt.Errorf("failed to create artifact directory: %w", err),
			sourcev1.DirCreationFailedReason,
		)
		conditions.MarkTrue(obj, sourcev1.StorageOperationFailedCondition, e.Reason, "%s", e)
		e := &serror.Event{
			Err:    fmt.Errorf("failed to create artifact directory: %w", err),
			Reason: sourcev1.DirCreationFailedReason,
		}
		conditions.MarkTrue(obj, sourcev1.StorageOperationFailedCondition, e.Reason, e.Err.Error())
		return sreconcile.ResultEmpty, e
	}
	unlock, err := r.Storage.Lock(artifact)
	if err != nil {
		return sreconcile.ResultEmpty, serror.NewGeneric(
			fmt.Errorf("failed to acquire lock for artifact: %w", err),
			meta.FailedReason,
		)
		return sreconcile.ResultEmpty, &serror.Event{
			Err:    fmt.Errorf("failed to acquire lock for artifact: %w", err),
			Reason: meta.FailedReason,
		}
	}
	defer unlock()

	// Archive directory to storage
	if err := r.Storage.Archive(&artifact, dir, nil); err != nil {
		e := serror.NewGeneric(
			fmt.Errorf("unable to archive artifact to storage: %s", err),
			sourcev1.ArchiveOperationFailedReason,
		)
		conditions.MarkTrue(obj, sourcev1.StorageOperationFailedCondition, e.Reason, "%s", e)
		e := &serror.Event{
			Err:    fmt.Errorf("unable to archive artifact to storage: %s", err),
			Reason: sourcev1.ArchiveOperationFailedReason,
		}
		conditions.MarkTrue(obj, sourcev1.StorageOperationFailedCondition, e.Reason, e.Err.Error())
		return sreconcile.ResultEmpty, e
	}

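On main, revisions are compared as opencontainers/go-digest values instead of raw checksum strings. Recomputing the index digest with the algorithm taken from the stored revision keeps existing artifacts comparable even if the canonical algorithm later changes. A standalone sketch of the same comparison:

package main

import (
	"fmt"

	"github.com/opencontainers/go-digest"
)

func main() {
	content := []byte("index contents")

	// Stored revision, e.g. from a previous reconciliation.
	stored := digest.Canonical.FromBytes(content)

	// Recompute with the algorithm encoded in the stored digest, so a
	// change of the canonical algorithm doesn't flag every object as drifted.
	if stored.Validate() == nil && stored.Algorithm().FromBytes(content) == stored {
		fmt.Println("artifact up-to-date")
	}
}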
@@ -708,10 +669,10 @@ func (r *BucketReconciler) reconcileDelete(ctx context.Context, obj *sourcev1.Bu
func (r *BucketReconciler) garbageCollect(ctx context.Context, obj *sourcev1.Bucket) error {
	if !obj.DeletionTimestamp.IsZero() {
		if deleted, err := r.Storage.RemoveAll(r.Storage.NewArtifactFor(obj.Kind, obj.GetObjectMeta(), "", "*")); err != nil {
			return serror.NewGeneric(
				fmt.Errorf("garbage collection for deleted resource failed: %s", err),
				"GarbageCollectionFailed",
			)
			return &serror.Event{
				Err:    fmt.Errorf("garbage collection for deleted resource failed: %s", err),
				Reason: "GarbageCollectionFailed",
			}
		} else if deleted != "" {
			r.eventLogf(ctx, obj, eventv1.EventTypeTrace, "GarbageCollectionSucceeded",
				"garbage collected artifacts for deleted resource")

@@ -722,29 +683,29 @@ func (r *BucketReconciler) garbageCollect(ctx context.Context, obj *sourcev1.Buc
	if obj.GetArtifact() != nil {
		delFiles, err := r.Storage.GarbageCollect(ctx, *obj.GetArtifact(), time.Second*5)
		if err != nil {
			return serror.NewGeneric(
				fmt.Errorf("garbage collection of artifacts failed: %w", err),
				"GarbageCollectionFailed",
			)
			return &serror.Event{
				Err:    fmt.Errorf("garbage collection of artifacts failed: %w", err),
				Reason: "GarbageCollectionFailed",
			}
		}
		if len(delFiles) > 0 {
			r.eventLogf(ctx, obj, eventv1.EventTypeTrace, "GarbageCollectionSucceeded",
				"garbage collected %d artifacts", len(delFiles))
				fmt.Sprintf("garbage collected %d artifacts", len(delFiles)))
			return nil
		}
	}
	return nil
}
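The wildcard artifact passed to Storage.RemoveAll above ("*" in place of a concrete filename) maps to a glob-style removal of everything under the object's artifact directory. A rough sketch of what such a helper could look like; removeAll and the path layout here are assumptions for illustration, not the actual Storage implementation:

package main

import (
	"fmt"
	"os"
	"path/filepath"
)

// removeAll deletes every path matching the artifact pattern, e.g.
// "<base>/bucket/<namespace>/<name>/*", and reports what it removed so the
// caller can decide whether to emit a GarbageCollectionSucceeded event.
func removeAll(base, pattern string) (string, error) {
	matches, err := filepath.Glob(filepath.Join(base, pattern))
	if err != nil {
		return "", err
	}
	for _, m := range matches {
		if err := os.RemoveAll(m); err != nil {
			return "", err
		}
	}
	if len(matches) == 0 {
		return "", nil // nothing deleted, mirrors the `deleted != ""` check
	}
	return fmt.Sprintf("%d path(s)", len(matches)), nil
}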
// getSecret attempts to fetch a Secret reference if specified. It returns any client error.
func (r *BucketReconciler) getSecret(ctx context.Context, secretRef *meta.LocalObjectReference,
	namespace string) (*corev1.Secret, error) {
	if secretRef == nil {
// getBucketSecret attempts to fetch the Secret reference if specified on the
// obj. It returns any client error.
func (r *BucketReconciler) getBucketSecret(ctx context.Context, obj *sourcev1.Bucket) (*corev1.Secret, error) {
	if obj.Spec.SecretRef == nil {
		return nil, nil
	}
	secretName := types.NamespacedName{
		Namespace: namespace,
		Name:      secretRef.Name,
		Namespace: obj.GetNamespace(),
		Name:      obj.Spec.SecretRef.Name,
	}
	secret := &corev1.Secret{}
	if err := r.Get(ctx, secretName, secret); err != nil {

@@ -753,68 +714,6 @@ func (r *BucketReconciler) getSecret(ctx context.Context, secretRef *meta.LocalO
	return secret, nil
}

// getTLSConfig attempts to fetch a TLS configuration from the given
// Secret reference, namespace and endpoint.
func (r *BucketReconciler) getTLSConfig(ctx context.Context,
	secretRef *meta.LocalObjectReference, namespace, endpoint string) (*stdtls.Config, error) {
	certSecret, err := r.getSecret(ctx, secretRef, namespace)
	if err != nil || certSecret == nil {
		return nil, err
	}
	tlsConfig, _, err := tls.KubeTLSClientConfigFromSecret(*certSecret, endpoint)
	if err != nil {
		return nil, fmt.Errorf("failed to create TLS config: %w", err)
	}
	if tlsConfig == nil {
		return nil, fmt.Errorf("certificate secret does not contain any TLS configuration")
	}
	return tlsConfig, nil
}

// getProxyURL attempts to fetch a proxy URL from the object's proxy secret
// reference.
func (r *BucketReconciler) getProxyURL(ctx context.Context, obj *sourcev1.Bucket) (*url.URL, error) {
	namespace := obj.GetNamespace()
	proxySecret, err := r.getSecret(ctx, obj.Spec.ProxySecretRef, namespace)
	if err != nil || proxySecret == nil {
		return nil, err
	}
	proxyData := proxySecret.Data
	address, ok := proxyData["address"]
	if !ok {
		return nil, fmt.Errorf("invalid proxy secret '%s/%s': key 'address' is missing",
			namespace, obj.Spec.ProxySecretRef.Name)
	}
	proxyURL, err := url.Parse(string(address))
	if err != nil {
		return nil, fmt.Errorf("failed to parse proxy address '%s': %w", address, err)
	}
	user, hasUser := proxyData["username"]
	password, hasPassword := proxyData["password"]
	if hasUser || hasPassword {
		proxyURL.User = url.UserPassword(string(user), string(password))
	}
	return proxyURL, nil
}
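A proxy URL assembled this way (address plus optional username/password) is typically consumed by an HTTP transport. A small usage sketch with the standard library:

package main

import (
	"net/http"
	"net/url"
)

// newProxiedClient wires a parsed proxy URL, such as the value getProxyURL
// returns, into an HTTP client.
func newProxiedClient(proxyURL *url.URL) *http.Client {
	transport := http.DefaultTransport.(*http.Transport).Clone()
	transport.Proxy = http.ProxyURL(proxyURL) // a nil proxyURL means no proxy
	return &http.Client{Transport: transport}
}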
// getSTSSecret attempts to fetch the secret from the object's STS secret
// reference.
func (r *BucketReconciler) getSTSSecret(ctx context.Context, obj *sourcev1.Bucket) (*corev1.Secret, error) {
	if obj.Spec.STS == nil {
		return nil, nil
	}
	return r.getSecret(ctx, obj.Spec.STS.SecretRef, obj.GetNamespace())
}

// getSTSTLSConfig attempts to fetch the certificate secret from the object's
// STS configuration.
func (r *BucketReconciler) getSTSTLSConfig(ctx context.Context, obj *sourcev1.Bucket) (*stdtls.Config, error) {
	if obj.Spec.STS == nil {
		return nil, nil
	}
	return r.getTLSConfig(ctx, obj.Spec.STS.CertSecretRef, obj.GetNamespace(), obj.Spec.STS.Endpoint)
}

// eventLogf records events, and logs at the same time.
//
// This log is different from the debug log in the EventRecorder, in the sense

@@ -845,7 +744,7 @@ func (r *BucketReconciler) annotatedEventLogf(ctx context.Context,
// bucket using the given provider, while filtering them using .sourceignore
// rules. After fetching an object, the etag value in the index is updated to
// the current value to ensure accuracy.
func fetchEtagIndex(ctx context.Context, provider BucketProvider, obj *sourcev1.Bucket, index *index.Digester, tempDir string) error {
func fetchEtagIndex(ctx context.Context, provider BucketProvider, obj *sourcev1.Bucket, index *etagIndex, tempDir string) error {
	ctxTimeout, cancel := context.WithTimeout(ctx, obj.Spec.Timeout.Duration)
	defer cancel()

@@ -863,7 +762,7 @@ func fetchEtagIndex(ctx context.Context, provider BucketProvider, obj *sourcev1.
	path := filepath.Join(tempDir, sourceignore.IgnoreFile)
	if _, err := provider.FGetObject(ctxTimeout, obj.Spec.BucketName, sourceignore.IgnoreFile, path); err != nil {
		if !provider.ObjectIsNotFound(err) {
			return fmt.Errorf("failed to get Etag for '%s' object: %w", sourceignore.IgnoreFile, serror.SanitizeError(err))
			return err
		}
	}
	ps, err := sourceignore.ReadIgnoreFile(path, nil)

@@ -877,7 +776,7 @@ func fetchEtagIndex(ctx context.Context, provider BucketProvider, obj *sourcev1.
	matcher := sourceignore.NewMatcher(ps)

	// Build up index
	err = provider.VisitObjects(ctxTimeout, obj.Spec.BucketName, obj.Spec.Prefix, func(key, etag string) error {
	err = provider.VisitObjects(ctxTimeout, obj.Spec.BucketName, func(key, etag string) error {
		if strings.HasSuffix(key, "/") || key == sourceignore.IgnoreFile {
			return nil
		}
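Keys visited in the callback are matched against the loaded .sourceignore patterns. A sketch of the typical matching flow, assuming the fluxcd/pkg/sourceignore API (the exact call site is elided from this hunk):

package main

import (
	"fmt"
	"strings"

	"github.com/fluxcd/pkg/sourceignore"
)

func main() {
	// VCSPatterns yields default ignore rules (e.g. .git); patterns read
	// from a .sourceignore file with ReadIgnoreFile would be appended to
	// the same slice.
	ps := sourceignore.VCSPatterns(nil)
	matcher := sourceignore.NewMatcher(ps)

	for _, key := range []string{"manifests/app.yaml", ".git/config"} {
		// Object keys are split on "/" so gitignore-style matching can
		// treat them as path segments.
		if matcher.Match(strings.Split(key, "/"), false) {
			fmt.Println("ignored:", key)
			continue
		}
		fmt.Println("indexed:", key)
	}
}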
@@ -899,7 +798,7 @@ func fetchEtagIndex(ctx context.Context, provider BucketProvider, obj *sourcev1.
// using the given provider, and stores them into tempDir. It downloads in
// parallel, but limited to the maxConcurrentBucketFetches.
// Given an index is provided, the bucket is assumed to exist.
func fetchIndexFiles(ctx context.Context, provider BucketProvider, obj *sourcev1.Bucket, index *index.Digester, tempDir string) error {
func fetchIndexFiles(ctx context.Context, provider BucketProvider, obj *sourcev1.Bucket, index *etagIndex, tempDir string) error {
	ctxTimeout, cancel := context.WithTimeout(ctx, obj.Spec.Timeout.Duration)
	defer cancel()

@@ -927,7 +826,7 @@ func fetchIndexFiles(ctx context.Context, provider BucketProvider, obj *sourcev1
				index.Delete(k)
				return nil
			}
			return fmt.Errorf("failed to get '%s' object: %w", k, serror.SanitizeError(err))
			return fmt.Errorf("failed to get '%s' object: %w", k, err)
		}
		if t != etag {
			index.Add(k, etag)
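The bounded parallel download described in the comment above can be sketched with an errgroup whose limit plays the role of maxConcurrentBucketFetches. This is a simplification for illustration; the controller's actual implementation may use a weighted semaphore instead:

package main

import (
	"context"
	"fmt"

	"golang.org/x/sync/errgroup"
)

func fetchAll(ctx context.Context, keys []string, fetch func(context.Context, string) error) error {
	g, ctx := errgroup.WithContext(ctx)
	g.SetLimit(100) // at most 100 in-flight object downloads

	for _, k := range keys {
		k := k // capture loop variable (pre-Go 1.22 semantics)
		g.Go(func() error {
			if err := fetch(ctx, k); err != nil {
				return fmt.Errorf("failed to get '%s' object: %w", k, err)
			}
			return nil
		})
	}
	// Wait returns the first non-nil error from any fetch.
	return g.Wait()
}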
@@ -14,7 +14,7 @@ See the License for the specific language governing permissions and
limitations under the License.
*/

package controller
package controllers

import (
	"context"

@@ -27,8 +27,7 @@ import (
	"gotest.tools/assert"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

	sourcev1 "github.com/fluxcd/source-controller/api/v1"
	"github.com/fluxcd/source-controller/internal/index"
	sourcev1 "github.com/fluxcd/source-controller/api/v1beta2"
)

type mockBucketObject struct {

@@ -41,7 +40,7 @@ type mockBucketClient struct {
	objects map[string]mockBucketObject
}

var errMockNotFound = fmt.Errorf("not found")
var mockNotFound = fmt.Errorf("not found")

func (m mockBucketClient) BucketExists(_ context.Context, name string) (bool, error) {
	return name == m.bucketName, nil

@@ -57,7 +56,7 @@ func (m mockBucketClient) FGetObject(_ context.Context, bucket, obj, path string
	}
	object, ok := m.objects[obj]
	if !ok {
		return "", errMockNotFound
		return "", mockNotFound
	}
	if err := os.WriteFile(path, []byte(object.data), os.FileMode(0660)); err != nil {
		return "", err

@@ -66,10 +65,10 @@ func (m mockBucketClient) FGetObject(_ context.Context, bucket, obj, path string
}

func (m mockBucketClient) ObjectIsNotFound(e error) bool {
	return e == errMockNotFound
	return e == mockNotFound
}

func (m mockBucketClient) VisitObjects(_ context.Context, _ string, _ string, f func(key, etag string) error) error {
func (m mockBucketClient) VisitObjects(_ context.Context, _ string, f func(key, etag string) error) error {
	for key, obj := range m.objects {
		if err := f(key, obj.etag); err != nil {
			return err

@@ -78,7 +77,9 @@ func (m mockBucketClient) VisitObjects(_ context.Context, _ string, _ string, f
	return nil
}

func (m mockBucketClient) Close(_ context.Context) {}
func (m mockBucketClient) Close(_ context.Context) {
	return
}

func (m *mockBucketClient) addObject(key string, object mockBucketObject) {
	if m.objects == nil {

@@ -87,8 +88,8 @@ func (m *mockBucketClient) addObject(key string, object mockBucketObject) {
	m.objects[key] = object
}

func (m *mockBucketClient) objectsToDigestIndex() *index.Digester {
	i := index.NewDigester()
func (m *mockBucketClient) objectsToEtagIndex() *etagIndex {
	i := newEtagIndex()
	for k, v := range m.objects {
		i.Add(k, v.etag)
	}
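Since the mock must track the BucketProvider interface that fetchEtagIndex and fetchIndexFiles accept, a compile-time assertion is a cheap guard worth placing next to it; if the interface gains a method, the test file stops compiling instead of failing at runtime:

// Compile-time check that the mock satisfies the provider interface.
var _ BucketProvider = mockBucketClient{}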
@@ -113,7 +114,7 @@ func Test_fetchEtagIndex(t *testing.T) {
		client.addObject("bar.yaml", mockBucketObject{data: "bar.yaml", etag: "etag2"})
		client.addObject("baz.yaml", mockBucketObject{data: "baz.yaml", etag: "etag3"})

		index := index.NewDigester()
		index := newEtagIndex()
		err := fetchEtagIndex(context.TODO(), client, bucket.DeepCopy(), index, tmp)
		if err != nil {
			t.Fatal(err)

@@ -127,7 +128,7 @@ func Test_fetchEtagIndex(t *testing.T) {

		client := mockBucketClient{bucketName: "other-bucket-name"}

		index := index.NewDigester()
		index := newEtagIndex()
		err := fetchEtagIndex(context.TODO(), client, bucket.DeepCopy(), index, tmp)
		assert.ErrorContains(t, err, "not found")
	})

@@ -140,7 +141,7 @@ func Test_fetchEtagIndex(t *testing.T) {
		client.addObject("foo.yaml", mockBucketObject{etag: "etag1", data: "foo.yaml"})
		client.addObject("foo.txt", mockBucketObject{etag: "etag2", data: "foo.txt"})

		index := index.NewDigester()
		index := newEtagIndex()
		err := fetchEtagIndex(context.TODO(), client, bucket.DeepCopy(), index, tmp)
		if err != nil {
			t.Fatal(err)

@@ -167,7 +168,7 @@ func Test_fetchEtagIndex(t *testing.T) {
		bucket := bucket.DeepCopy()
		bucket.Spec.Ignore = &ignore

		index := index.NewDigester()
		index := newEtagIndex()
		err := fetchEtagIndex(context.TODO(), client, bucket.DeepCopy(), index, tmp)
		if err != nil {
			t.Fatal(err)

@@ -202,7 +203,7 @@ func Test_fetchFiles(t *testing.T) {
		client.addObject("bar.yaml", mockBucketObject{data: "bar.yaml", etag: "etag2"})
		client.addObject("baz.yaml", mockBucketObject{data: "baz.yaml", etag: "etag3"})

		index := client.objectsToDigestIndex()
		index := client.objectsToEtagIndex()

		err := fetchIndexFiles(context.TODO(), client, bucket.DeepCopy(), index, tmp)
		if err != nil {

@@ -224,7 +225,7 @@ func Test_fetchFiles(t *testing.T) {
		client := mockBucketClient{bucketName: bucketName, objects: map[string]mockBucketObject{}}
		client.objects["error"] = mockBucketObject{}

		err := fetchIndexFiles(context.TODO(), client, bucket.DeepCopy(), client.objectsToDigestIndex(), tmp)
		err := fetchIndexFiles(context.TODO(), client, bucket.DeepCopy(), client.objectsToEtagIndex(), tmp)
		if err == nil {
			t.Fatal("expected error but got nil")
		}

@@ -236,7 +237,7 @@ func Test_fetchFiles(t *testing.T) {
		client := mockBucketClient{bucketName: bucketName}
		client.addObject("foo.yaml", mockBucketObject{data: "foo.yaml", etag: "etag2"})

		index := index.NewDigester()
		index := newEtagIndex()
		index.Add("foo.yaml", "etag1")
		err := fetchIndexFiles(context.TODO(), client, bucket.DeepCopy(), index, tmp)
		if err != nil {

@@ -252,7 +253,7 @@ func Test_fetchFiles(t *testing.T) {
		client := mockBucketClient{bucketName: bucketName}
		client.addObject("foo.yaml", mockBucketObject{data: "foo.yaml", etag: "etag1"})

		index := index.NewDigester()
		index := newEtagIndex()
		index.Add("foo.yaml", "etag1")
		// Does not exist on server
		index.Add("bar.yaml", "etag2")

@@ -275,7 +276,7 @@ func Test_fetchFiles(t *testing.T) {
			f := fmt.Sprintf("file-%d", i)
			client.addObject(f, mockBucketObject{etag: f, data: f})
		}
		index := client.objectsToDigestIndex()
		index := client.objectsToEtagIndex()

		err := fetchIndexFiles(context.TODO(), client, bucket.DeepCopy(), index, tmp)
		if err != nil {
File diff suppressed because it is too large
@@ -14,7 +14,7 @@ See the License for the specific language governing permissions and
limitations under the License.
*/

package controller
package controllers

import (
	"context"
File diff suppressed because it is too large
File diff suppressed because it is too large
File diff suppressed because it is too large
File diff suppressed because it is too large
@@ -14,44 +14,38 @@ See the License for the specific language governing permissions and
limitations under the License.
*/

package controller
package controllers

import (
	"bytes"
	"context"
	"crypto/tls"
	"errors"
	"fmt"
	"net/url"
	"strings"
	"time"

	"github.com/docker/go-units"
	"github.com/opencontainers/go-digest"
	helmgetter "helm.sh/helm/v3/pkg/getter"
	helmreg "helm.sh/helm/v3/pkg/registry"
	corev1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/types"
	kuberecorder "k8s.io/client-go/tools/record"
	"k8s.io/client-go/util/workqueue"
	ctrl "sigs.k8s.io/controller-runtime"
	"sigs.k8s.io/controller-runtime/pkg/client"
	"sigs.k8s.io/controller-runtime/pkg/controller"
	"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
	"sigs.k8s.io/controller-runtime/pkg/predicate"
	"sigs.k8s.io/controller-runtime/pkg/reconcile"
	"sigs.k8s.io/controller-runtime/pkg/ratelimiter"

	eventv1 "github.com/fluxcd/pkg/apis/event/v1beta1"
	"github.com/fluxcd/pkg/apis/meta"
	"github.com/fluxcd/pkg/runtime/conditions"
	helper "github.com/fluxcd/pkg/runtime/controller"
	"github.com/fluxcd/pkg/runtime/jitter"
	"github.com/fluxcd/pkg/runtime/patch"
	"github.com/fluxcd/pkg/runtime/predicates"
	rreconcile "github.com/fluxcd/pkg/runtime/reconcile"

	sourcev1 "github.com/fluxcd/source-controller/api/v1"
	sourcev1 "github.com/fluxcd/source-controller/api/v1beta2"
	"github.com/fluxcd/source-controller/internal/cache"
	intdigest "github.com/fluxcd/source-controller/internal/digest"
	serror "github.com/fluxcd/source-controller/internal/error"
	"github.com/fluxcd/source-controller/internal/helm/getter"
	"github.com/fluxcd/source-controller/internal/helm/repository"

@@ -61,7 +55,7 @@ import (
)

// helmRepositoryReadyCondition contains the information required to summarize a
// v1.HelmRepository Ready Condition.
// v1beta2.HelmRepository Ready Condition.
var helmRepositoryReadyCondition = summarize.Conditions{
	Target: meta.ReadyCondition,
	Owned: []string{

@@ -102,7 +96,7 @@ var helmRepositoryFailConditions = []string{
// +kubebuilder:rbac:groups=source.toolkit.fluxcd.io,resources=helmrepositories/finalizers,verbs=get;create;update;patch;delete
// +kubebuilder:rbac:groups="",resources=events,verbs=create;patch

// HelmRepositoryReconciler reconciles a v1.HelmRepository object.
// HelmRepositoryReconciler reconciles a v1beta2.HelmRepository object.
type HelmRepositoryReconciler struct {
	client.Client
	kuberecorder.EventRecorder

@@ -115,37 +109,39 @@ type HelmRepositoryReconciler struct {
	Cache *cache.Cache
	TTL   time.Duration
	*cache.CacheRecorder

	patchOptions []patch.Option
}

type HelmRepositoryReconcilerOptions struct {
	RateLimiter             workqueue.TypedRateLimiter[reconcile.Request]
	MaxConcurrentReconciles int
	RateLimiter             ratelimiter.RateLimiter
}

// helmRepositoryReconcileFunc is the function type for all the
// v1.HelmRepository (sub)reconcile functions. The type implementations
// v1beta2.HelmRepository (sub)reconcile functions. The type implementations
// are grouped and executed serially to perform the complete reconcile of the
// object.
type helmRepositoryReconcileFunc func(ctx context.Context, sp *patch.SerialPatcher, obj *sourcev1.HelmRepository, artifact *sourcev1.Artifact, repo *repository.ChartRepository) (sreconcile.Result, error)
type helmRepositoryReconcileFunc func(ctx context.Context, obj *sourcev1.HelmRepository, artifact *sourcev1.Artifact, repo *repository.ChartRepository) (sreconcile.Result, error)

func (r *HelmRepositoryReconciler) SetupWithManager(mgr ctrl.Manager) error {
	return r.SetupWithManagerAndOptions(mgr, HelmRepositoryReconcilerOptions{})
}

func (r *HelmRepositoryReconciler) SetupWithManagerAndOptions(mgr ctrl.Manager, opts HelmRepositoryReconcilerOptions) error {
	r.patchOptions = getPatchOptions(helmRepositoryReadyCondition.Owned, r.ControllerName)

	return ctrl.NewControllerManagedBy(mgr).
		For(&sourcev1.HelmRepository{}).
		WithEventFilter(
			predicate.And(
				intpredicates.HelmRepositoryOCIMigrationPredicate{},
				predicate.Or(
					intpredicates.HelmRepositoryTypePredicate{RepositoryType: sourcev1.HelmRepositoryTypeDefault},
					intpredicates.HelmRepositoryTypePredicate{RepositoryType: ""},
				),
				predicate.Or(predicate.GenerationChangedPredicate{}, predicates.ReconcileRequestedPredicate{}),
			),
		).
		WithOptions(controller.Options{
			RateLimiter:             opts.RateLimiter,
			MaxConcurrentReconciles: opts.MaxConcurrentReconciles,
			RateLimiter:             opts.RateLimiter,
			RecoverPanic:            true,
		}).
		Complete(r)
}
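On main the RateLimiter field expects client-go's typed workqueue API, as the struct above shows. A sketch of filling controller.Options with an exponential per-item backoff; the 5ms/1000s bounds are an illustrative choice and assume a controller-runtime version whose Options takes the typed rate limiter:

package main

import (
	"time"

	"k8s.io/client-go/util/workqueue"
	"sigs.k8s.io/controller-runtime/pkg/controller"
	"sigs.k8s.io/controller-runtime/pkg/reconcile"
)

func buildOptions() controller.Options {
	return controller.Options{
		// Per-item exponential backoff: retries for a failing object back
		// off from 5ms up to 1000s, without starving other objects.
		RateLimiter: workqueue.NewTypedItemExponentialFailureRateLimiter[reconcile.Request](
			5*time.Millisecond, 1000*time.Second),
		MaxConcurrentReconciles: 2,
	}
}

func main() {
	_ = buildOptions()
}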
@@ -160,12 +156,13 @@ func (r *HelmRepositoryReconciler) Reconcile(ctx context.Context, req ctrl.Reque
		return ctrl.Result{}, client.IgnoreNotFound(err)
	}

	// Initialize the patch helper with the current version of the object.
	serialPatcher := patch.NewSerialPatcher(obj, r.Client)
	// Record suspended status metric
	r.RecordSuspend(ctx, obj, obj.Spec.Suspend)

	// If it's of type OCI, migrate the object to static.
	if obj.Spec.Type == sourcev1.HelmRepositoryTypeOCI {
		return r.migrationToStatic(ctx, serialPatcher, obj)
	// Initialize the patch helper with the current version of the object.
	patchHelper, err := patch.NewHelper(obj, r.Client)
	if err != nil {
		return ctrl.Result{}, err
	}

	// recResult stores the abstracted reconcile result.

@@ -174,43 +171,41 @@ func (r *HelmRepositoryReconciler) Reconcile(ctx context.Context, req ctrl.Reque
	// Always attempt to patch the object after each reconciliation.
	// NOTE: The final runtime result and error are set in this block.
	defer func() {
		summarizeHelper := summarize.NewHelper(r.EventRecorder, serialPatcher)
		summarizeHelper := summarize.NewHelper(r.EventRecorder, patchHelper)
		summarizeOpts := []summarize.Option{
			summarize.WithConditions(helmRepositoryReadyCondition),
			summarize.WithReconcileResult(recResult),
			summarize.WithReconcileError(retErr),
			summarize.WithIgnoreNotFound(),
			summarize.WithProcessors(
				summarize.ErrorActionHandler,
				summarize.RecordContextualError,
				summarize.RecordReconcileReq,
			),
			summarize.WithResultBuilder(sreconcile.AlwaysRequeueResultBuilder{
				RequeueAfter: jitter.JitteredIntervalDuration(obj.GetRequeueAfter()),
			}),
			summarize.WithResultBuilder(sreconcile.AlwaysRequeueResultBuilder{RequeueAfter: obj.GetRequeueAfter()}),
			summarize.WithPatchFieldOwner(r.ControllerName),
		}
		result, retErr = summarizeHelper.SummarizeAndPatch(ctx, obj, summarizeOpts...)

		// Always record duration metrics.
		// Always record readiness and duration metrics
		r.Metrics.RecordReadiness(ctx, obj)
		r.Metrics.RecordDuration(ctx, obj, start)
	}()

	// Examine if the object is under deletion.
	if !obj.ObjectMeta.DeletionTimestamp.IsZero() {
		recResult, retErr = r.reconcileDelete(ctx, obj)
		return
	}

	// Add finalizer first if not exist to avoid the race condition
	// between init and delete.
	// Note: Finalizers in general can only be added when the deletionTimestamp
	// is not set.
	// between init and delete
	if !controllerutil.ContainsFinalizer(obj, sourcev1.SourceFinalizer) {
		controllerutil.AddFinalizer(obj, sourcev1.SourceFinalizer)
		recResult = sreconcile.ResultRequeue
		return
	}

	// Examine if the object is under deletion
	// or if a type change has happened
	if !obj.ObjectMeta.DeletionTimestamp.IsZero() || (obj.Spec.Type != "" && obj.Spec.Type != sourcev1.HelmRepositoryTypeDefault) {
		recResult, retErr = r.reconcileDelete(ctx, obj)
		return
	}

	// Return if the object is suspended.
	if obj.Spec.Suspend {
		log.Info("reconciliation is suspended for this object")

@@ -224,36 +219,19 @@ func (r *HelmRepositoryReconciler) Reconcile(ctx context.Context, req ctrl.Reque
		r.reconcileSource,
		r.reconcileArtifact,
	}
	recResult, retErr = r.reconcile(ctx, serialPatcher, obj, reconcilers)
	recResult, retErr = r.reconcile(ctx, obj, reconcilers)
	return
}

// reconcile iterates through the helmRepositoryReconcileFunc tasks for the
// object. It returns early on the first call that returns
// reconcile.ResultRequeue, or produces an error.
func (r *HelmRepositoryReconciler) reconcile(ctx context.Context, sp *patch.SerialPatcher,
	obj *sourcev1.HelmRepository, reconcilers []helmRepositoryReconcileFunc) (sreconcile.Result, error) {
func (r *HelmRepositoryReconciler) reconcile(ctx context.Context, obj *sourcev1.HelmRepository, reconcilers []helmRepositoryReconcileFunc) (sreconcile.Result, error) {
	oldObj := obj.DeepCopy()

	rreconcile.ProgressiveStatus(false, obj, meta.ProgressingReason, "reconciliation in progress")

	var reconcileAtVal string
	if v, ok := meta.ReconcileAnnotationValue(obj.GetAnnotations()); ok {
		reconcileAtVal = v
	}

	// Persist reconciling if generation differs or reconciliation is requested.
	switch {
	case obj.Generation != obj.Status.ObservedGeneration:
		rreconcile.ProgressiveStatus(false, obj, meta.ProgressingReason,
			"processing object: new generation %d -> %d", obj.Status.ObservedGeneration, obj.Generation)
		if err := sp.Patch(ctx, obj, r.patchOptions...); err != nil {
			return sreconcile.ResultEmpty, serror.NewGeneric(err, sourcev1.PatchOperationFailedReason)
		}
	case reconcileAtVal != obj.Status.GetLastHandledReconcileRequest():
		if err := sp.Patch(ctx, obj, r.patchOptions...); err != nil {
			return sreconcile.ResultEmpty, serror.NewGeneric(err, sourcev1.PatchOperationFailedReason)
		}
	// Mark as reconciling if generation differs.
	if obj.Generation != obj.Status.ObservedGeneration {
		conditions.MarkReconciling(obj, "NewGeneration", "reconciling new object generation (%d)", obj.Generation)
	}

	var chartRepo repository.ChartRepository

@@ -263,7 +241,7 @@ func (r *HelmRepositoryReconciler) reconcile(ctx context.Context, sp *patch.Seri
	var res sreconcile.Result
	var resErr error
	for _, rec := range reconcilers {
		recResult, err := rec(ctx, sp, obj, &artifact, &chartRepo)
		recResult, err := rec(ctx, obj, &artifact, &chartRepo)
		// Exit immediately on ResultRequeue.
		if recResult == sreconcile.ResultRequeue {
			return sreconcile.ResultRequeue, nil

@@ -279,19 +257,19 @@ func (r *HelmRepositoryReconciler) reconcile(ctx context.Context, sp *patch.Seri
		res = sreconcile.LowestRequeuingResult(res, recResult)
	}

	r.notify(ctx, oldObj, obj, &chartRepo, res, resErr)
	r.notify(ctx, oldObj, obj, chartRepo, res, resErr)

	return res, resErr
}

// notify emits notification related to the reconciliation.
func (r *HelmRepositoryReconciler) notify(ctx context.Context, oldObj, newObj *sourcev1.HelmRepository, chartRepo *repository.ChartRepository, res sreconcile.Result, resErr error) {
func (r *HelmRepositoryReconciler) notify(ctx context.Context, oldObj, newObj *sourcev1.HelmRepository, chartRepo repository.ChartRepository, res sreconcile.Result, resErr error) {
	// Notify successful reconciliation for new artifact and recovery from any
	// failure.
	if resErr == nil && res == sreconcile.ResultSuccess && newObj.Status.Artifact != nil {
		annotations := map[string]string{
			fmt.Sprintf("%s/%s", sourcev1.GroupVersion.Group, eventv1.MetaRevisionKey): newObj.Status.Artifact.Revision,
			fmt.Sprintf("%s/%s", sourcev1.GroupVersion.Group, eventv1.MetaDigestKey):   newObj.Status.Artifact.Digest,
			sourcev1.GroupVersion.Group + "/revision": newObj.Status.Artifact.Revision,
			sourcev1.GroupVersion.Group + "/checksum": newObj.Status.Artifact.Checksum,
		}

		humanReadableSize := "unknown size"

@@ -299,10 +277,15 @@ func (r *HelmRepositoryReconciler) notify(ctx context.Context, oldObj, newObj *s
			humanReadableSize = fmt.Sprintf("size %s", units.HumanSize(float64(*size)))
		}

		var oldChecksum string
		if oldObj.GetArtifact() != nil {
			oldChecksum = oldObj.GetArtifact().Checksum
		}

		message := fmt.Sprintf("stored fetched index of %s from '%s'", humanReadableSize, chartRepo.URL)

		// Notify on new artifact and failure recovery.
		if !oldObj.GetArtifact().HasDigest(newObj.GetArtifact().Digest) {
		if oldChecksum != newObj.GetArtifact().Checksum {
			r.AnnotatedEventf(newObj, annotations, corev1.EventTypeNormal,
				"NewArtifact", message)
			ctrl.LoggerFrom(ctx).Info(message)

@@ -310,8 +293,8 @@ func (r *HelmRepositoryReconciler) notify(ctx context.Context, oldObj, newObj *s
			if sreconcile.FailureRecovery(oldObj, newObj, helmRepositoryFailConditions) {
				r.AnnotatedEventf(newObj, annotations, corev1.EventTypeNormal,
					meta.SucceededReason, message)
				ctrl.LoggerFrom(ctx).Info(message)
			}
			ctrl.LoggerFrom(ctx).Info(message)
		}
	}
}
@@ -328,50 +311,22 @@ func (r *HelmRepositoryReconciler) notify(ctx context.Context, oldObj, newObj *s
// condition is added.
// The hostname of any URL in the Status of the object is updated, to ensure
// they match the Storage server hostname of current runtime.
func (r *HelmRepositoryReconciler) reconcileStorage(ctx context.Context, sp *patch.SerialPatcher,
	obj *sourcev1.HelmRepository, _ *sourcev1.Artifact, _ *repository.ChartRepository) (sreconcile.Result, error) {
func (r *HelmRepositoryReconciler) reconcileStorage(ctx context.Context, obj *sourcev1.HelmRepository, _ *sourcev1.Artifact, _ *repository.ChartRepository) (sreconcile.Result, error) {
	// Garbage collect previous advertised artifact(s) from storage
	_ = r.garbageCollect(ctx, obj)

	var artifactMissing bool
	if artifact := obj.GetArtifact(); artifact != nil {
		// Determine if the advertised artifact is still in storage
		if !r.Storage.ArtifactExist(*artifact) {
			artifactMissing = true
		}

		// If the artifact is in storage, verify if the advertised digest still
		// matches the actual artifact
		if !artifactMissing {
			if err := r.Storage.VerifyArtifact(*artifact); err != nil {
				r.Eventf(obj, corev1.EventTypeWarning, "ArtifactVerificationFailed", "failed to verify integrity of artifact: %s", err.Error())

				if err = r.Storage.Remove(*artifact); err != nil {
					return sreconcile.ResultEmpty, fmt.Errorf("failed to remove artifact after digest mismatch: %w", err)
				}

				artifactMissing = true
			}
		}

		// If the artifact is missing, remove it from the object
		if artifactMissing {
			obj.Status.Artifact = nil
			obj.Status.URL = ""
		}
	// Determine if the advertised artifact is still in storage
	if artifact := obj.GetArtifact(); artifact != nil && !r.Storage.ArtifactExist(*artifact) {
		obj.Status.Artifact = nil
		obj.Status.URL = ""
		// Remove the condition as the artifact doesn't exist.
		conditions.Delete(obj, sourcev1.ArtifactInStorageCondition)
	}

	// Record that we do not have an artifact
	if obj.GetArtifact() == nil {
		msg := "building artifact"
		if artifactMissing {
			msg += ": disappeared from storage"
		}
		rreconcile.ProgressiveStatus(true, obj, meta.ProgressingReason, "%s", msg)
		conditions.MarkReconciling(obj, "NoArtifact", "no artifact for resource in storage")
		conditions.Delete(obj, sourcev1.ArtifactInStorageCondition)
		if err := sp.Patch(ctx, obj, r.patchOptions...); err != nil {
			return sreconcile.ResultEmpty, serror.NewGeneric(err, sourcev1.PatchOperationFailedReason)
		}
		return sreconcile.ResultSuccess, nil
	}

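VerifyArtifact has to recompute the stored file's digest and compare it to the advertised one. A standalone sketch of that check using go-digest's Verifier; the real method also deals with storage paths and empty digests:

package main

import (
	"fmt"
	"io"
	"os"

	"github.com/opencontainers/go-digest"
)

// verifyFile streams a file through a verifier derived from the advertised
// digest and reports a mismatch, mirroring the integrity check above.
func verifyFile(path string, advertised digest.Digest) error {
	if err := advertised.Validate(); err != nil {
		return fmt.Errorf("invalid digest: %w", err)
	}
	f, err := os.Open(path)
	if err != nil {
		return err
	}
	defer f.Close()

	verifier := advertised.Verifier()
	if _, err := io.Copy(verifier, f); err != nil {
		return err
	}
	if !verifier.Verified() {
		return fmt.Errorf("computed digest doesn't match advertised '%s'", advertised)
	}
	return nil
}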
@@ -384,138 +339,136 @@ func (r *HelmRepositoryReconciler) reconcileStorage(ctx context.Context, sp *pat
}

// reconcileSource attempts to fetch the Helm repository index using the
// specified configuration on the v1.HelmRepository object.
// specified configuration on the v1beta2.HelmRepository object.
//
// When the fetch fails, it records v1.FetchFailedCondition=True and
// When the fetch fails, it records v1beta2.FetchFailedCondition=True and
// returns early.
// If successful and the index is valid, any previous
// v1.FetchFailedCondition is removed, and the repository.ChartRepository
// v1beta2.FetchFailedCondition is removed, and the repository.ChartRepository
// pointer is set to the newly fetched index.
func (r *HelmRepositoryReconciler) reconcileSource(ctx context.Context, sp *patch.SerialPatcher,
	obj *sourcev1.HelmRepository, artifact *sourcev1.Artifact, chartRepo *repository.ChartRepository) (sreconcile.Result, error) {
	// Ensure it's not an OCI URL. API validation ensures that only
	// http/https/oci scheme are allowed.
	if strings.HasPrefix(obj.Spec.URL, helmreg.OCIScheme) {
		err := fmt.Errorf("'oci' URL scheme cannot be used with 'default' HelmRepository type")
		e := serror.NewStalling(
			fmt.Errorf("invalid Helm repository URL: %w", err),
			sourcev1.URLInvalidReason,
		)
		conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, e.Reason, "%s", e)
		return sreconcile.ResultEmpty, e
func (r *HelmRepositoryReconciler) reconcileSource(ctx context.Context, obj *sourcev1.HelmRepository, artifact *sourcev1.Artifact, chartRepo *repository.ChartRepository) (sreconcile.Result, error) {
	var tlsConfig *tls.Config

	// Configure Helm client to access repository
	clientOpts := []helmgetter.Option{
		helmgetter.WithTimeout(obj.Spec.Timeout.Duration),
		helmgetter.WithURL(obj.Spec.URL),
		helmgetter.WithPassCredentialsAll(obj.Spec.PassCredentials),
	}

	normalizedURL, err := repository.NormalizeURL(obj.Spec.URL)
	if err != nil {
		e := serror.NewStalling(
			fmt.Errorf("invalid Helm repository URL: %w", err),
			sourcev1.URLInvalidReason,
		)
		conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, e.Reason, "%s", e)
		return sreconcile.ResultEmpty, e
	}
	// Configure any authentication related options
	if obj.Spec.SecretRef != nil {
		// Attempt to retrieve secret
		name := types.NamespacedName{
			Namespace: obj.GetNamespace(),
			Name:      obj.Spec.SecretRef.Name,
		}
		var secret corev1.Secret
		if err := r.Client.Get(ctx, name, &secret); err != nil {
			e := &serror.Event{
				Err:    fmt.Errorf("failed to get secret '%s': %w", name.String(), err),
				Reason: sourcev1.AuthenticationFailedReason,
			}
			conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, e.Reason, e.Err.Error())
			return sreconcile.ResultEmpty, e
		}

	clientOpts, _, err := getter.GetClientOpts(ctx, r.Client, obj, normalizedURL)
	if err != nil {
		if errors.Is(err, getter.ErrDeprecatedTLSConfig) {
			ctrl.LoggerFrom(ctx).
				Info("warning: specifying TLS authentication data via `.spec.secretRef` is deprecated, please use `.spec.certSecretRef` instead")
		} else {
			e := serror.NewGeneric(
				err,
				sourcev1.AuthenticationFailedReason,
			)
			conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, e.Reason, "%s", e)
		// Construct actual options
		opts, err := getter.ClientOptionsFromSecret(secret)
		if err != nil {
			e := &serror.Event{
				Err:    fmt.Errorf("failed to configure Helm client with secret data: %w", err),
				Reason: sourcev1.AuthenticationFailedReason,
			}
			conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, e.Reason, e.Err.Error())
			// Return err as the content of the secret may change.
			return sreconcile.ResultEmpty, e
		}
		clientOpts = append(clientOpts, opts...)

		tlsConfig, err = getter.TLSClientConfigFromSecret(secret, obj.Spec.URL)
		if err != nil {
			e := &serror.Event{
				Err:    fmt.Errorf("failed to create TLS client config with secret data: %w", err),
				Reason: sourcev1.AuthenticationFailedReason,
			}
			conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, e.Reason, e.Err.Error())
			// Requeue as content of secret might change
			return sreconcile.ResultEmpty, e
		}
	}

	// Construct Helm chart repository with options and download index
	newChartRepo, err := repository.NewChartRepository(obj.Spec.URL, "", r.Getters, clientOpts.TlsConfig, clientOpts.GetterOpts...)
	newChartRepo, err := repository.NewChartRepository(obj.Spec.URL, "", r.Getters, tlsConfig, clientOpts)
	if err != nil {
		switch err.(type) {
		case *url.Error:
			e := serror.NewStalling(
				fmt.Errorf("invalid Helm repository URL: %w", err),
				sourcev1.URLInvalidReason,
			)
			conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, e.Reason, "%s", e)
			e := &serror.Stalling{
				Err:    fmt.Errorf("invalid Helm repository URL: %w", err),
				Reason: sourcev1.URLInvalidReason,
			}
			conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, e.Reason, e.Err.Error())
			return sreconcile.ResultEmpty, e
		default:
			e := serror.NewStalling(
				fmt.Errorf("failed to construct Helm client: %w", err),
				meta.FailedReason,
			)
			conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, e.Reason, "%s", e)
			e := &serror.Stalling{
				Err:    fmt.Errorf("failed to construct Helm client: %w", err),
				Reason: meta.FailedReason,
			}
			conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, e.Reason, e.Err.Error())
			return sreconcile.ResultEmpty, e
		}
	}

	// Fetch the repository index from remote.
	if err := newChartRepo.CacheIndex(); err != nil {
		e := serror.NewGeneric(
			fmt.Errorf("failed to fetch Helm repository index: %w", err),
			meta.FailedReason,
		)
		conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, e.Reason, "%s", e)
	checksum, err := newChartRepo.CacheIndex()
	if err != nil {
		e := &serror.Event{
			Err:    fmt.Errorf("failed to fetch Helm repository index: %w", err),
			Reason: meta.FailedReason,
		}
		conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, e.Reason, e.Err.Error())
		// Coin flip on transient or persistent error, return error and hope for the best
		return sreconcile.ResultEmpty, e
	}
	*chartRepo = *newChartRepo

	// Early comparison to current Artifact.
	if curArtifact := obj.GetArtifact(); curArtifact != nil {
		curRev := digest.Digest(curArtifact.Revision)
		if curRev.Validate() == nil {
			// Short-circuit based on the fetched index being an exact match to the
			// stored Artifact.
			if newRev := chartRepo.Digest(curRev.Algorithm()); newRev.Validate() == nil && (newRev == curRev) {
				*artifact = *curArtifact
				conditions.Delete(obj, sourcev1.FetchFailedCondition)
				return sreconcile.ResultSuccess, nil
			}
	// Short-circuit based on the fetched index being an exact match to the
	// stored Artifact. This prevents having to unmarshal the YAML to calculate
	// the (stable) revision, which is a memory-expensive operation.
	if obj.GetArtifact().HasChecksum(checksum) {
		*artifact = *obj.GetArtifact()
		conditions.Delete(obj, sourcev1.FetchFailedCondition)
		return sreconcile.ResultSuccess, nil
	}

	// Load the cached repository index to ensure it passes validation. This
	// also populates chartRepo.Checksum.
	if err := chartRepo.LoadFromCache(); err != nil {
		e := &serror.Event{
			Err:    fmt.Errorf("failed to load Helm repository from cache: %w", err),
			Reason: sourcev1.IndexationFailedReason,
		}
	}

	// Load the cached repository index to ensure it passes validation.
	if err := chartRepo.LoadFromPath(); err != nil {
		e := serror.NewGeneric(
			fmt.Errorf("failed to load Helm repository from index YAML: %w", err),
			sourcev1.IndexationFailedReason,
		)
		conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, e.Reason, "%s", e)
		return sreconcile.ResultEmpty, e
	}
	// Delete any stale failure observation
	conditions.Delete(obj, sourcev1.FetchFailedCondition)

	// Calculate revision.
	revision := chartRepo.Digest(intdigest.Canonical)
	if revision.Validate() != nil {
		e := serror.NewGeneric(
			fmt.Errorf("failed to calculate revision: %w", err),
			sourcev1.IndexationFailedReason,
		)
		conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, e.Reason, "%s", e)
		conditions.MarkTrue(obj, sourcev1.FetchFailedCondition, e.Reason, e.Err.Error())
		return sreconcile.ResultEmpty, e
	}

	// Mark observations about the revision on the object.
	message := fmt.Sprintf("new index revision '%s'", revision)
	if obj.GetArtifact() != nil {
		conditions.MarkTrue(obj, sourcev1.ArtifactOutdatedCondition, "NewRevision", "%s", message)
	}
	rreconcile.ProgressiveStatus(true, obj, meta.ProgressingReason, "building artifact: %s", message)
	if err := sp.Patch(ctx, obj, r.patchOptions...); err != nil {
		return sreconcile.ResultEmpty, serror.NewGeneric(err, sourcev1.PatchOperationFailedReason)
	if !obj.GetArtifact().HasRevision(chartRepo.Checksum) {
		message := fmt.Sprintf("new index revision '%s'", checksum)
		conditions.MarkTrue(obj, sourcev1.ArtifactOutdatedCondition, "NewRevision", message)
		conditions.MarkReconciling(obj, "NewRevision", message)
	}

	// Create potential new artifact.
	// Note: Since this is a potential artifact, artifact.Checksum is empty at
	// this stage. It's populated when the artifact is written in storage.
	*artifact = r.Storage.NewArtifactFor(obj.Kind,
		obj.ObjectMeta.GetObjectMeta(),
		revision.String(),
		fmt.Sprintf("index-%s.yaml", revision.Encoded()),
	)
		chartRepo.Checksum,
		fmt.Sprintf("index-%s.yaml", checksum))

	// Delete any stale failure observation
	conditions.Delete(obj, sourcev1.FetchFailedCondition)

	return sreconcile.ResultSuccess, nil
}
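The switch on err.(type) above encodes the terminal-versus-transient split: a *url.Error means the spec's URL is bad and no requeue will fix it (stalling), anything else may be transient (generic). A minimal sketch of the same classification with errors.As; the serror types are stand-ins here and only the branching logic is the point:

package main

import (
	"errors"
	"fmt"
	"net/url"
)

// classify mirrors the stalling/generic decision the reconciler makes.
func classify(err error) string {
	var urlErr *url.Error
	if errors.As(err, &urlErr) {
		return "stalling: fix the object's URL"
	}
	return "generic: retry on the next reconciliation"
}

func main() {
	_, err := url.Parse("://bad") // yields a *url.Error
	fmt.Println(classify(err))
}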
@@ -524,87 +477,65 @@ func (r *HelmRepositoryReconciler) reconcileSource(ctx context.Context, sp *patc
// (Status) data on the object does not match the given.
//
// The inspection of the given data to the object is deferred, ensuring any
// stale observations like v1.ArtifactOutdatedCondition are removed.
// stale observations like v1beta2.ArtifactOutdatedCondition are removed.
// If the given Artifact does not differ from the object's current, it returns
// early.
// On a successful archive, the Artifact in the Status of the object is set,
// and the symlink in the Storage is updated to its path.
func (r *HelmRepositoryReconciler) reconcileArtifact(ctx context.Context, sp *patch.SerialPatcher, obj *sourcev1.HelmRepository, artifact *sourcev1.Artifact, chartRepo *repository.ChartRepository) (sreconcile.Result, error) {
func (r *HelmRepositoryReconciler) reconcileArtifact(ctx context.Context, obj *sourcev1.HelmRepository, artifact *sourcev1.Artifact, chartRepo *repository.ChartRepository) (sreconcile.Result, error) {
	// Set the ArtifactInStorageCondition if there's no drift.
	defer func() {
		if obj.GetArtifact().HasRevision(artifact.Revision) {
			conditions.Delete(obj, sourcev1.ArtifactOutdatedCondition)
			conditions.MarkTrue(obj, sourcev1.ArtifactInStorageCondition, meta.SucceededReason,
				"stored artifact: revision '%s'", artifact.Revision)
				"stored artifact for revision '%s'", artifact.Revision)
		}
		if err := chartRepo.Clear(); err != nil {

		chartRepo.Unload()

		if err := chartRepo.RemoveCache(); err != nil {
			ctrl.LoggerFrom(ctx).Error(err, "failed to remove temporary cached index file")
		}
	}()

	if obj.GetArtifact().HasRevision(artifact.Revision) && obj.GetArtifact().HasDigest(artifact.Digest) {
		// Extend TTL of the Index in the cache (if present).
		if r.Cache != nil {
			r.Cache.SetExpiration(artifact.Path, r.TTL)
		}

	if obj.GetArtifact().HasRevision(artifact.Revision) && obj.GetArtifact().HasChecksum(artifact.Checksum) {
		r.eventLogf(ctx, obj, eventv1.EventTypeTrace, sourcev1.ArtifactUpToDateReason, "artifact up-to-date with remote revision: '%s'", artifact.Revision)
		return sreconcile.ResultSuccess, nil
	}

	// Create artifact dir
	if err := r.Storage.MkdirAll(*artifact); err != nil {
		e := serror.NewGeneric(
			fmt.Errorf("failed to create artifact directory: %w", err),
			sourcev1.DirCreationFailedReason,
		)
		conditions.MarkTrue(obj, sourcev1.StorageOperationFailedCondition, e.Reason, "%s", e)
		e := &serror.Event{
			Err:    fmt.Errorf("failed to create artifact directory: %w", err),
			Reason: sourcev1.DirCreationFailedReason,
		}
		conditions.MarkTrue(obj, sourcev1.StorageOperationFailedCondition, e.Reason, e.Err.Error())
		return sreconcile.ResultEmpty, e
	}

	// Acquire lock.
	unlock, err := r.Storage.Lock(*artifact)
	if err != nil {
		return sreconcile.ResultEmpty, serror.NewGeneric(
			fmt.Errorf("failed to acquire lock for artifact: %w", err),
			meta.FailedReason,
		)
		return sreconcile.ResultEmpty, &serror.Event{
			Err:    fmt.Errorf("failed to acquire lock for artifact: %w", err),
			Reason: meta.FailedReason,
		}
	}
	defer unlock()

	// Save artifact to storage in JSON format.
	b, err := chartRepo.ToJSON()
	if err != nil {
		e := serror.NewGeneric(
			fmt.Errorf("unable to get JSON index from chart repo: %w", err),
			sourcev1.ArchiveOperationFailedReason,
		)
		conditions.MarkTrue(obj, sourcev1.StorageOperationFailedCondition, e.Reason, "%s", e)
		return sreconcile.ResultEmpty, e
	}
	if err = r.Storage.Copy(artifact, bytes.NewBuffer(b)); err != nil {
		e := serror.NewGeneric(
			fmt.Errorf("unable to save artifact to storage: %w", err),
			sourcev1.ArchiveOperationFailedReason,
		)
		conditions.MarkTrue(obj, sourcev1.StorageOperationFailedCondition, e.Reason, "%s", e)
	// Save artifact to storage.
	if err = r.Storage.CopyFromPath(artifact, chartRepo.CachePath); err != nil {
		e := &serror.Event{
			Err:    fmt.Errorf("unable to save artifact to storage: %w", err),
			Reason: sourcev1.ArchiveOperationFailedReason,
		}
		conditions.MarkTrue(obj, sourcev1.StorageOperationFailedCondition, e.Reason, e.Err.Error())
		return sreconcile.ResultEmpty, e
	}

	// Record it on the object.
	obj.Status.Artifact = artifact.DeepCopy()

	// Cache the index if it was successfully retrieved.
	if r.Cache != nil && chartRepo.Index != nil {
		// The cache keys have to be safe in multi-tenancy environments, as
		// otherwise it could be used as a vector to bypass the repository's
		// authentication. Using the Artifact.Path is safe as the path is in
		// the format of: /<repository-name>/<chart-name>/<filename>.
		if err := r.Cache.Set(artifact.Path, chartRepo.Index, r.TTL); err != nil {
			r.eventLogf(ctx, obj, eventv1.EventTypeTrace, sourcev1.CacheOperationFailedReason, "failed to cache index: %s", err)
		}
	}

	// Update index symlink.
	indexURL, err := r.Storage.Symlink(*artifact, "index.yaml")
	if err != nil {

@@ -615,6 +546,26 @@ func (r *HelmRepositoryReconciler) reconcileArtifact(ctx context.Context, sp *pa
		obj.Status.URL = indexURL
	}
	conditions.Delete(obj, sourcev1.StorageOperationFailedCondition)

	// enable cache if applicable
	if r.Cache != nil && chartRepo.IndexCache == nil {
		chartRepo.SetMemCache(r.Storage.LocalPath(*artifact), r.Cache, r.TTL, func(event string) {
			r.IncCacheEvents(event, obj.GetName(), obj.GetNamespace())
		})
	}

	// Cache the index if it was successfully retrieved
	// and the chart was successfully built
	if r.Cache != nil && chartRepo.Index != nil {
		// The cache keys have to be safe in multi-tenancy environments,
		// as otherwise it could be used as a vector to bypass the helm repository's authentication.
		// Using r.Storage.LocalPath(*repo.GetArtifact()) is safe as the path is in the format /<helm-repository-name>/<chart-name>/<filename>.
		err := chartRepo.CacheIndexInMemory()
		if err != nil {
			r.eventLogf(ctx, obj, eventv1.EventTypeTrace, sourcev1.CacheOperationFailedReason, "failed to cache index: %s", err)
		}
	}

	return sreconcile.ResultSuccess, nil
}

@ -633,12 +584,6 @@ func (r *HelmRepositoryReconciler) reconcileDelete(ctx context.Context, obj *sou
|
|||
controllerutil.RemoveFinalizer(obj, sourcev1.SourceFinalizer)
|
||||
}
|
||||
|
||||
// Delete cache metrics.
|
||||
if r.CacheRecorder != nil && r.Metrics.IsDelete(obj) {
|
||||
r.DeleteCacheEvent(cache.CacheEventTypeHit, obj.Name, obj.Namespace)
|
||||
r.DeleteCacheEvent(cache.CacheEventTypeMiss, obj.Name, obj.Namespace)
|
||||
}
|
||||
|
||||
// Stop reconciliation as the object is being deleted
|
||||
return sreconcile.ResultEmpty, nil
|
||||
}
|
||||
|
@ -652,10 +597,10 @@ func (r *HelmRepositoryReconciler) reconcileDelete(ctx context.Context, obj *sou
|
|||
func (r *HelmRepositoryReconciler) garbageCollect(ctx context.Context, obj *sourcev1.HelmRepository) error {
|
||||
if !obj.DeletionTimestamp.IsZero() || (obj.Spec.Type != "" && obj.Spec.Type != sourcev1.HelmRepositoryTypeDefault) {
|
||||
if deleted, err := r.Storage.RemoveAll(r.Storage.NewArtifactFor(obj.Kind, obj.GetObjectMeta(), "", "*")); err != nil {
|
||||
return serror.NewGeneric(
|
||||
fmt.Errorf("garbage collection for deleted resource failed: %w", err),
|
||||
"GarbageCollectionFailed",
|
||||
)
|
||||
return &serror.Event{
|
||||
Err: fmt.Errorf("garbage collection for deleted resource failed: %w", err),
|
||||
Reason: "GarbageCollectionFailed",
|
||||
}
|
||||
} else if deleted != "" {
|
||||
r.eventLogf(ctx, obj, eventv1.EventTypeTrace, "GarbageCollectionSucceeded",
|
||||
"garbage collected artifacts for deleted resource")
|
||||
|
@ -670,14 +615,14 @@ func (r *HelmRepositoryReconciler) garbageCollect(ctx context.Context, obj *sour
|
|||
if obj.GetArtifact() != nil {
|
||||
delFiles, err := r.Storage.GarbageCollect(ctx, *obj.GetArtifact(), time.Second*5)
|
||||
if err != nil {
|
||||
return serror.NewGeneric(
|
||||
fmt.Errorf("garbage collection of artifacts failed: %w", err),
|
||||
"GarbageCollectionFailed",
|
||||
)
|
||||
return &serror.Event{
|
||||
Err: fmt.Errorf("garbage collection of artifacts failed: %w", err),
|
||||
Reason: "GarbageCollectionFailed",
|
||||
}
|
||||
}
|
||||
if len(delFiles) > 0 {
|
||||
r.eventLogf(ctx, obj, eventv1.EventTypeTrace, "GarbageCollectionSucceeded",
|
||||
"garbage collected %d artifacts", len(delFiles))
|
||||
fmt.Sprintf("garbage collected %d artifacts", len(delFiles)))
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
@ -699,31 +644,3 @@ func (r *HelmRepositoryReconciler) eventLogf(ctx context.Context, obj runtime.Ob
|
|||
}
|
||||
r.Eventf(obj, eventType, reason, msg)
|
||||
}
|
||||
|
||||
// migrationToStatic migrates a HelmRepository of type OCI to a static object.
func (r *HelmRepositoryReconciler) migrationToStatic(ctx context.Context, sp *patch.SerialPatcher, obj *sourcev1.HelmRepository) (result ctrl.Result, err error) {
|
||||
// Skip migration if suspended and not being deleted.
|
||||
if obj.Spec.Suspend && obj.DeletionTimestamp.IsZero() {
|
||||
return ctrl.Result{}, nil
|
||||
}
|
||||
|
||||
if !intpredicates.HelmRepositoryOCIRequireMigration(obj) {
|
||||
// Already migrated, nothing to do.
|
||||
return ctrl.Result{}, nil
|
||||
}
|
||||
|
||||
// Delete any artifact.
|
||||
_, err = r.reconcileDelete(ctx, obj)
|
||||
if err != nil {
|
||||
return ctrl.Result{}, err
|
||||
}
|
||||
// Delete finalizer and reset the status.
|
||||
controllerutil.RemoveFinalizer(obj, sourcev1.SourceFinalizer)
|
||||
obj.Status = sourcev1.HelmRepositoryStatus{}
|
||||
|
||||
if err := sp.Patch(ctx, obj); err != nil {
|
||||
return ctrl.Result{}, err
|
||||
}
|
||||
|
||||
return ctrl.Result{}, nil
|
||||
}
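A minimal sketch of the migration predicate referenced above; the internal/predicates implementation is not part of this diff, so the checks below are assumptions about what "requires migration" could mean for an OCI HelmRepository:

func helmRepositoryOCIRequireMigration(obj *sourcev1.HelmRepository) bool {
	if obj.Spec.Type != sourcev1.HelmRepositoryTypeOCI {
		return false
	}
	// Any leftover artifact, URL or finalizer means the object has not yet
	// been reset to a plain static object.
	return obj.Status.Artifact != nil ||
		obj.Status.URL != "" ||
		controllerutil.ContainsFinalizer(obj, sourcev1.SourceFinalizer)
}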
@ -0,0 +1,404 @@
|
|||
/*
|
||||
Copyright 2022 The Flux authors
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package controllers
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"net/url"
|
||||
"os"
|
||||
"time"
|
||||
|
||||
helmgetter "helm.sh/helm/v3/pkg/getter"
|
||||
helmreg "helm.sh/helm/v3/pkg/registry"
|
||||
corev1 "k8s.io/api/core/v1"
|
||||
apierrors "k8s.io/apimachinery/pkg/api/errors"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
"k8s.io/apimachinery/pkg/types"
|
||||
kerrors "k8s.io/apimachinery/pkg/util/errors"
|
||||
kuberecorder "k8s.io/client-go/tools/record"
|
||||
ctrl "sigs.k8s.io/controller-runtime"
|
||||
"sigs.k8s.io/controller-runtime/pkg/client"
|
||||
"sigs.k8s.io/controller-runtime/pkg/controller"
|
||||
"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
|
||||
"sigs.k8s.io/controller-runtime/pkg/predicate"
|
||||
|
||||
"github.com/fluxcd/pkg/apis/meta"
|
||||
"github.com/fluxcd/pkg/oci"
|
||||
"github.com/fluxcd/pkg/runtime/conditions"
|
||||
helper "github.com/fluxcd/pkg/runtime/controller"
|
||||
"github.com/fluxcd/pkg/runtime/patch"
|
||||
"github.com/fluxcd/pkg/runtime/predicates"
|
||||
"github.com/google/go-containerregistry/pkg/authn"
|
||||
|
||||
"github.com/fluxcd/source-controller/api/v1beta2"
|
||||
sourcev1 "github.com/fluxcd/source-controller/api/v1beta2"
|
||||
"github.com/fluxcd/source-controller/internal/helm/registry"
|
||||
"github.com/fluxcd/source-controller/internal/helm/repository"
|
||||
"github.com/fluxcd/source-controller/internal/object"
|
||||
intpredicates "github.com/fluxcd/source-controller/internal/predicates"
|
||||
)
|
||||
|
||||
var helmRepositoryOCIOwnedConditions = []string{
|
||||
meta.ReadyCondition,
|
||||
meta.ReconcilingCondition,
|
||||
meta.StalledCondition,
|
||||
}
|
||||
|
||||
var helmRepositoryOCINegativeConditions = []string{
|
||||
meta.StalledCondition,
|
||||
meta.ReconcilingCondition,
|
||||
}
|
||||
|
||||
// +kubebuilder:rbac:groups=source.toolkit.fluxcd.io,resources=helmrepositories,verbs=get;list;watch;create;update;patch;delete
|
||||
// +kubebuilder:rbac:groups=source.toolkit.fluxcd.io,resources=helmrepositories/status,verbs=get;update;patch
|
||||
// +kubebuilder:rbac:groups=source.toolkit.fluxcd.io,resources=helmrepositories/finalizers,verbs=get;create;update;patch;delete
|
||||
// +kubebuilder:rbac:groups="",resources=events,verbs=create;patch
|
||||
|
||||
// HelmRepositoryOCIReconciler reconciles a v1beta2.HelmRepository object of type OCI.
type HelmRepositoryOCIReconciler struct {
|
||||
client.Client
|
||||
kuberecorder.EventRecorder
|
||||
helper.Metrics
|
||||
Getters helmgetter.Providers
|
||||
ControllerName string
|
||||
RegistryClientGenerator RegistryClientGeneratorFunc
|
||||
}
|
||||
|
||||
// RegistryClientGeneratorFunc is a function that returns a registry client
|
||||
// and an optional file name.
|
||||
// The file is used to store the registry client credentials.
|
||||
// The caller is responsible for deleting the file.
|
||||
type RegistryClientGeneratorFunc func(isLogin bool) (*helmreg.Client, string, error)
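A hedged sketch of a conforming generator, assuming Helm's helmreg.NewClient and helmreg.ClientOptCredentialsFile options (helmreg and os are imported above); when isLogin is true a temporary credentials file is created, which the caller must delete:

func clientGenerator(isLogin bool) (*helmreg.Client, string, error) {
	if isLogin {
		// Create a temporary file to hold the registry credentials.
		f, err := os.CreateTemp("", "helm-registry-*")
		if err != nil {
			return nil, "", err
		}
		if err := f.Close(); err != nil {
			return nil, "", err
		}
		client, err := helmreg.NewClient(helmreg.ClientOptCredentialsFile(f.Name()))
		if err != nil {
			return nil, "", err
		}
		return client, f.Name(), nil
	}
	client, err := helmreg.NewClient()
	if err != nil {
		return nil, "", err
	}
	return client, "", nil
}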
func (r *HelmRepositoryOCIReconciler) SetupWithManager(mgr ctrl.Manager) error {
|
||||
return r.SetupWithManagerAndOptions(mgr, HelmRepositoryReconcilerOptions{})
|
||||
}
|
||||
|
||||
func (r *HelmRepositoryOCIReconciler) SetupWithManagerAndOptions(mgr ctrl.Manager, opts HelmRepositoryReconcilerOptions) error {
|
||||
return ctrl.NewControllerManagedBy(mgr).
|
||||
For(&sourcev1.HelmRepository{}).
|
||||
WithEventFilter(
|
||||
predicate.And(
|
||||
intpredicates.HelmRepositoryTypePredicate{RepositoryType: sourcev1.HelmRepositoryTypeOCI},
|
||||
predicate.Or(predicate.GenerationChangedPredicate{}, predicates.ReconcileRequestedPredicate{}),
|
||||
),
|
||||
).
|
||||
WithOptions(controller.Options{
|
||||
MaxConcurrentReconciles: opts.MaxConcurrentReconciles,
|
||||
RateLimiter: opts.RateLimiter,
|
||||
RecoverPanic: true,
|
||||
}).
|
||||
Complete(r)
|
||||
}
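A wiring sketch for a hypothetical main.go (not part of this diff) showing how the reconciler could be registered with a controller-runtime manager; registry.ClientGenerator is the generator used by the tests further below:

func setupHelmRepositoryOCI(mgr ctrl.Manager, getters helmgetter.Providers) error {
	return (&HelmRepositoryOCIReconciler{
		Client:                  mgr.GetClient(),
		EventRecorder:           mgr.GetEventRecorderFor("source-controller"),
		Getters:                 getters,
		ControllerName:          "source-controller",
		RegistryClientGenerator: registry.ClientGenerator,
	}).SetupWithManagerAndOptions(mgr, HelmRepositoryReconcilerOptions{
		MaxConcurrentReconciles: 2,
	})
}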
func (r *HelmRepositoryOCIReconciler) Reconcile(ctx context.Context, req ctrl.Request) (result ctrl.Result, retErr error) {
|
||||
start := time.Now()
|
||||
log := ctrl.LoggerFrom(ctx)
|
||||
|
||||
// Fetch the HelmRepository
|
||||
obj := &sourcev1.HelmRepository{}
|
||||
if err := r.Get(ctx, req.NamespacedName, obj); err != nil {
|
||||
return ctrl.Result{}, client.IgnoreNotFound(err)
|
||||
}
|
||||
|
||||
// Record suspended status metric
|
||||
r.RecordSuspend(ctx, obj, obj.Spec.Suspend)
|
||||
|
||||
// Initialize the patch helper with the current version of the object.
|
||||
patchHelper, err := patch.NewHelper(obj, r.Client)
|
||||
if err != nil {
|
||||
return ctrl.Result{}, err
|
||||
}
|
||||
|
||||
// Always attempt to patch the object after each reconciliation.
|
||||
defer func() {
|
||||
// Patch the object, prioritizing the conditions owned by the controller in
|
||||
// case of any conflicts.
|
||||
patchOpts := []patch.Option{
|
||||
patch.WithOwnedConditions{
|
||||
Conditions: helmRepositoryOCIOwnedConditions,
|
||||
},
|
||||
}
|
||||
patchOpts = append(patchOpts, patch.WithFieldOwner(r.ControllerName))
|
||||
// If a reconcile annotation value is found, set it in the object status
|
||||
// as status.lastHandledReconcileAt.
|
||||
if v, ok := meta.ReconcileAnnotationValue(obj.GetAnnotations()); ok {
|
||||
object.SetStatusLastHandledReconcileAt(obj, v)
|
||||
}
|
||||
|
||||
// Set status observed generation option if the object is stalled, or
|
||||
// if the object is ready.
|
||||
if conditions.IsStalled(obj) || conditions.IsReady(obj) {
|
||||
patchOpts = append(patchOpts, patch.WithStatusObservedGeneration{})
|
||||
}
|
||||
|
||||
if err = patchHelper.Patch(ctx, obj, patchOpts...); err != nil {
|
||||
// Ignore patch error "not found" when the object is being deleted.
|
||||
if !obj.GetDeletionTimestamp().IsZero() {
|
||||
err = kerrors.FilterOut(err, func(e error) bool { return apierrors.IsNotFound(e) })
|
||||
}
|
||||
retErr = kerrors.NewAggregate([]error{retErr, err})
|
||||
}
|
||||
|
||||
// Always record readiness and duration metrics
|
||||
r.Metrics.RecordReadiness(ctx, obj)
|
||||
r.Metrics.RecordDuration(ctx, obj, start)
|
||||
}()
|
||||
|
||||
// Add finalizer first if it doesn't exist to avoid the race condition
|
||||
// between init and delete.
|
||||
if !controllerutil.ContainsFinalizer(obj, sourcev1.SourceFinalizer) {
|
||||
controllerutil.AddFinalizer(obj, sourcev1.SourceFinalizer)
|
||||
return ctrl.Result{Requeue: true}, nil
|
||||
}
|
||||
|
||||
// Examine if the object is under deletion.
|
||||
if !obj.ObjectMeta.DeletionTimestamp.IsZero() {
|
||||
return r.reconcileDelete(ctx, obj)
|
||||
}
|
||||
|
||||
// Return if the object is suspended.
|
||||
if obj.Spec.Suspend {
|
||||
log.Info("reconciliation is suspended for this object")
|
||||
return ctrl.Result{}, nil
|
||||
}
|
||||
|
||||
// Examine if a type change has happened and act accordingly
|
||||
if obj.Spec.Type != sourcev1.HelmRepositoryTypeOCI {
|
||||
// Remove any stale condition and ignore the object if the type has
|
||||
// changed.
|
||||
obj.Status.Conditions = nil
|
||||
return ctrl.Result{}, nil
|
||||
}
|
||||
|
||||
result, retErr = r.reconcile(ctx, obj)
|
||||
return
|
||||
}
|
||||
|
||||
// reconcile reconciles the HelmRepository object. While reconciling, when an
|
||||
// error is encountered, it sets the failure details in the appropriate status
|
||||
// condition type and returns the error with appropriate ctrl.Result. The object
|
||||
// status conditions and the returned results are evaluated in the deferred
|
||||
// block at the very end to summarize the conditions to be in a consistent
|
||||
// state.
|
||||
func (r *HelmRepositoryOCIReconciler) reconcile(ctx context.Context, obj *v1beta2.HelmRepository) (result ctrl.Result, retErr error) {
|
||||
ctxTimeout, cancel := context.WithTimeout(ctx, obj.Spec.Timeout.Duration)
|
||||
defer cancel()
|
||||
|
||||
oldObj := obj.DeepCopy()
|
||||
|
||||
defer func() {
|
||||
// If it's stalled, ensure reconciling is removed.
|
||||
if sc := conditions.Get(obj, meta.StalledCondition); sc != nil && sc.Status == metav1.ConditionTrue {
|
||||
conditions.Delete(obj, meta.ReconcilingCondition)
|
||||
}
|
||||
|
||||
// Check if it's a successful reconciliation.
|
||||
if result.RequeueAfter == obj.GetRequeueAfter() && result.Requeue == false &&
|
||||
retErr == nil {
|
||||
// Remove reconciling condition if the reconciliation was successful.
|
||||
conditions.Delete(obj, meta.ReconcilingCondition)
|
||||
// If it's not ready even though it's not reconciling or stalled,
|
||||
// set the ready failure message as the error.
|
||||
// Based on isNonStalledSuccess() from internal/reconcile/summarize.
|
||||
if ready := conditions.Get(obj, meta.ReadyCondition); ready != nil &&
|
||||
ready.Status == metav1.ConditionFalse && !conditions.IsStalled(obj) {
|
||||
retErr = errors.New(conditions.GetMessage(obj, meta.ReadyCondition))
|
||||
}
|
||||
}
|
||||
|
||||
// If it's still a successful reconciliation and it's not reconciling or
|
||||
// stalled, mark Ready=True.
|
||||
if !conditions.IsReconciling(obj) && !conditions.IsStalled(obj) &&
|
||||
retErr == nil && result.RequeueAfter == obj.GetRequeueAfter() {
|
||||
conditions.MarkTrue(obj, meta.ReadyCondition, meta.SucceededReason, "Helm repository is ready")
|
||||
}
|
||||
|
||||
// Emit events when object's state changes.
|
||||
ready := conditions.Get(obj, meta.ReadyCondition)
|
||||
// Became ready from not ready.
|
||||
if !conditions.IsReady(oldObj) && conditions.IsReady(obj) {
|
||||
r.eventLogf(ctx, obj, corev1.EventTypeNormal, ready.Reason, ready.Message)
|
||||
}
|
||||
// Became not ready from ready.
|
||||
if conditions.IsReady(oldObj) && !conditions.IsReady(obj) {
|
||||
r.eventLogf(ctx, obj, corev1.EventTypeWarning, ready.Reason, ready.Message)
|
||||
}
|
||||
}()
|
||||
|
||||
// Set reconciling condition.
|
||||
if obj.Generation != obj.Status.ObservedGeneration {
|
||||
conditions.MarkReconciling(obj, "NewGeneration", "reconciling new object generation (%d)", obj.Generation)
|
||||
}
|
||||
|
||||
// Ensure that it's an OCI URL before continuing.
|
||||
if !helmreg.IsOCI(obj.Spec.URL) {
|
||||
u, err := url.Parse(obj.Spec.URL)
|
||||
if err != nil {
|
||||
err = fmt.Errorf("failed to parse URL: %w", err)
|
||||
} else {
|
||||
err = fmt.Errorf("URL scheme '%s' in '%s' is not supported", u.Scheme, obj.Spec.URL)
|
||||
}
|
||||
conditions.MarkStalled(obj, sourcev1.URLInvalidReason, err.Error())
|
||||
conditions.MarkFalse(obj, meta.ReadyCondition, sourcev1.URLInvalidReason, err.Error())
|
||||
ctrl.LoggerFrom(ctx).Error(err, "reconciliation stalled")
|
||||
result, retErr = ctrl.Result{}, nil
|
||||
return
|
||||
}
|
||||
conditions.Delete(obj, meta.StalledCondition)
|
||||
|
||||
var (
|
||||
authenticator authn.Authenticator
|
||||
keychain authn.Keychain
|
||||
err error
|
||||
)
|
||||
// Configure any authentication related options.
|
||||
if obj.Spec.SecretRef != nil {
|
||||
keychain, err = authFromSecret(ctx, r.Client, obj)
|
||||
if err != nil {
|
||||
conditions.MarkFalse(obj, meta.ReadyCondition, sourcev1.AuthenticationFailedReason, err.Error())
|
||||
result, retErr = ctrl.Result{}, err
|
||||
return
|
||||
}
|
||||
} else if obj.Spec.Provider != sourcev1.GenericOCIProvider && obj.Spec.Type == sourcev1.HelmRepositoryTypeOCI {
|
||||
auth, authErr := oidcAuth(ctxTimeout, obj.Spec.URL, obj.Spec.Provider)
|
||||
if authErr != nil && !errors.Is(authErr, oci.ErrUnconfiguredProvider) {
|
||||
e := fmt.Errorf("failed to get credential from %s: %w", obj.Spec.Provider, authErr)
|
||||
conditions.MarkFalse(obj, meta.ReadyCondition, sourcev1.AuthenticationFailedReason, e.Error())
|
||||
result, retErr = ctrl.Result{}, e
|
||||
return
|
||||
}
|
||||
if auth != nil {
|
||||
authenticator = auth
|
||||
}
|
||||
}
|
||||
|
||||
loginOpt, err := makeLoginOption(authenticator, keychain, obj.Spec.URL)
|
||||
if err != nil {
|
||||
conditions.MarkFalse(obj, meta.ReadyCondition, sourcev1.AuthenticationFailedReason, err.Error())
|
||||
result, retErr = ctrl.Result{}, err
|
||||
return
|
||||
}
|
||||
|
||||
// Create registry client and login if needed.
|
||||
registryClient, file, err := r.RegistryClientGenerator(loginOpt != nil)
|
||||
if err != nil {
|
||||
e := fmt.Errorf("failed to create registry client: %w", err)
|
||||
conditions.MarkFalse(obj, meta.ReadyCondition, meta.FailedReason, e.Error())
|
||||
result, retErr = ctrl.Result{}, e
|
||||
return
|
||||
}
|
||||
if file != "" {
|
||||
defer func() {
|
||||
if err := os.Remove(file); err != nil {
|
||||
r.eventLogf(ctx, obj, corev1.EventTypeWarning, meta.FailedReason,
|
||||
"failed to delete temporary credentials file: %s", err)
|
||||
}
|
||||
}()
|
||||
}
|
||||
|
||||
chartRepo, err := repository.NewOCIChartRepository(obj.Spec.URL, repository.WithOCIRegistryClient(registryClient))
|
||||
if err != nil {
|
||||
e := fmt.Errorf("failed to parse URL '%s': %w", obj.Spec.URL, err)
|
||||
conditions.MarkStalled(obj, sourcev1.URLInvalidReason, e.Error())
|
||||
conditions.MarkFalse(obj, meta.ReadyCondition, sourcev1.URLInvalidReason, e.Error())
|
||||
result, retErr = ctrl.Result{}, nil
|
||||
return
|
||||
}
|
||||
conditions.Delete(obj, meta.StalledCondition)
|
||||
|
||||
// Attempt to login to the registry if credentials are provided.
|
||||
if loginOpt != nil {
|
||||
err = chartRepo.Login(loginOpt)
|
||||
if err != nil {
|
||||
e := fmt.Errorf("failed to login to registry '%s': %w", obj.Spec.URL, err)
|
||||
conditions.MarkFalse(obj, meta.ReadyCondition, sourcev1.AuthenticationFailedReason, e.Error())
|
||||
result, retErr = ctrl.Result{}, e
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
// Remove any stale Ready condition, most likely False, set above. Its value
|
||||
// is derived from the overall result of the reconciliation in the deferred
|
||||
// block at the very end.
|
||||
conditions.Delete(obj, meta.ReadyCondition)
|
||||
|
||||
result, retErr = ctrl.Result{RequeueAfter: obj.GetRequeueAfter()}, nil
|
||||
return
|
||||
}
|
||||
|
||||
func (r *HelmRepositoryOCIReconciler) reconcileDelete(ctx context.Context, obj *sourcev1.HelmRepository) (ctrl.Result, error) {
|
||||
// Remove our finalizer from the list
|
||||
controllerutil.RemoveFinalizer(obj, sourcev1.SourceFinalizer)
|
||||
|
||||
// Stop reconciliation as the object is being deleted
|
||||
return ctrl.Result{}, nil
|
||||
}
|
||||
|
||||
// eventLogf records events, and logs at the same time.
|
||||
//
|
||||
// This log is different from the debug log in the EventRecorder, in the sense
|
||||
// that this is a simple log, while the debug log contains complete details
// about the event.
|
||||
func (r *HelmRepositoryOCIReconciler) eventLogf(ctx context.Context, obj runtime.Object, eventType string, reason string, messageFmt string, args ...interface{}) {
|
||||
msg := fmt.Sprintf(messageFmt, args...)
|
||||
// Log and emit event.
|
||||
if eventType == corev1.EventTypeWarning {
|
||||
ctrl.LoggerFrom(ctx).Error(errors.New(reason), msg)
|
||||
} else {
|
||||
ctrl.LoggerFrom(ctx).Info(msg)
|
||||
}
|
||||
r.Eventf(obj, eventType, reason, msg)
|
||||
}
|
||||
|
||||
// authFromSecret returns an authn.Keychain for the given HelmRepository.
|
||||
// If the HelmRepository does not specify a secretRef, an anonymous keychain is returned.
|
||||
func authFromSecret(ctx context.Context, client client.Client, obj *sourcev1.HelmRepository) (authn.Keychain, error) {
|
||||
// Attempt to retrieve secret.
|
||||
name := types.NamespacedName{
|
||||
Namespace: obj.GetNamespace(),
|
||||
Name: obj.Spec.SecretRef.Name,
|
||||
}
|
||||
var secret corev1.Secret
|
||||
if err := client.Get(ctx, name, &secret); err != nil {
|
||||
return nil, fmt.Errorf("failed to get secret '%s': %w", name.String(), err)
|
||||
}
|
||||
|
||||
// Construct login options.
|
||||
keychain, err := registry.LoginOptionFromSecret(obj.Spec.URL, secret)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to configure Helm client with secret data: %w", err)
|
||||
}
|
||||
return keychain, nil
|
||||
}
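An illustrative basic-auth Secret that authFromSecret can consume via registry.LoginOptionFromSecret; the "username"/"password" keys mirror the test fixtures for this reconciler, while the name and values are hypothetical:

var exampleAuthSecret = &corev1.Secret{
	ObjectMeta: metav1.ObjectMeta{Name: "oci-creds", Namespace: "default"},
	Data: map[string][]byte{
		"username": []byte("admin"),
		"password": []byte("s3cret"),
	},
}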
// makeLoginOption returns a registry login option for the given HelmRepository.
|
||||
// If the HelmRepository does not specify a secretRef, a nil login option is returned.
|
||||
func makeLoginOption(auth authn.Authenticator, keychain authn.Keychain, registryURL string) (helmreg.LoginOption, error) {
|
||||
if auth != nil {
|
||||
return registry.AuthAdaptHelper(auth)
|
||||
}
|
||||
|
||||
if keychain != nil {
|
||||
return registry.KeychainAdaptHelper(keychain)(registryURL)
|
||||
}
|
||||
|
||||
return nil, nil
|
||||
}
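A minimal usage sketch tying the two helpers together, mirroring the flow in reconcile above: resolve a keychain from the Secret (if any), convert it to a login option, and authenticate only when one is produced:

func loginWithSecret(ctx context.Context, c client.Client, obj *sourcev1.HelmRepository, repo *repository.OCIChartRepository) error {
	if obj.Spec.SecretRef == nil {
		// No credentials configured; anonymous access.
		return nil
	}
	keychain, err := authFromSecret(ctx, c, obj)
	if err != nil {
		return err
	}
	loginOpt, err := makeLoginOption(nil, keychain, obj.Spec.URL)
	if err != nil {
		return err
	}
	if loginOpt == nil {
		return nil
	}
	return repo.Login(loginOpt)
}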
@ -0,0 +1,304 @@
|
|||
/*
|
||||
Copyright 2022 The Flux authors
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package controllers
|
||||
|
||||
import (
|
||||
"encoding/base64"
|
||||
"fmt"
|
||||
"testing"
|
||||
|
||||
. "github.com/onsi/gomega"
|
||||
corev1 "k8s.io/api/core/v1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/client-go/tools/record"
|
||||
kstatus "sigs.k8s.io/cli-utils/pkg/kstatus/status"
|
||||
ctrl "sigs.k8s.io/controller-runtime"
|
||||
"sigs.k8s.io/controller-runtime/pkg/client"
|
||||
fakeclient "sigs.k8s.io/controller-runtime/pkg/client/fake"
|
||||
|
||||
"github.com/fluxcd/pkg/apis/meta"
|
||||
"github.com/fluxcd/pkg/runtime/conditions"
|
||||
conditionscheck "github.com/fluxcd/pkg/runtime/conditions/check"
|
||||
"github.com/fluxcd/pkg/runtime/patch"
|
||||
|
||||
sourcev1 "github.com/fluxcd/source-controller/api/v1beta2"
|
||||
"github.com/fluxcd/source-controller/internal/helm/registry"
|
||||
)
|
||||
|
||||
func TestHelmRepositoryOCIReconciler_Reconcile(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
secretType corev1.SecretType
|
||||
secretData map[string][]byte
|
||||
}{
|
||||
{
|
||||
name: "valid auth data",
|
||||
secretData: map[string][]byte{
|
||||
"username": []byte(testRegistryUsername),
|
||||
"password": []byte(testRegistryPassword),
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "no auth data",
|
||||
secretData: nil,
|
||||
},
|
||||
{
|
||||
name: "dockerconfigjson Secret",
|
||||
secretType: corev1.SecretTypeDockerConfigJson,
|
||||
secretData: map[string][]byte{
|
||||
".dockerconfigjson": []byte(`{"auths":{"` +
|
||||
testRegistryServer.registryHost + `":{"` +
|
||||
`auth":"` + base64.StdEncoding.EncodeToString([]byte(testRegistryUsername+":"+testRegistryPassword)) + `"}}}`),
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
g := NewWithT(t)
|
||||
|
||||
ns, err := testEnv.CreateNamespace(ctx, "helmrepository-oci-reconcile-test")
|
||||
g.Expect(err).ToNot(HaveOccurred())
|
||||
defer func() { g.Expect(testEnv.Delete(ctx, ns)).To(Succeed()) }()
|
||||
|
||||
secret := &corev1.Secret{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
GenerateName: "helmrepository-",
|
||||
Namespace: ns.Name,
|
||||
},
|
||||
Data: tt.secretData,
|
||||
}
|
||||
if tt.secretType != "" {
|
||||
secret.Type = tt.secretType
|
||||
}
|
||||
|
||||
g.Expect(testEnv.CreateAndWait(ctx, secret)).To(Succeed())
|
||||
|
||||
origObj := &sourcev1.HelmRepository{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
GenerateName: "helmrepository-oci-reconcile-",
|
||||
Namespace: ns.Name,
|
||||
},
|
||||
Spec: sourcev1.HelmRepositorySpec{
|
||||
Interval: metav1.Duration{Duration: interval},
|
||||
URL: fmt.Sprintf("oci://%s", testRegistryServer.registryHost),
|
||||
SecretRef: &meta.LocalObjectReference{
|
||||
Name: secret.Name,
|
||||
},
|
||||
Provider: sourcev1.GenericOCIProvider,
|
||||
Type: sourcev1.HelmRepositoryTypeOCI,
|
||||
},
|
||||
}
|
||||
obj := origObj.DeepCopy()
|
||||
g.Expect(testEnv.Create(ctx, obj)).To(Succeed())
|
||||
|
||||
key := client.ObjectKey{Name: obj.Name, Namespace: obj.Namespace}
|
||||
|
||||
// Wait for finalizer to be set
|
||||
g.Eventually(func() bool {
|
||||
if err := testEnv.Get(ctx, key, obj); err != nil {
|
||||
return false
|
||||
}
|
||||
return len(obj.Finalizers) > 0
|
||||
}, timeout).Should(BeTrue())
|
||||
|
||||
// Wait for HelmRepository to be Ready
|
||||
waitForSourceReadyWithoutArtifact(ctx, g, obj)
|
||||
|
||||
// Check if the object status is valid.
|
||||
condns := &conditionscheck.Conditions{NegativePolarity: helmRepositoryReadyCondition.NegativePolarity}
|
||||
checker := conditionscheck.NewChecker(testEnv.Client, condns)
|
||||
checker.CheckErr(ctx, obj)
|
||||
|
||||
// kstatus client conformance check.
|
||||
u, err := patch.ToUnstructured(obj)
|
||||
g.Expect(err).ToNot(HaveOccurred())
|
||||
res, err := kstatus.Compute(u)
|
||||
g.Expect(err).ToNot(HaveOccurred())
|
||||
g.Expect(res.Status).To(Equal(kstatus.CurrentStatus))
|
||||
|
||||
// Patch the object with reconcile request annotation.
|
||||
patchHelper, err := patch.NewHelper(obj, testEnv.Client)
|
||||
g.Expect(err).ToNot(HaveOccurred())
|
||||
annotations := map[string]string{
|
||||
meta.ReconcileRequestAnnotation: "now",
|
||||
}
|
||||
obj.SetAnnotations(annotations)
|
||||
g.Expect(patchHelper.Patch(ctx, obj)).ToNot(HaveOccurred())
|
||||
g.Eventually(func() bool {
|
||||
if err := testEnv.Get(ctx, key, obj); err != nil {
|
||||
return false
|
||||
}
|
||||
return obj.Status.LastHandledReconcileAt == "now"
|
||||
}, timeout).Should(BeTrue())
|
||||
|
||||
g.Expect(testEnv.Delete(ctx, obj)).To(Succeed())
|
||||
|
||||
// Wait for HelmRepository to be deleted
|
||||
waitForSourceDeletion(ctx, g, obj)
|
||||
|
||||
// Check if a suspended object gets deleted.
|
||||
obj = origObj.DeepCopy()
|
||||
testSuspendedObjectDeleteWithoutArtifact(ctx, g, obj)
|
||||
})
|
||||
}
|
||||
}
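For reference, a hypothetical helper (illustration only) that builds the `.dockerconfigjson` payload used in the fixture above from a host, username and password; base64 and fmt are imported at the top of this file:

func dockerConfigJSON(host, username, password string) []byte {
	auth := base64.StdEncoding.EncodeToString([]byte(username + ":" + password))
	return []byte(fmt.Sprintf(`{"auths":{%q:{"auth":%q}}}`, host, auth))
}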
func TestHelmRepositoryOCIReconciler_authStrategy(t *testing.T) {
|
||||
type secretOptions struct {
|
||||
username string
|
||||
password string
|
||||
}
|
||||
|
||||
tests := []struct {
|
||||
name string
|
||||
url string
|
||||
registryOpts registryOptions
|
||||
secretOpts secretOptions
|
||||
provider string
|
||||
providerImg string
|
||||
want ctrl.Result
|
||||
wantErr bool
|
||||
assertConditions []metav1.Condition
|
||||
}{
|
||||
{
|
||||
name: "HTTP without basic auth",
|
||||
want: ctrl.Result{RequeueAfter: interval},
|
||||
assertConditions: []metav1.Condition{
|
||||
*conditions.TrueCondition(meta.ReadyCondition, meta.SucceededReason, "Helm repository is ready"),
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "HTTP with basic auth secret",
|
||||
want: ctrl.Result{RequeueAfter: interval},
|
||||
registryOpts: registryOptions{
|
||||
withBasicAuth: true,
|
||||
},
|
||||
secretOpts: secretOptions{
|
||||
username: testRegistryUsername,
|
||||
password: testRegistryPassword,
|
||||
},
|
||||
assertConditions: []metav1.Condition{
|
||||
*conditions.TrueCondition(meta.ReadyCondition, meta.SucceededReason, "Helm repository is ready"),
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "HTTP registry - basic auth with invalid secret",
|
||||
want: ctrl.Result{},
|
||||
wantErr: true,
|
||||
registryOpts: registryOptions{
|
||||
withBasicAuth: true,
|
||||
},
|
||||
secretOpts: secretOptions{
|
||||
username: "wrong-pass",
|
||||
password: "wrong-pass",
|
||||
},
|
||||
assertConditions: []metav1.Condition{
|
||||
*conditions.FalseCondition(meta.ReadyCondition, sourcev1.AuthenticationFailedReason, "failed to login to registry"),
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "with contextual login provider",
|
||||
wantErr: true,
|
||||
provider: "aws",
|
||||
providerImg: "oci://123456789000.dkr.ecr.us-east-2.amazonaws.com/test",
|
||||
assertConditions: []metav1.Condition{
|
||||
*conditions.FalseCondition(meta.ReadyCondition, sourcev1.AuthenticationFailedReason, "failed to get credential from"),
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "with contextual login provider and secretRef",
|
||||
want: ctrl.Result{RequeueAfter: interval},
|
||||
registryOpts: registryOptions{
|
||||
withBasicAuth: true,
|
||||
},
|
||||
secretOpts: secretOptions{
|
||||
username: testRegistryUsername,
|
||||
password: testRegistryPassword,
|
||||
},
|
||||
provider: "azure",
|
||||
assertConditions: []metav1.Condition{
|
||||
*conditions.TrueCondition(meta.ReadyCondition, meta.SucceededReason, "Helm repository is ready"),
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
g := NewWithT(t)
|
||||
|
||||
builder := fakeclient.NewClientBuilder().WithScheme(testEnv.GetScheme())
|
||||
workspaceDir := t.TempDir()
|
||||
server, err := setupRegistryServer(ctx, workspaceDir, tt.registryOpts)
|
||||
g.Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
obj := &sourcev1.HelmRepository{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
GenerateName: "auth-strategy-",
|
||||
},
|
||||
Spec: sourcev1.HelmRepositorySpec{
|
||||
Interval: metav1.Duration{Duration: interval},
|
||||
Timeout: &metav1.Duration{Duration: timeout},
|
||||
Type: sourcev1.HelmRepositoryTypeOCI,
|
||||
Provider: sourcev1.GenericOCIProvider,
|
||||
URL: fmt.Sprintf("oci://%s", server.registryHost),
|
||||
},
|
||||
}
|
||||
|
||||
if tt.provider != "" {
|
||||
obj.Spec.Provider = tt.provider
|
||||
}
|
||||
// If a provider-specific image is provided, overwrite the URL set
// earlier. Reconciliation will fail, but setting it is necessary because
// the login check expects the URL to match a certain pattern.
|
||||
if tt.providerImg != "" {
|
||||
obj.Spec.URL = tt.providerImg
|
||||
}
|
||||
|
||||
if tt.secretOpts.username != "" && tt.secretOpts.password != "" {
|
||||
secret := &corev1.Secret{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "auth-secretref",
|
||||
},
|
||||
Type: corev1.SecretTypeDockerConfigJson,
|
||||
Data: map[string][]byte{
|
||||
".dockerconfigjson": []byte(fmt.Sprintf(`{"auths": {%q: {"username": %q, "password": %q}}}`,
|
||||
server.registryHost, tt.secretOpts.username, tt.secretOpts.password)),
|
||||
},
|
||||
}
|
||||
|
||||
builder.WithObjects(secret)
|
||||
|
||||
obj.Spec.SecretRef = &meta.LocalObjectReference{
|
||||
Name: secret.Name,
|
||||
}
|
||||
}
|
||||
|
||||
r := &HelmRepositoryOCIReconciler{
|
||||
Client: builder.Build(),
|
||||
EventRecorder: record.NewFakeRecorder(32),
|
||||
Getters: testGetters,
|
||||
RegistryClientGenerator: registry.ClientGenerator,
|
||||
}
|
||||
|
||||
got, err := r.reconcile(ctx, obj)
|
||||
g.Expect(err != nil).To(Equal(tt.wantErr))
|
||||
g.Expect(got).To(Equal(tt.want))
|
||||
g.Expect(obj.Status.Conditions).To(conditions.MatchConditions(tt.assertConditions))
|
||||
})
|
||||
}
|
||||
}
|
(Three file diffs suppressed because they are too large.)
|
@ -14,13 +14,13 @@ See the License for the specific language governing permissions and
|
|||
limitations under the License.
|
||||
*/
|
||||
|
||||
package controller
|
||||
package controllers
|
||||
|
||||
import (
|
||||
"sigs.k8s.io/controller-runtime/pkg/event"
|
||||
"sigs.k8s.io/controller-runtime/pkg/predicate"
|
||||
|
||||
sourcev1 "github.com/fluxcd/source-controller/api/v1"
|
||||
sourcev1 "github.com/fluxcd/source-controller/api/v1beta2"
|
||||
)
|
||||
|
||||
type SourceRevisionChangePredicate struct {
|
|
@ -14,13 +14,15 @@ See the License for the specific language governing permissions and
|
|||
limitations under the License.
|
||||
*/
|
||||
|
||||
package controller
|
||||
package controllers
|
||||
|
||||
import (
|
||||
"archive/tar"
|
||||
"compress/gzip"
|
||||
"context"
|
||||
"crypto/sha256"
|
||||
"fmt"
|
||||
"hash"
|
||||
"io"
|
||||
"io/fs"
|
||||
"net/url"
|
||||
|
@ -31,31 +33,20 @@ import (
|
|||
"time"
|
||||
|
||||
securejoin "github.com/cyphar/filepath-securejoin"
|
||||
"github.com/go-git/go-git/v5/plumbing/format/gitignore"
|
||||
"github.com/opencontainers/go-digest"
|
||||
|
||||
"github.com/fluxcd/go-git/v5/plumbing/format/gitignore"
|
||||
"github.com/fluxcd/pkg/lockedfile"
|
||||
"github.com/fluxcd/pkg/untar"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
kerrors "k8s.io/apimachinery/pkg/util/errors"
|
||||
|
||||
"github.com/fluxcd/pkg/lockedfile"
|
||||
"github.com/fluxcd/pkg/sourceignore"
|
||||
pkgtar "github.com/fluxcd/pkg/tar"
|
||||
|
||||
v1 "github.com/fluxcd/source-controller/api/v1"
|
||||
intdigest "github.com/fluxcd/source-controller/internal/digest"
|
||||
sourcev1 "github.com/fluxcd/source-controller/api/v1beta2"
|
||||
sourcefs "github.com/fluxcd/source-controller/internal/fs"
|
||||
)
|
||||
|
||||
const GarbageCountLimit = 1000
|
||||
|
||||
const (
|
||||
// defaultFileMode is the permission mode applied to files inside an artifact archive.
|
||||
defaultFileMode int64 = 0o600
|
||||
// defaultDirMode is the permission mode applied to all directories inside an artifact archive.
|
||||
defaultDirMode int64 = 0o750
|
||||
// defaultExeFileMode is the permission mode applied to executable files inside an artifact archive.
|
||||
defaultExeFileMode int64 = 0o700
|
||||
)
|
||||
|
||||
// Storage manages artifacts
|
||||
type Storage struct {
|
||||
// BasePath is the local directory path where the source artifacts are stored.
|
||||
|
@ -86,10 +77,10 @@ func NewStorage(basePath string, hostname string, artifactRetentionTTL time.Dura
|
|||
}, nil
|
||||
}
|
||||
|
||||
// NewArtifactFor returns a new v1.Artifact.
|
||||
func (s Storage) NewArtifactFor(kind string, metadata metav1.Object, revision, fileName string) v1.Artifact {
|
||||
path := v1.ArtifactPath(kind, metadata.GetNamespace(), metadata.GetName(), fileName)
|
||||
artifact := v1.Artifact{
|
||||
// NewArtifactFor returns a new v1beta1.Artifact.
|
||||
func (s *Storage) NewArtifactFor(kind string, metadata metav1.Object, revision, fileName string) sourcev1.Artifact {
|
||||
path := sourcev1.ArtifactPath(kind, metadata.GetNamespace(), metadata.GetName(), fileName)
|
||||
artifact := sourcev1.Artifact{
|
||||
Path: path,
|
||||
Revision: revision,
|
||||
}
|
||||
|
@ -97,8 +88,8 @@ func (s Storage) NewArtifactFor(kind string, metadata metav1.Object, revision, f
|
|||
return artifact
|
||||
}
|
||||
|
||||
// SetArtifactURL sets the URL on the given v1.Artifact.
|
||||
func (s Storage) SetArtifactURL(artifact *v1.Artifact) {
|
||||
// SetArtifactURL sets the URL on the given v1beta1.Artifact.
|
||||
func (s Storage) SetArtifactURL(artifact *sourcev1.Artifact) {
|
||||
if artifact.Path == "" {
|
||||
return
|
||||
}
|
||||
|
@ -119,19 +110,14 @@ func (s Storage) SetHostname(URL string) string {
|
|||
return u.String()
|
||||
}
|
||||
|
||||
// MkdirAll calls os.MkdirAll for the given v1.Artifact base dir.
|
||||
func (s Storage) MkdirAll(artifact v1.Artifact) error {
|
||||
// MkdirAll calls os.MkdirAll for the given v1beta1.Artifact base dir.
|
||||
func (s *Storage) MkdirAll(artifact sourcev1.Artifact) error {
|
||||
dir := filepath.Dir(s.LocalPath(artifact))
|
||||
return os.MkdirAll(dir, 0o700)
|
||||
}
|
||||
|
||||
// Remove calls os.Remove for the given v1.Artifact path.
|
||||
func (s Storage) Remove(artifact v1.Artifact) error {
|
||||
return os.Remove(s.LocalPath(artifact))
|
||||
}
|
||||
|
||||
// RemoveAll calls os.RemoveAll for the given v1.Artifact base dir.
|
||||
func (s Storage) RemoveAll(artifact v1.Artifact) (string, error) {
|
||||
// RemoveAll calls os.RemoveAll for the given v1beta1.Artifact base dir.
|
||||
func (s *Storage) RemoveAll(artifact sourcev1.Artifact) (string, error) {
|
||||
var deletedDir string
|
||||
dir := filepath.Dir(s.LocalPath(artifact))
|
||||
// Check if the dir exists.
|
||||
|
@ -142,8 +128,8 @@ func (s Storage) RemoveAll(artifact v1.Artifact) (string, error) {
|
|||
return deletedDir, os.RemoveAll(dir)
|
||||
}
|
||||
|
||||
// RemoveAllButCurrent removes all files for the given v1.Artifact base dir, excluding the current one.
|
||||
func (s Storage) RemoveAllButCurrent(artifact v1.Artifact) ([]string, error) {
|
||||
// RemoveAllButCurrent removes all files for the given v1beta1.Artifact base dir, excluding the current one.
|
||||
func (s *Storage) RemoveAllButCurrent(artifact sourcev1.Artifact) ([]string, error) {
|
||||
deletedFiles := []string{}
|
||||
localPath := s.LocalPath(artifact)
|
||||
dir := filepath.Dir(localPath)
|
||||
|
@ -173,17 +159,18 @@ func (s Storage) RemoveAllButCurrent(artifact v1.Artifact) ([]string, error) {
|
|||
|
||||
// getGarbageFiles returns all files that need to be garbage collected for the given artifact.
|
||||
// Garbage files are determined based on the below flow:
|
||||
// 1. collect all artifact files with an expired ttl
|
||||
// 1. collect all files with an expired ttl
|
||||
// 2. if we satisfy maxItemsToBeRetained, then return
|
||||
// 3. else, collect all artifact files till the latest n files remain, where n=maxItemsToBeRetained
|
||||
func (s Storage) getGarbageFiles(artifact v1.Artifact, totalCountLimit, maxItemsToBeRetained int, ttl time.Duration) (garbageFiles []string, _ error) {
|
||||
// 3. else, remove all files till the latest n files remain, where n=maxItemsToBeRetained
|
||||
func (s *Storage) getGarbageFiles(artifact sourcev1.Artifact, totalCountLimit, maxItemsToBeRetained int, ttl time.Duration) ([]string, error) {
|
||||
localPath := s.LocalPath(artifact)
|
||||
dir := filepath.Dir(localPath)
|
||||
artifactFilesWithCreatedTs := make(map[time.Time]string)
|
||||
garbageFiles := []string{}
|
||||
filesWithCreatedTs := make(map[time.Time]string)
|
||||
// sortedPaths contain all files sorted according to their created ts.
|
||||
sortedPaths := []string{}
|
||||
now := time.Now().UTC()
|
||||
totalArtifactFiles := 0
|
||||
totalFiles := 0
|
||||
var errors []string
|
||||
creationTimestamps := []time.Time{}
|
||||
_ = filepath.WalkDir(dir, func(path string, d fs.DirEntry, err error) error {
|
||||
|
@ -191,8 +178,8 @@ func (s Storage) getGarbageFiles(artifact v1.Artifact, totalCountLimit, maxItems
|
|||
errors = append(errors, err.Error())
|
||||
return nil
|
||||
}
|
||||
if totalArtifactFiles >= totalCountLimit {
|
||||
return fmt.Errorf("reached file walking limit, already walked over: %d", totalArtifactFiles)
|
||||
if totalFiles >= totalCountLimit {
|
||||
return fmt.Errorf("reached file walking limit, already walked over: %d", totalFiles)
|
||||
}
|
||||
info, err := d.Info()
|
||||
if err != nil {
|
||||
|
@ -202,16 +189,14 @@ func (s Storage) getGarbageFiles(artifact v1.Artifact, totalCountLimit, maxItems
|
|||
createdAt := info.ModTime().UTC()
|
||||
diff := now.Sub(createdAt)
|
||||
// Compare the time difference between now and the time at which the file was created
|
||||
// with the provided TTL. Delete if the difference is greater than the TTL. Since the
|
||||
// below logic just deals with determining if an artifact needs to be garbage collected,
|
||||
// we avoid all lock files, adding them at the end to the list of garbage files.
|
||||
// with the provided TTL. Delete if the difference is greater than the TTL.
|
||||
expired := diff > ttl
|
||||
if !info.IsDir() && info.Mode()&os.ModeSymlink != os.ModeSymlink && filepath.Ext(path) != ".lock" {
|
||||
if !info.IsDir() && info.Mode()&os.ModeSymlink != os.ModeSymlink {
|
||||
if path != localPath && expired {
|
||||
garbageFiles = append(garbageFiles, path)
|
||||
}
|
||||
totalArtifactFiles += 1
|
||||
artifactFilesWithCreatedTs[createdAt] = path
|
||||
totalFiles += 1
|
||||
filesWithCreatedTs[createdAt] = path
|
||||
creationTimestamps = append(creationTimestamps, createdAt)
|
||||
}
|
||||
return nil
|
||||
|
@ -223,14 +208,14 @@ func (s Storage) getGarbageFiles(artifact v1.Artifact, totalCountLimit, maxItems
|
|||
|
||||
// We already collected enough garbage files to satisfy the maximum
// number of items to be retained, so exit early.
|
||||
if totalArtifactFiles-len(garbageFiles) < maxItemsToBeRetained {
|
||||
if totalFiles-len(garbageFiles) < maxItemsToBeRetained {
|
||||
return garbageFiles, nil
|
||||
}
|
||||
|
||||
// sort all timestamps in ascending order.
|
||||
// sort all timestamps in an ascending order.
|
||||
sort.Slice(creationTimestamps, func(i, j int) bool { return creationTimestamps[i].Before(creationTimestamps[j]) })
|
||||
for _, ts := range creationTimestamps {
|
||||
path, ok := artifactFilesWithCreatedTs[ts]
|
||||
path, ok := filesWithCreatedTs[ts]
|
||||
if !ok {
|
||||
return garbageFiles, fmt.Errorf("failed to fetch file for created ts: %v", ts)
|
||||
}
|
||||
|
@ -240,8 +225,8 @@ func (s Storage) getGarbageFiles(artifact v1.Artifact, totalCountLimit, maxItems
|
|||
var collected int
|
||||
noOfGarbageFiles := len(garbageFiles)
|
||||
for _, path := range sortedPaths {
|
||||
if path != localPath && filepath.Ext(path) != ".lock" && !stringInSlice(path, garbageFiles) {
|
||||
// If we previously collected some garbage files with an expired ttl, then take that into account
|
||||
if path != localPath && !stringInSlice(path, garbageFiles) {
|
||||
// If we previously collected a few garbage files with an expired ttl, then take that into account
|
||||
// when checking whether we need to remove more files to satisfy the max no. of items allowed
|
||||
// in the filesystem, along with the no. of files already removed in this loop.
|
||||
if noOfGarbageFiles > 0 {
|
||||
|
@ -261,9 +246,9 @@ func (s Storage) getGarbageFiles(artifact v1.Artifact, totalCountLimit, maxItems
|
|||
return garbageFiles, nil
|
||||
}
|
||||
|
||||
// GarbageCollect removes all garbage files in the artifact dir according to the provided
|
||||
// GarbageCollect removes all garbage files in the artifact dir according to the provided
|
||||
// retention options.
|
||||
func (s Storage) GarbageCollect(ctx context.Context, artifact v1.Artifact, timeout time.Duration) ([]string, error) {
|
||||
func (s *Storage) GarbageCollect(ctx context.Context, artifact sourcev1.Artifact, timeout time.Duration) ([]string, error) {
|
||||
delFilesChan := make(chan []string)
|
||||
errChan := make(chan error)
|
||||
// Abort if it takes more than the provided timeout duration.
|
||||
|
@ -286,14 +271,6 @@ func (s Storage) GarbageCollect(ctx context.Context, artifact v1.Artifact, timeo
|
|||
} else {
|
||||
deleted = append(deleted, file)
|
||||
}
|
||||
// If a lock file exists for this garbage artifact, remove that too.
|
||||
lockFile := file + ".lock"
|
||||
if _, err = os.Lstat(lockFile); err == nil {
|
||||
err = os.Remove(lockFile)
|
||||
if err != nil {
|
||||
errors = append(errors, err)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
if len(errors) > 0 {
|
||||
|
@ -324,8 +301,8 @@ func stringInSlice(a string, list []string) bool {
|
|||
return false
|
||||
}
|
||||
|
||||
// ArtifactExist returns a boolean indicating whether the v1.Artifact exists in storage and is a regular file.
|
||||
func (s Storage) ArtifactExist(artifact v1.Artifact) bool {
|
||||
// ArtifactExist returns a boolean indicating whether the v1beta1.Artifact exists in storage and is a regular file.
|
||||
func (s *Storage) ArtifactExist(artifact sourcev1.Artifact) bool {
|
||||
fi, err := os.Lstat(s.LocalPath(artifact))
|
||||
if err != nil {
|
||||
return false
|
||||
|
@ -333,35 +310,6 @@ func (s Storage) ArtifactExist(artifact v1.Artifact) bool {
|
|||
return fi.Mode().IsRegular()
|
||||
}
|
||||
|
||||
// VerifyArtifact verifies if the Digest of the v1.Artifact matches the digest
|
||||
// of the file in Storage. It returns an error if the digests don't match, or
|
||||
// if it can't be verified.
|
||||
func (s Storage) VerifyArtifact(artifact v1.Artifact) error {
|
||||
if artifact.Digest == "" {
|
||||
return fmt.Errorf("artifact has no digest")
|
||||
}
|
||||
|
||||
d, err := digest.Parse(artifact.Digest)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to parse artifact digest '%s': %w", artifact.Digest, err)
|
||||
}
|
||||
|
||||
f, err := os.Open(s.LocalPath(artifact))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer f.Close()
|
||||
|
||||
verifier := d.Verifier()
|
||||
if _, err = io.Copy(verifier, f); err != nil {
|
||||
return err
|
||||
}
|
||||
if !verifier.Verified() {
|
||||
return fmt.Errorf("computed digest doesn't match '%s'", d.String())
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// ArchiveFileFilter must return true if a file should not be included in the archive after inspecting the given path
|
||||
// and/or os.FileInfo.
|
||||
type ArchiveFileFilter func(p string, fi os.FileInfo) bool
|
||||
|
@ -380,11 +328,11 @@ func SourceIgnoreFilter(ps []gitignore.Pattern, domain []string) ArchiveFileFilt
|
|||
}
|
||||
}
|
||||
|
||||
// Archive atomically archives the given directory as a tarball to the given v1.Artifact path, excluding
|
||||
// Archive atomically archives the given directory as a tarball to the given v1beta1.Artifact path, excluding
|
||||
// directories and any ArchiveFileFilter matches. While archiving, any environment specific data (for example,
|
||||
// the user and group name) is stripped from file headers.
|
||||
// If successful, it sets the digest and last update time on the artifact.
|
||||
func (s Storage) Archive(artifact *v1.Artifact, dir string, filter ArchiveFileFilter) (err error) {
|
||||
// If successful, it sets the checksum and last update time on the artifact.
|
||||
func (s *Storage) Archive(artifact *sourcev1.Artifact, dir string, filter ArchiveFileFilter) (err error) {
|
||||
if f, err := os.Stat(dir); os.IsNotExist(err) || !f.IsDir() {
|
||||
return fmt.Errorf("invalid dir path: %s", dir)
|
||||
}
|
||||
|
@ -401,9 +349,9 @@ func (s Storage) Archive(artifact *v1.Artifact, dir string, filter ArchiveFileFi
|
|||
}
|
||||
}()
|
||||
|
||||
d := intdigest.Canonical.Digester()
|
||||
h := newHash()
|
||||
sz := &writeCounter{}
|
||||
mw := io.MultiWriter(d.Hash(), tf, sz)
|
||||
mw := io.MultiWriter(h, tf, sz)
|
||||
|
||||
gw := gzip.NewWriter(mw)
|
||||
tw := tar.NewWriter(gw)
|
||||
|
@ -426,7 +374,6 @@ func (s Storage) Archive(artifact *v1.Artifact, dir string, filter ArchiveFileFi
|
|||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// The name needs to be modified to maintain directory structure
|
||||
// as tar.FileInfoHeader only has access to the base name of the file.
|
||||
// Ref: https://golang.org/src/archive/tar/common.go?#L626
|
||||
|
@ -437,7 +384,17 @@ func (s Storage) Archive(artifact *v1.Artifact, dir string, filter ArchiveFileFi
|
|||
return err
|
||||
}
|
||||
}
|
||||
sanitizeHeader(relFilePath, header)
|
||||
header.Name = relFilePath
|
||||
|
||||
// We want to remove any environment specific data as well, this
|
||||
// ensures the checksum is purely content based.
|
||||
header.Gid = 0
|
||||
header.Uid = 0
|
||||
header.Uname = ""
|
||||
header.Gname = ""
|
||||
header.ModTime = time.Time{}
|
||||
header.AccessTime = time.Time{}
|
||||
header.ChangeTime = time.Time{}
|
||||
|
||||
if err := tw.WriteHeader(header); err != nil {
|
||||
return err
|
||||
|
@ -484,16 +441,16 @@ func (s Storage) Archive(artifact *v1.Artifact, dir string, filter ArchiveFileFi
|
|||
return err
|
||||
}
|
||||
|
||||
artifact.Digest = d.Digest().String()
|
||||
artifact.Checksum = fmt.Sprintf("%x", h.Sum(nil))
|
||||
artifact.LastUpdateTime = metav1.Now()
|
||||
artifact.Size = &sz.written
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// AtomicWriteFile atomically writes the io.Reader contents to the v1.Artifact path.
|
||||
// If successful, it sets the digest and last update time on the artifact.
|
||||
func (s Storage) AtomicWriteFile(artifact *v1.Artifact, reader io.Reader, mode os.FileMode) (err error) {
|
||||
// AtomicWriteFile atomically writes the io.Reader contents to the v1beta1.Artifact path.
|
||||
// If successful, it sets the checksum and last update time on the artifact.
|
||||
func (s *Storage) AtomicWriteFile(artifact *sourcev1.Artifact, reader io.Reader, mode os.FileMode) (err error) {
|
||||
localPath := s.LocalPath(*artifact)
|
||||
tf, err := os.CreateTemp(filepath.Split(localPath))
|
||||
if err != nil {
|
||||
|
@ -506,9 +463,9 @@ func (s Storage) AtomicWriteFile(artifact *v1.Artifact, reader io.Reader, mode o
|
|||
}
|
||||
}()
|
||||
|
||||
d := intdigest.Canonical.Digester()
|
||||
h := newHash()
|
||||
sz := &writeCounter{}
|
||||
mw := io.MultiWriter(tf, d.Hash(), sz)
|
||||
mw := io.MultiWriter(h, tf, sz)
|
||||
|
||||
if _, err := io.Copy(mw, reader); err != nil {
|
||||
tf.Close()
|
||||
|
@ -526,16 +483,16 @@ func (s Storage) AtomicWriteFile(artifact *v1.Artifact, reader io.Reader, mode o
|
|||
return err
|
||||
}
|
||||
|
||||
artifact.Digest = d.Digest().String()
|
||||
artifact.Checksum = fmt.Sprintf("%x", h.Sum(nil))
|
||||
artifact.LastUpdateTime = metav1.Now()
|
||||
artifact.Size = &sz.written
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// Copy atomically copies the io.Reader contents to the v1.Artifact path.
|
||||
// If successful, it sets the digest and last update time on the artifact.
|
||||
func (s Storage) Copy(artifact *v1.Artifact, reader io.Reader) (err error) {
|
||||
// Copy atomically copies the io.Reader contents to the v1beta1.Artifact path.
|
||||
// If successful, it sets the checksum and last update time on the artifact.
|
||||
func (s *Storage) Copy(artifact *sourcev1.Artifact, reader io.Reader) (err error) {
|
||||
localPath := s.LocalPath(*artifact)
|
||||
tf, err := os.CreateTemp(filepath.Split(localPath))
|
||||
if err != nil {
|
||||
|
@ -548,9 +505,9 @@ func (s Storage) Copy(artifact *v1.Artifact, reader io.Reader) (err error) {
|
|||
}
|
||||
}()
|
||||
|
||||
d := intdigest.Canonical.Digester()
|
||||
h := newHash()
|
||||
sz := &writeCounter{}
|
||||
mw := io.MultiWriter(tf, d.Hash(), sz)
|
||||
mw := io.MultiWriter(h, tf, sz)
|
||||
|
||||
if _, err := io.Copy(mw, reader); err != nil {
|
||||
tf.Close()
|
||||
|
@ -564,16 +521,16 @@ func (s Storage) Copy(artifact *v1.Artifact, reader io.Reader) (err error) {
|
|||
return err
|
||||
}
|
||||
|
||||
artifact.Digest = d.Digest().String()
|
||||
artifact.Checksum = fmt.Sprintf("%x", h.Sum(nil))
|
||||
artifact.LastUpdateTime = metav1.Now()
|
||||
artifact.Size = &sz.written
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// CopyFromPath atomically copies the contents of the given path to the path of the v1.Artifact.
|
||||
// If successful, the digest and last update time on the artifact is set.
|
||||
func (s Storage) CopyFromPath(artifact *v1.Artifact, path string) (err error) {
|
||||
// CopyFromPath atomically copies the contents of the given path to the path of the v1beta1.Artifact.
|
||||
// If successful, the checksum and last update time on the artifact is set.
|
||||
func (s *Storage) CopyFromPath(artifact *sourcev1.Artifact, path string) (err error) {
|
||||
f, err := os.Open(path)
|
||||
if err != nil {
|
||||
return err
|
||||
|
@ -588,7 +545,7 @@ func (s Storage) CopyFromPath(artifact *v1.Artifact, path string) (err error) {
|
|||
}
|
||||
|
||||
// CopyToPath copies the contents in the (sub)path of the given artifact to the given path.
|
||||
func (s Storage) CopyToPath(artifact *v1.Artifact, subPath, toPath string) error {
|
||||
func (s *Storage) CopyToPath(artifact *sourcev1.Artifact, subPath, toPath string) error {
|
||||
// create a tmp directory to store artifact
|
||||
tmp, err := os.MkdirTemp("", "flux-include-")
|
||||
if err != nil {
|
||||
|
@ -606,7 +563,7 @@ func (s Storage) CopyToPath(artifact *v1.Artifact, subPath, toPath string) error
|
|||
|
||||
// untar the artifact
|
||||
untarPath := filepath.Join(tmp, "unpack")
|
||||
if err = pkgtar.Untar(f, untarPath, pkgtar.WithMaxUntarSize(-1)); err != nil {
|
||||
if _, err = untar.Untar(f, untarPath); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
|
@ -626,8 +583,8 @@ func (s Storage) CopyToPath(artifact *v1.Artifact, subPath, toPath string) error
|
|||
return nil
|
||||
}
|
||||
|
||||
// Symlink creates or updates a symbolic link for the given v1.Artifact and returns the URL for the symlink.
|
||||
func (s Storage) Symlink(artifact v1.Artifact, linkName string) (string, error) {
|
||||
// Symlink creates or updates a symbolic link for the given v1beta1.Artifact and returns the URL for the symlink.
|
||||
func (s *Storage) Symlink(artifact sourcev1.Artifact, linkName string) (string, error) {
|
||||
localPath := s.LocalPath(artifact)
|
||||
dir := filepath.Dir(localPath)
|
||||
link := filepath.Join(dir, linkName)
|
||||
|
@ -645,18 +602,26 @@ func (s Storage) Symlink(artifact v1.Artifact, linkName string) (string, error)
|
|||
return "", err
|
||||
}
|
||||
|
||||
return fmt.Sprintf("http://%s/%s", s.Hostname, filepath.Join(filepath.Dir(artifact.Path), linkName)), nil
|
||||
url := fmt.Sprintf("http://%s/%s", s.Hostname, filepath.Join(filepath.Dir(artifact.Path), linkName))
|
||||
return url, nil
|
||||
}
|
||||
|
||||
// Lock creates a file lock for the given v1.Artifact.
|
||||
func (s Storage) Lock(artifact v1.Artifact) (unlock func(), err error) {
|
||||
// Checksum returns the SHA256 checksum for the data of the given io.Reader as a string.
|
||||
func (s *Storage) Checksum(reader io.Reader) string {
|
||||
h := newHash()
|
||||
_, _ = io.Copy(h, reader)
|
||||
return fmt.Sprintf("%x", h.Sum(nil))
|
||||
}
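A small usage sketch of the new Checksum helper, assuming a Storage value obtained from NewStorage and a "bytes" import in the caller:

func checksumOfBytes(s *Storage, data []byte) string {
	// The reader is consumed in full; the hex-encoded SHA256 sum is returned.
	return s.Checksum(bytes.NewReader(data))
}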
// Lock creates a file lock for the given v1beta1.Artifact.
|
||||
func (s *Storage) Lock(artifact sourcev1.Artifact) (unlock func(), err error) {
|
||||
lockFile := s.LocalPath(artifact) + ".lock"
|
||||
mutex := lockedfile.MutexAt(lockFile)
|
||||
return mutex.Lock()
|
||||
}
|
||||
|
||||
// LocalPath returns the secure local path of the given artifact (that is: relative to the Storage.BasePath).
|
||||
func (s Storage) LocalPath(artifact v1.Artifact) string {
|
||||
func (s *Storage) LocalPath(artifact sourcev1.Artifact) string {
|
||||
if artifact.Path == "" {
|
||||
return ""
|
||||
}
|
||||
|
@ -667,7 +632,12 @@ func (s Storage) LocalPath(artifact v1.Artifact) string {
|
|||
return path
|
||||
}
|
||||
|
||||
// writeCounter is an implementation of io.Writer that only records the number
|
||||
// newHash returns a new SHA256 hash.
|
||||
func newHash() hash.Hash {
|
||||
return sha256.New()
|
||||
}
|
||||
|
||||
// writeCounter is an implementation of io.Writer that only records the number
|
||||
// of bytes written.
|
||||
type writeCounter struct {
|
||||
written int64
|
||||
|
@ -678,42 +648,3 @@ func (wc *writeCounter) Write(p []byte) (int, error) {
|
|||
wc.written += int64(n)
|
||||
return n, nil
|
||||
}
|
||||
|
||||
// sanitizeHeader modifies the tar.Header to be relative to the root of the
|
||||
// archive and removes any environment specific data.
|
||||
func sanitizeHeader(relP string, h *tar.Header) {
|
||||
// Modify the name to be relative to the root of the archive,
|
||||
// this ensures we maintain the same structure when extracting.
|
||||
h.Name = relP
|
||||
|
||||
// We want to remove any environment specific data as well, this
|
||||
// ensures the checksum is purely content based.
|
||||
h.Gid = 0
|
||||
h.Uid = 0
|
||||
h.Uname = ""
|
||||
h.Gname = ""
|
||||
h.ModTime = time.Time{}
|
||||
h.AccessTime = time.Time{}
|
||||
h.ChangeTime = time.Time{}
|
||||
|
||||
// Override the mode to be the default for the type of file.
|
||||
setDefaultMode(h)
|
||||
}
|
||||
|
||||
// setDefaultMode sets the default mode for the given header.
|
||||
func setDefaultMode(h *tar.Header) {
|
||||
if h.FileInfo().IsDir() {
|
||||
h.Mode = defaultDirMode
|
||||
return
|
||||
}
|
||||
|
||||
if h.FileInfo().Mode().IsRegular() {
|
||||
mode := h.FileInfo().Mode()
|
||||
if mode&os.ModeType == 0 && mode&0o111 != 0 {
|
||||
h.Mode = defaultExeFileMode
|
||||
return
|
||||
}
|
||||
h.Mode = defaultFileMode
|
||||
return
|
||||
}
|
||||
}
|
|
@@ -14,26 +14,25 @@ See the License for the specific language governing permissions and
limitations under the License.
*/

package controller
package controllers

import (
    "archive/tar"
    "bytes"
    "compress/gzip"
    "context"
    "errors"
    "fmt"
    "io"
    "os"
    "path"
    "path/filepath"
    "strings"
    "testing"
    "time"

    "github.com/go-git/go-git/v5/plumbing/format/gitignore"
    "github.com/fluxcd/go-git/v5/plumbing/format/gitignore"
    . "github.com/onsi/gomega"

    sourcev1 "github.com/fluxcd/source-controller/api/v1"
    sourcev1 "github.com/fluxcd/source-controller/api/v1beta2"
)

func TestStorageConstructor(t *testing.T) {

@@ -62,16 +61,16 @@ func TestStorageConstructor(t *testing.T) {

// walks a tar.gz and looks for paths with the basename. It does not match
// symlinks properly at this time because that's painful.
func walkTar(tarFile string, match string, dir bool) (int64, int64, bool, error) {
func walkTar(tarFile string, match string, dir bool) (int64, bool, error) {
    f, err := os.Open(tarFile)
    if err != nil {
        return 0, 0, false, fmt.Errorf("could not open file: %w", err)
        return 0, false, fmt.Errorf("could not open file: %w", err)
    }
    defer f.Close()

    gzr, err := gzip.NewReader(f)
    if err != nil {
        return 0, 0, false, fmt.Errorf("could not unzip file: %w", err)
        return 0, false, fmt.Errorf("could not unzip file: %w", err)
    }
    defer gzr.Close()

@@ -81,24 +80,24 @@ func walkTar(tarFile string, match string, dir bool) (int64, int64, bool, error)
        if err == io.EOF {
            break
        } else if err != nil {
            return 0, 0, false, fmt.Errorf("corrupt tarball reading header: %w", err)
            return 0, false, fmt.Errorf("corrupt tarball reading header: %w", err)
        }

        switch header.Typeflag {
        case tar.TypeDir:
            if header.Name == match && dir {
                return 0, header.Mode, true, nil
                return 0, true, nil
            }
        case tar.TypeReg:
            if header.Name == match {
                return header.Size, header.Mode, true, nil
                return header.Size, true, nil
            }
        default:
            // skip
        }
    }

    return 0, 0, false, nil
    return 0, false, nil
}

func TestStorage_Archive(t *testing.T) {

@@ -109,14 +108,9 @@ func TestStorage_Archive(t *testing.T) {
        t.Fatalf("error while bootstrapping storage: %v", err)
    }

    type dummyFile struct {
        content []byte
        mode    int64
    }

    createFiles := func(files map[string]dummyFile) (dir string, err error) {
    createFiles := func(files map[string][]byte) (dir string, err error) {
        dir = t.TempDir()
        for name, df := range files {
        for name, b := range files {
            absPath := filepath.Join(dir, name)
            if err = os.MkdirAll(filepath.Dir(absPath), 0o750); err != nil {
                return

@@ -125,33 +119,27 @@ func TestStorage_Archive(t *testing.T) {
            if err != nil {
                return "", fmt.Errorf("could not create file %q: %w", absPath, err)
            }
            if n, err := f.Write(df.content); err != nil {
            if n, err := f.Write(b); err != nil {
                f.Close()
                return "", fmt.Errorf("could not write %d bytes to file %q: %w", n, f.Name(), err)
            }
            f.Close()

            if df.mode != 0 {
                if err = os.Chmod(absPath, os.FileMode(df.mode)); err != nil {
                    return "", fmt.Errorf("could not chmod file %q: %w", absPath, err)
                }
            }
        }
        return
    }

    matchFiles := func(t *testing.T, storage *Storage, artifact sourcev1.Artifact, files map[string]dummyFile, dirs []string) {
    matchFiles := func(t *testing.T, storage *Storage, artifact sourcev1.Artifact, files map[string][]byte, dirs []string) {
        t.Helper()
        for name, df := range files {
        for name, b := range files {
            mustExist := !(name[0:1] == "!")
            if !mustExist {
                name = name[1:]
            }
            s, m, exist, err := walkTar(storage.LocalPath(artifact), name, false)
            s, exist, err := walkTar(storage.LocalPath(artifact), name, false)
            if err != nil {
                t.Fatalf("failed reading tarball: %v", err)
            }
            if bs := int64(len(df.content)); s != bs {
            if bs := int64(len(b)); s != bs {
                t.Fatalf("%q size %v != %v", name, s, bs)
            }
            if exist != mustExist {

@@ -161,20 +149,13 @@ func TestStorage_Archive(t *testing.T) {
                    t.Errorf("tarball contained excluded file %q", name)
                }
            }
            expectMode := df.mode
            if expectMode == 0 {
                expectMode = defaultFileMode
            }
            if exist && m != expectMode {
                t.Fatalf("%q mode %v != %v", name, m, expectMode)
            }
        }
        for _, name := range dirs {
            mustExist := !(name[0:1] == "!")
            if !mustExist {
                name = name[1:]
            }
            _, m, exist, err := walkTar(storage.LocalPath(artifact), name, true)
            _, exist, err := walkTar(storage.LocalPath(artifact), name, true)
            if err != nil {
                t.Fatalf("failed reading tarball: %v", err)
            }

@@ -185,71 +166,67 @@ func TestStorage_Archive(t *testing.T) {
                    t.Errorf("tarball contained excluded file %q", name)
                }
            }
            if exist && m != defaultDirMode {
                t.Fatalf("%q mode %v != %v", name, m, defaultDirMode)
            }

        }
    }

    tests := []struct {
        name     string
        files    map[string]dummyFile
        files    map[string][]byte
        filter   ArchiveFileFilter
        want     map[string]dummyFile
        want     map[string][]byte
        wantDirs []string
        wantErr  bool
    }{
        {
            name: "no filter",
            files: map[string]dummyFile{
                ".git/config":   {},
                "file.jpg":      {content: []byte(`contents`)},
                "manifest.yaml": {},
            files: map[string][]byte{
                ".git/config":   nil,
                "file.jpg":      []byte(`contents`),
                "manifest.yaml": nil,
            },
            filter: nil,
            want: map[string]dummyFile{
                ".git/config":   {},
                "file.jpg":      {content: []byte(`contents`)},
                "manifest.yaml": {},
            want: map[string][]byte{
                ".git/config":   nil,
                "file.jpg":      []byte(`contents`),
                "manifest.yaml": nil,
            },
        },
        {
            name: "exclude VCS",
            files: map[string]dummyFile{
                ".git/config":   {},
                "manifest.yaml": {},
            files: map[string][]byte{
                ".git/config":   nil,
                "manifest.yaml": nil,
            },
            wantDirs: []string{
                "!.git",
            },
            filter: SourceIgnoreFilter(nil, nil),
            want: map[string]dummyFile{
                "!.git/config":  {},
                "manifest.yaml": {},
            want: map[string][]byte{
                "!.git/config":  nil,
                "manifest.yaml": nil,
            },
        },
        {
            name: "custom",
            files: map[string]dummyFile{
                ".git/config": {},
                "custom":      {},
                "horse.jpg":   {},
            files: map[string][]byte{
                ".git/config": nil,
                "custom":      nil,
                "horse.jpg":   nil,
            },
            filter: SourceIgnoreFilter([]gitignore.Pattern{
                gitignore.ParsePattern("custom", nil),
            }, nil),
            want: map[string]dummyFile{
                "!git/config": {},
                "!custom":     {},
                "horse.jpg":   {},
            want: map[string][]byte{
                "!git/config": nil,
                "!custom":     nil,
                "horse.jpg":   nil,
            },
            wantErr: false,
        },
        {
            name: "including directories",
            files: map[string]dummyFile{
                "test/.gitkeep": {},
            files: map[string][]byte{
                "test/.gitkeep": nil,
            },
            filter: SourceIgnoreFilter([]gitignore.Pattern{
                gitignore.ParsePattern("custom", nil),

@@ -259,26 +236,6 @@ func TestStorage_Archive(t *testing.T) {
            },
            wantErr: false,
        },
        {
            name: "sets default file modes",
            files: map[string]dummyFile{
                "test/file": {
                    mode: 0o666,
                },
                "test/executable": {
                    mode: 0o777,
                },
            },
            want: map[string]dummyFile{
                "test/file": {
                    mode: defaultFileMode,
                },
                "test/executable": {
                    mode: defaultExeFileMode,
                },
            },
            wantErr: false,
        },
    }
    for _, tt := range tests {
        t.Run(tt.name, func(t *testing.T) {

@@ -302,44 +259,6 @@ func TestStorage_Archive(t *testing.T) {
        }
    }

func TestStorage_Remove(t *testing.T) {
    t.Run("removes file", func(t *testing.T) {
        g := NewWithT(t)

        dir := t.TempDir()

        s, err := NewStorage(dir, "", 0, 0)
        g.Expect(err).ToNot(HaveOccurred())

        artifact := sourcev1.Artifact{
            Path: filepath.Join(dir, "test.txt"),
        }
        g.Expect(s.MkdirAll(artifact)).To(Succeed())
        g.Expect(s.AtomicWriteFile(&artifact, bytes.NewReader([]byte("test")), 0o600)).To(Succeed())
        g.Expect(s.ArtifactExist(artifact)).To(BeTrue())

        g.Expect(s.Remove(artifact)).To(Succeed())
        g.Expect(s.ArtifactExist(artifact)).To(BeFalse())
    })

    t.Run("error if file does not exist", func(t *testing.T) {
        g := NewWithT(t)

        dir := t.TempDir()

        s, err := NewStorage(dir, "", 0, 0)
        g.Expect(err).ToNot(HaveOccurred())

        artifact := sourcev1.Artifact{
            Path: filepath.Join(dir, "test.txt"),
        }

        err = s.Remove(artifact)
        g.Expect(err).To(HaveOccurred())
        g.Expect(errors.Is(err, os.ErrNotExist)).To(BeTrue())
    })
}

func TestStorageRemoveAllButCurrent(t *testing.T) {
    t.Run("bad directory in archive", func(t *testing.T) {
        dir := t.TempDir()

@@ -349,7 +268,7 @@ func TestStorageRemoveAllButCurrent(t *testing.T) {
            t.Fatalf("Valid path did not successfully return: %v", err)
        }

        if _, err := s.RemoveAllButCurrent(sourcev1.Artifact{Path: filepath.Join(dir, "really", "nonexistent")}); err == nil {
        if _, err := s.RemoveAllButCurrent(sourcev1.Artifact{Path: path.Join(dir, "really", "nonexistent")}); err == nil {
            t.Fatal("Did not error while pruning non-existent path")
        }
    })

@@ -362,18 +281,18 @@ func TestStorageRemoveAllButCurrent(t *testing.T) {
        g.Expect(err).ToNot(HaveOccurred(), "failed to create new storage")

        artifact := sourcev1.Artifact{
            Path: filepath.Join("foo", "bar", "artifact1.tar.gz"),
            Path: path.Join("foo", "bar", "artifact1.tar.gz"),
        }

        // Create artifact dir and artifacts.
        artifactDir := filepath.Join(dir, "foo", "bar")
        artifactDir := path.Join(dir, "foo", "bar")
        g.Expect(os.MkdirAll(artifactDir, 0o750)).NotTo(HaveOccurred())
        current := []string{
            filepath.Join(artifactDir, "artifact1.tar.gz"),
            path.Join(artifactDir, "artifact1.tar.gz"),
        }
        wantDeleted := []string{
            filepath.Join(artifactDir, "file1.txt"),
            filepath.Join(artifactDir, "file2.txt"),
            path.Join(artifactDir, "file1.txt"),
            path.Join(artifactDir, "file2.txt"),
        }
        createFile := func(files []string) {
            for _, c := range files {

@@ -402,15 +321,15 @@ func TestStorageRemoveAll(t *testing.T) {
    }{
        {
            name:               "delete non-existent path",
            artifactPath:       filepath.Join("foo", "bar", "artifact1.tar.gz"),
            artifactPath:       path.Join("foo", "bar", "artifact1.tar.gz"),
            createArtifactPath: false,
            wantDeleted:        "",
        },
        {
            name:               "delete existing path",
            artifactPath:       filepath.Join("foo", "bar", "artifact1.tar.gz"),
            artifactPath:       path.Join("foo", "bar", "artifact1.tar.gz"),
            createArtifactPath: true,
            wantDeleted:        filepath.Join("foo", "bar"),
            wantDeleted:        path.Join("foo", "bar"),
        },
    }

@@ -427,7 +346,7 @@ func TestStorageRemoveAll(t *testing.T) {
        }

        if tt.createArtifactPath {
            g.Expect(os.MkdirAll(filepath.Join(dir, tt.artifactPath), 0o750)).ToNot(HaveOccurred())
            g.Expect(os.MkdirAll(path.Join(dir, tt.artifactPath), 0o750)).ToNot(HaveOccurred())
        }

        deleted, err := s.RemoveAll(artifact)

@@ -530,7 +449,7 @@ func TestStorageCopyFromPath(t *testing.T) {
}

func TestStorage_getGarbageFiles(t *testing.T) {
    artifactFolder := filepath.Join("foo", "bar")
    artifactFolder := path.Join("foo", "bar")
    tests := []struct {
        name          string
        artifactPaths []string

@@ -543,119 +462,77 @@ func TestStorage_getGarbageFiles(t *testing.T) {
        {
            name: "delete files based on maxItemsToBeRetained",
            artifactPaths: []string{
                filepath.Join(artifactFolder, "artifact1.tar.gz"),
                filepath.Join(artifactFolder, "artifact2.tar.gz"),
                filepath.Join(artifactFolder, "artifact3.tar.gz"),
                filepath.Join(artifactFolder, "artifact4.tar.gz"),
                filepath.Join(artifactFolder, "artifact5.tar.gz"),
                path.Join(artifactFolder, "artifact1.tar.gz"),
                path.Join(artifactFolder, "artifact2.tar.gz"),
                path.Join(artifactFolder, "artifact3.tar.gz"),
                path.Join(artifactFolder, "artifact4.tar.gz"),
                path.Join(artifactFolder, "artifact5.tar.gz"),
            },
            createPause:          time.Millisecond * 10,
            ttl:                  time.Minute * 2,
            totalCountLimit:      10,
            maxItemsToBeRetained: 2,
            wantDeleted: []string{
                filepath.Join(artifactFolder, "artifact1.tar.gz"),
                filepath.Join(artifactFolder, "artifact2.tar.gz"),
                filepath.Join(artifactFolder, "artifact3.tar.gz"),
            },
        },
        {
            name: "delete files based on maxItemsToBeRetained, ignore lock files",
            artifactPaths: []string{
                filepath.Join(artifactFolder, "artifact1.tar.gz"),
                filepath.Join(artifactFolder, "artifact1.tar.gz.lock"),
                filepath.Join(artifactFolder, "artifact2.tar.gz"),
                filepath.Join(artifactFolder, "artifact2.tar.gz.lock"),
                filepath.Join(artifactFolder, "artifact3.tar.gz"),
                filepath.Join(artifactFolder, "artifact3.tar.gz.lock"),
                filepath.Join(artifactFolder, "artifact4.tar.gz"),
                filepath.Join(artifactFolder, "artifact5.tar.gz"),
            },
            createPause:          time.Millisecond * 10,
            ttl:                  time.Minute * 2,
            totalCountLimit:      10,
            maxItemsToBeRetained: 2,
            wantDeleted: []string{
                filepath.Join(artifactFolder, "artifact1.tar.gz"),
                filepath.Join(artifactFolder, "artifact2.tar.gz"),
                filepath.Join(artifactFolder, "artifact3.tar.gz"),
                path.Join(artifactFolder, "artifact1.tar.gz"),
                path.Join(artifactFolder, "artifact2.tar.gz"),
                path.Join(artifactFolder, "artifact3.tar.gz"),
            },
        },
        {
            name: "delete files based on ttl",
            artifactPaths: []string{
                filepath.Join(artifactFolder, "artifact1.tar.gz"),
                filepath.Join(artifactFolder, "artifact2.tar.gz"),
                filepath.Join(artifactFolder, "artifact3.tar.gz"),
                filepath.Join(artifactFolder, "artifact4.tar.gz"),
                filepath.Join(artifactFolder, "artifact5.tar.gz"),
                path.Join(artifactFolder, "artifact1.tar.gz"),
                path.Join(artifactFolder, "artifact2.tar.gz"),
                path.Join(artifactFolder, "artifact3.tar.gz"),
                path.Join(artifactFolder, "artifact4.tar.gz"),
                path.Join(artifactFolder, "artifact5.tar.gz"),
            },
            createPause:          time.Second * 1,
            ttl:                  time.Second*3 + time.Millisecond*500,
            totalCountLimit:      10,
            maxItemsToBeRetained: 4,
            wantDeleted: []string{
                filepath.Join(artifactFolder, "artifact1.tar.gz"),
                filepath.Join(artifactFolder, "artifact2.tar.gz"),
            },
        },
        {
            name: "delete files based on ttl, ignore lock files",
            artifactPaths: []string{
                filepath.Join(artifactFolder, "artifact1.tar.gz"),
                filepath.Join(artifactFolder, "artifact1.tar.gz.lock"),
                filepath.Join(artifactFolder, "artifact2.tar.gz"),
                filepath.Join(artifactFolder, "artifact2.tar.gz.lock"),
                filepath.Join(artifactFolder, "artifact3.tar.gz"),
                filepath.Join(artifactFolder, "artifact4.tar.gz"),
                filepath.Join(artifactFolder, "artifact5.tar.gz"),
            },
            createPause:          time.Second * 1,
            ttl:                  time.Second*3 + time.Millisecond*500,
            totalCountLimit:      10,
            maxItemsToBeRetained: 4,
            wantDeleted: []string{
                filepath.Join(artifactFolder, "artifact1.tar.gz"),
                filepath.Join(artifactFolder, "artifact2.tar.gz"),
                path.Join(artifactFolder, "artifact1.tar.gz"),
                path.Join(artifactFolder, "artifact2.tar.gz"),
            },
        },
        {
            name: "delete files based on ttl and maxItemsToBeRetained",
            artifactPaths: []string{
                filepath.Join(artifactFolder, "artifact1.tar.gz"),
                filepath.Join(artifactFolder, "artifact2.tar.gz"),
                filepath.Join(artifactFolder, "artifact3.tar.gz"),
                filepath.Join(artifactFolder, "artifact4.tar.gz"),
                filepath.Join(artifactFolder, "artifact5.tar.gz"),
                filepath.Join(artifactFolder, "artifact6.tar.gz"),
                path.Join(artifactFolder, "artifact1.tar.gz"),
                path.Join(artifactFolder, "artifact2.tar.gz"),
                path.Join(artifactFolder, "artifact3.tar.gz"),
                path.Join(artifactFolder, "artifact4.tar.gz"),
                path.Join(artifactFolder, "artifact5.tar.gz"),
                path.Join(artifactFolder, "artifact6.tar.gz"),
            },
            createPause:          time.Second * 1,
            ttl:                  time.Second*5 + time.Millisecond*500,
            totalCountLimit:      10,
            maxItemsToBeRetained: 4,
            wantDeleted: []string{
                filepath.Join(artifactFolder, "artifact1.tar.gz"),
                filepath.Join(artifactFolder, "artifact2.tar.gz"),
                path.Join(artifactFolder, "artifact1.tar.gz"),
                path.Join(artifactFolder, "artifact2.tar.gz"),
            },
        },
        {
            name: "delete files based on ttl and maxItemsToBeRetained and totalCountLimit",
            artifactPaths: []string{
                filepath.Join(artifactFolder, "artifact1.tar.gz"),
                filepath.Join(artifactFolder, "artifact2.tar.gz"),
                filepath.Join(artifactFolder, "artifact3.tar.gz"),
                filepath.Join(artifactFolder, "artifact4.tar.gz"),
                filepath.Join(artifactFolder, "artifact5.tar.gz"),
                filepath.Join(artifactFolder, "artifact6.tar.gz"),
                path.Join(artifactFolder, "artifact1.tar.gz"),
                path.Join(artifactFolder, "artifact2.tar.gz"),
                path.Join(artifactFolder, "artifact3.tar.gz"),
                path.Join(artifactFolder, "artifact4.tar.gz"),
                path.Join(artifactFolder, "artifact5.tar.gz"),
                path.Join(artifactFolder, "artifact6.tar.gz"),
            },
            createPause:          time.Millisecond * 500,
            ttl:                  time.Millisecond * 500,
            totalCountLimit:      3,
            maxItemsToBeRetained: 2,
            wantDeleted: []string{
                filepath.Join(artifactFolder, "artifact1.tar.gz"),
                filepath.Join(artifactFolder, "artifact2.tar.gz"),
                filepath.Join(artifactFolder, "artifact3.tar.gz"),
                path.Join(artifactFolder, "artifact1.tar.gz"),
                path.Join(artifactFolder, "artifact2.tar.gz"),
                path.Join(artifactFolder, "artifact3.tar.gz"),
            },
        },
    }
@@ -671,9 +548,9 @@ func TestStorage_getGarbageFiles(t *testing.T) {
        artifact := sourcev1.Artifact{
            Path: tt.artifactPaths[len(tt.artifactPaths)-1],
        }
        g.Expect(os.MkdirAll(filepath.Join(dir, artifactFolder), 0o750)).ToNot(HaveOccurred())
        g.Expect(os.MkdirAll(path.Join(dir, artifactFolder), 0o750)).ToNot(HaveOccurred())
        for _, artifactPath := range tt.artifactPaths {
            f, err := os.Create(filepath.Join(dir, artifactPath))
            f, err := os.Create(path.Join(dir, artifactPath))
            g.Expect(err).ToNot(HaveOccurred())
            g.Expect(f.Close()).ToNot(HaveOccurred())
            time.Sleep(tt.createPause)

@@ -699,11 +576,10 @@ func TestStorage_getGarbageFiles(t *testing.T) {
}

func TestStorage_GarbageCollect(t *testing.T) {
    artifactFolder := filepath.Join("foo", "bar")
    artifactFolder := path.Join("foo", "bar")
    tests := []struct {
        name          string
        artifactPaths []string
        wantCollected []string
        wantDeleted   []string
        wantErr       string
        ctxTimeout    time.Duration

@@ -711,32 +587,24 @@ func TestStorage_GarbageCollect(t *testing.T) {
        {
            name: "garbage collects",
            artifactPaths: []string{
                filepath.Join(artifactFolder, "artifact1.tar.gz"),
                filepath.Join(artifactFolder, "artifact1.tar.gz.lock"),
                filepath.Join(artifactFolder, "artifact2.tar.gz"),
                filepath.Join(artifactFolder, "artifact2.tar.gz.lock"),
                filepath.Join(artifactFolder, "artifact3.tar.gz"),
                filepath.Join(artifactFolder, "artifact4.tar.gz"),
            },
            wantCollected: []string{
                filepath.Join(artifactFolder, "artifact1.tar.gz"),
                filepath.Join(artifactFolder, "artifact2.tar.gz"),
                path.Join(artifactFolder, "artifact1.tar.gz"),
                path.Join(artifactFolder, "artifact2.tar.gz"),
                path.Join(artifactFolder, "artifact3.tar.gz"),
                path.Join(artifactFolder, "artifact4.tar.gz"),
            },
            wantDeleted: []string{
                filepath.Join(artifactFolder, "artifact1.tar.gz"),
                filepath.Join(artifactFolder, "artifact1.tar.gz.lock"),
                filepath.Join(artifactFolder, "artifact2.tar.gz"),
                filepath.Join(artifactFolder, "artifact2.tar.gz.lock"),
                path.Join(artifactFolder, "artifact1.tar.gz"),
                path.Join(artifactFolder, "artifact2.tar.gz"),
            },
            ctxTimeout: time.Second * 1,
        },
        {
            name: "garbage collection fails with context timeout",
            artifactPaths: []string{
                filepath.Join(artifactFolder, "artifact1.tar.gz"),
                filepath.Join(artifactFolder, "artifact2.tar.gz"),
                filepath.Join(artifactFolder, "artifact3.tar.gz"),
                filepath.Join(artifactFolder, "artifact4.tar.gz"),
                path.Join(artifactFolder, "artifact1.tar.gz"),
                path.Join(artifactFolder, "artifact2.tar.gz"),
                path.Join(artifactFolder, "artifact3.tar.gz"),
                path.Join(artifactFolder, "artifact4.tar.gz"),
            },
            wantErr:    "context deadline exceeded",
            ctxTimeout: time.Nanosecond * 1,

@@ -754,9 +622,9 @@ func TestStorage_GarbageCollect(t *testing.T) {
        artifact := sourcev1.Artifact{
            Path: tt.artifactPaths[len(tt.artifactPaths)-1],
        }
        g.Expect(os.MkdirAll(filepath.Join(dir, artifactFolder), 0o750)).ToNot(HaveOccurred())
        g.Expect(os.MkdirAll(path.Join(dir, artifactFolder), 0o750)).ToNot(HaveOccurred())
        for i, artifactPath := range tt.artifactPaths {
            f, err := os.Create(filepath.Join(dir, artifactPath))
            f, err := os.Create(path.Join(dir, artifactPath))
            g.Expect(err).ToNot(HaveOccurred())
            g.Expect(f.Close()).ToNot(HaveOccurred())
            if i != len(tt.artifactPaths)-1 {

@@ -764,90 +632,29 @@ func TestStorage_GarbageCollect(t *testing.T) {
        }
    }

        collectedPaths, err := s.GarbageCollect(context.TODO(), artifact, tt.ctxTimeout)
        deletedPaths, err := s.GarbageCollect(context.TODO(), artifact, tt.ctxTimeout)
        if tt.wantErr == "" {
            g.Expect(err).ToNot(HaveOccurred(), "failed to collect garbage files")
        } else {
            g.Expect(err).To(HaveOccurred())
            g.Expect(err.Error()).To(ContainSubstring(tt.wantErr))
        }
        if len(tt.wantCollected) > 0 {
            g.Expect(len(tt.wantCollected)).To(Equal(len(collectedPaths)))
            for _, wantCollectedPath := range tt.wantCollected {
        if len(tt.wantDeleted) > 0 {
            g.Expect(len(tt.wantDeleted)).To(Equal(len(deletedPaths)))
            for _, wantDeletedPath := range tt.wantDeleted {
                present := false
                for _, collectedPath := range collectedPaths {
                    if strings.Contains(collectedPath, wantCollectedPath) {
                        g.Expect(collectedPath).ToNot(BeAnExistingFile())
                for _, deletedPath := range deletedPaths {
                    if strings.Contains(deletedPath, wantDeletedPath) {
                        g.Expect(deletedPath).ToNot(BeAnExistingFile())
                        present = true
                        break
                    }
                }
                if present == false {
                    g.Fail(fmt.Sprintf("expected file to be garbage collected, still exists: %s", wantCollectedPath))
                    g.Fail(fmt.Sprintf("expected file to be deleted, still exists: %s", wantDeletedPath))
                }
            }
        }
        for _, delFile := range tt.wantDeleted {
            g.Expect(filepath.Join(dir, delFile)).ToNot(BeAnExistingFile())
        }
        })
    }
}

func TestStorage_VerifyArtifact(t *testing.T) {
    g := NewWithT(t)

    dir := t.TempDir()
    s, err := NewStorage(dir, "", 0, 0)
    g.Expect(err).ToNot(HaveOccurred(), "failed to create new storage")

    g.Expect(os.WriteFile(filepath.Join(dir, "artifact"), []byte("test"), 0o600)).To(Succeed())

    t.Run("artifact without digest", func(t *testing.T) {
        g := NewWithT(t)

        err := s.VerifyArtifact(sourcev1.Artifact{})
        g.Expect(err).To(HaveOccurred())
        g.Expect(err).To(MatchError("artifact has no digest"))
    })

    t.Run("artifact with invalid digest", func(t *testing.T) {
        g := NewWithT(t)

        err := s.VerifyArtifact(sourcev1.Artifact{Digest: "invalid"})
        g.Expect(err).To(HaveOccurred())
        g.Expect(err).To(MatchError("failed to parse artifact digest 'invalid': invalid checksum digest format"))
    })

    t.Run("artifact with invalid path", func(t *testing.T) {
        g := NewWithT(t)

        err := s.VerifyArtifact(sourcev1.Artifact{
            Digest: "sha256:9ba7a35ce8acd3557fe30680ef193ca7a36bb5dc62788f30de7122a0a5beab69",
            Path:   "invalid",
        })
        g.Expect(err).To(HaveOccurred())
        g.Expect(errors.Is(err, os.ErrNotExist)).To(BeTrue())
    })

    t.Run("artifact with digest mismatch", func(t *testing.T) {
        g := NewWithT(t)

        err := s.VerifyArtifact(sourcev1.Artifact{
            Digest: "sha256:9ba7a35ce8acd3557fe30680ef193ca7a36bb5dc62788f30de7122a0a5beab69",
            Path:   "artifact",
        })
        g.Expect(err).To(HaveOccurred())
        g.Expect(err).To(MatchError("computed digest doesn't match 'sha256:9ba7a35ce8acd3557fe30680ef193ca7a36bb5dc62788f30de7122a0a5beab69'"))
    })

    t.Run("artifact with digest match", func(t *testing.T) {
        g := NewWithT(t)

        err := s.VerifyArtifact(sourcev1.Artifact{
            Digest: "sha256:9f86d081884c7d659a2feaa0c55ad015a3bf4f1b2b0b822cd15d6c15b0f00a08",
            Path:   "artifact",
        })
        g.Expect(err).ToNot(HaveOccurred())
    })
}

@@ -14,27 +14,20 @@ See the License for the specific language governing permissions and
limitations under the License.
*/

package controller
package controllers

import (
    "bytes"
    "context"
    "crypto/tls"
    "crypto/x509"
    "fmt"
    "io"
    "log"
    "io/ioutil"
    "math/rand"
    "net"
    "net/http"
    "os"
    "path/filepath"
    "testing"
    "time"

    "github.com/foxcpp/go-mockdns"
    "github.com/phayes/freeport"
    "github.com/sirupsen/logrus"
    "golang.org/x/crypto/bcrypt"
    "helm.sh/helm/v3/pkg/getter"
    helmreg "helm.sh/helm/v3/pkg/registry"

@@ -42,21 +35,25 @@ import (
    "k8s.io/client-go/kubernetes/scheme"
    "k8s.io/client-go/tools/record"
    ctrl "sigs.k8s.io/controller-runtime"
    "sigs.k8s.io/controller-runtime/pkg/client"
    "sigs.k8s.io/yaml"

    dcontext "github.com/distribution/distribution/v3/context"
    "github.com/fluxcd/pkg/git/libgit2/transport"
    "github.com/fluxcd/pkg/runtime/controller"
    "github.com/fluxcd/pkg/runtime/testenv"
    "github.com/fluxcd/pkg/testserver"
    "github.com/phayes/freeport"
    "github.com/sirupsen/logrus"

    "github.com/distribution/distribution/v3/configuration"
    dockerRegistry "github.com/distribution/distribution/v3/registry"
    _ "github.com/distribution/distribution/v3/registry/auth/htpasswd"
    _ "github.com/distribution/distribution/v3/registry/storage/driver/inmemory"
    git2go "github.com/libgit2/git2go/v34"

    "github.com/fluxcd/pkg/runtime/controller"
    "github.com/fluxcd/pkg/runtime/metrics"
    "github.com/fluxcd/pkg/runtime/testenv"
    "github.com/fluxcd/pkg/testserver"

    sourcev1 "github.com/fluxcd/source-controller/api/v1"
    sourcev1 "github.com/fluxcd/source-controller/api/v1beta2"
    "github.com/fluxcd/source-controller/internal/cache"
    "github.com/fluxcd/source-controller/internal/features"
    "github.com/fluxcd/source-controller/internal/helm/registry"
    // +kubebuilder:scaffold:imports
)

@@ -80,7 +77,6 @@ const (
)

var (
    k8sClient   client.Client
    testEnv     *testenv.Environment
    testStorage *Storage
    testServer  *testserver.ArtifactServer

@@ -102,11 +98,9 @@ var (
)

var (
    tlsPublicKey     []byte
    tlsPrivateKey    []byte
    tlsCA            []byte
    clientPublicKey  []byte
    clientPrivateKey []byte
    tlsPublicKey  []byte
    tlsPrivateKey []byte
    tlsCA         []byte
)

var (

@@ -114,18 +108,20 @@ var (
    testCache *cache.Cache
)

func init() {
    rand.Seed(time.Now().UnixNano())
}

type registryClientTestServer struct {
    out            io.Writer
    registryHost   string
    workspaceDir   string
    registryClient *helmreg.Client
    dnsServer      *mockdns.Server
}

type registryOptions struct {
    withBasicAuth      bool
    withTLS            bool
    withClientCertAuth bool
    withBasicAuth bool
    withTLS       bool
}

func setupRegistryServer(ctx context.Context, workspaceDir string, opts registryOptions) (*registryClientTestServer, error) {

@@ -140,11 +136,15 @@ func setupRegistryServer(ctx context.Context, workspaceDir string, opts registry
    var out bytes.Buffer
    server.out = &out

    // init test client options
    clientOpts := []helmreg.ClientOption{
    // init test client
    client, err := helmreg.NewClient(
        helmreg.ClientOptDebug(true),
        helmreg.ClientOptWriter(server.out),
    )
    if err != nil {
        return nil, fmt.Errorf("failed to create registry client: %s", err)
    }
    server.registryClient = client

    config := &configuration.Configuration{}
    port, err := freeport.GetFreePort()

@@ -152,29 +152,8 @@ func setupRegistryServer(ctx context.Context, workspaceDir string, opts registry
        return nil, fmt.Errorf("failed to get free port: %s", err)
    }

    // Change the registry host to a host which is not localhost and
    // mock DNS to map example.com to 127.0.0.1.
    // This is required because Docker enforces HTTP if the registry
    // is hosted on localhost/127.0.0.1.
    if opts.withTLS {
        server.registryHost = fmt.Sprintf("example.com:%d", port)
        // Disable DNS server logging as it is extremely chatty.
        dnsLog := log.Default()
        dnsLog.SetOutput(io.Discard)
        server.dnsServer, err = mockdns.NewServerWithLogger(map[string]mockdns.Zone{
            "example.com.": {
                A: []string{"127.0.0.1"},
            },
        }, dnsLog, false)
        if err != nil {
            return nil, err
        }
        server.dnsServer.PatchNet(net.DefaultResolver)
    } else {
        server.registryHost = fmt.Sprintf("127.0.0.1:%d", port)
    }

    config.HTTP.Addr = fmt.Sprintf(":%d", port)
    server.registryHost = fmt.Sprintf("localhost:%d", port)
    config.HTTP.Addr = fmt.Sprintf("127.0.0.1:%d", port)
    config.HTTP.DrainTimeout = time.Duration(10) * time.Second
    config.Storage = map[string]configuration.Parameters{"inmemory": map[string]interface{}{}}
@@ -186,7 +165,8 @@ func setupRegistryServer(ctx context.Context, workspaceDir string, opts registry
    }

    htpasswdPath := filepath.Join(workspaceDir, testRegistryHtpasswdFileBasename)
    if err = os.WriteFile(htpasswdPath, []byte(fmt.Sprintf("%s:%s\n", testRegistryUsername, string(pwBytes))), 0644); err != nil {
    err = ioutil.WriteFile(htpasswdPath, []byte(fmt.Sprintf("%s:%s\n", testRegistryUsername, string(pwBytes))), 0644)
    if err != nil {
        return nil, fmt.Errorf("failed to create htpasswd file: %s", err)
    }

@@ -202,90 +182,36 @@ func setupRegistryServer(ctx context.Context, workspaceDir string, opts registry
    if opts.withTLS {
        config.HTTP.TLS.Certificate = "testdata/certs/server.pem"
        config.HTTP.TLS.Key = "testdata/certs/server-key.pem"
        // Configure CA certificates only if client cert authentication is enabled.
        if opts.withClientCertAuth {
            config.HTTP.TLS.ClientCAs = []string{"testdata/certs/ca.pem"}
        }

        // add TLS configured HTTP client option to clientOpts
        httpClient, err := tlsConfiguredHTTPCLient()
        if err != nil {
            return nil, fmt.Errorf("failed to create TLS configured HTTP client: %s", err)
        }
        clientOpts = append(clientOpts, helmreg.ClientOptHTTPClient(httpClient))
    } else {
        clientOpts = append(clientOpts, helmreg.ClientOptPlainHTTP())
    }

    // setup logger options
    config.Log.AccessLog.Disabled = true
    config.Log.Level = "error"
    logrus.SetOutput(io.Discard)
    logger := logrus.New()
    logger.SetOutput(io.Discard)
    dcontext.SetDefaultLogger(logrus.NewEntry(logger))

    registry, err := dockerRegistry.NewRegistry(ctx, config)
    dockerRegistry, err := dockerRegistry.NewRegistry(ctx, config)
    if err != nil {
        return nil, fmt.Errorf("failed to create docker registry: %w", err)
    }

    // init test client
    helmClient, err := helmreg.NewClient(clientOpts...)
    if err != nil {
        return nil, fmt.Errorf("failed to create registry client: %s", err)
    }
    server.registryClient = helmClient

    // Start Docker registry
    go registry.ListenAndServe()
    go dockerRegistry.ListenAndServe()

    return server, nil
}

func tlsConfiguredHTTPCLient() (*http.Client, error) {
    pool := x509.NewCertPool()
    if !pool.AppendCertsFromPEM(tlsCA) {
        return nil, fmt.Errorf("failed to append CA certificate to pool")
    }
    cert, err := tls.LoadX509KeyPair("testdata/certs/server.pem", "testdata/certs/server-key.pem")
    if err != nil {
        return nil, fmt.Errorf("failed to load server certificate: %s", err)
    }
    httpClient := &http.Client{
        Transport: &http.Transport{
            TLSClientConfig: &tls.Config{
                RootCAs: pool,
                Certificates: []tls.Certificate{
                    cert,
                },
            },
        },
    }
    return httpClient, nil
}

func (r *registryClientTestServer) Close() {
    if r.dnsServer != nil {
        mockdns.UnpatchNet(net.DefaultResolver)
        r.dnsServer.Close()
    }
}

func TestMain(m *testing.M) {
    mustHaveNoThreadSupport()

    initTestTLS()

    utilruntime.Must(sourcev1.AddToScheme(scheme.Scheme))

    testEnv = testenv.New(
        testenv.WithCRDPath(filepath.Join("..", "..", "config", "crd", "bases")),
        testenv.WithMaxConcurrentReconciles(4),
    )
    testEnv = testenv.New(testenv.WithCRDPath(filepath.Join("..", "config", "crd", "bases")))

    var err error
    // Initialize a cacheless client for tests that need the latest objects.
    k8sClient, err = client.New(testEnv.Config, client.Options{Scheme: scheme.Scheme})
    if err != nil {
        panic(fmt.Sprintf("failed to create k8s client: %v", err))
    }

    testServer, err = testserver.NewTempArtifactServer()
    if err != nil {
        panic(fmt.Sprintf("Failed to create a temporary storage server: %v", err))

@@ -298,7 +224,7 @@ func TestMain(m *testing.M) {
        panic(fmt.Sprintf("Failed to create a test storage: %v", err))
    }

    testMetricsH = controller.NewMetrics(testEnv, metrics.MustMakeRecorder(), sourcev1.SourceFinalizer)
    testMetricsH = controller.MustMakeMetrics(testEnv)

    testWorkspaceDir, err := os.MkdirTemp("", "registry-test-")
    if err != nil {

@@ -310,16 +236,23 @@ func TestMain(m *testing.M) {
    if err != nil {
        panic(fmt.Sprintf("Failed to create a test registry server: %v", err))
    }
    defer testRegistryServer.Close()

    if err = transport.InitManagedTransport(); err != nil {
        panic(fmt.Sprintf("Failed to initialize libgit2 managed transport: %v", err))
    }

    if err := (&GitRepositoryReconciler{
        Client:        testEnv,
        EventRecorder: record.NewFakeRecorder(32),
        Metrics:       testMetricsH,
        Storage:       testStorage,
    }).SetupWithManagerAndOptions(testEnv, GitRepositoryReconcilerOptions{
        RateLimiter: controller.GetDefaultRateLimiter(),
    }); err != nil {
        features: map[string]bool{
            features.OptimizedGitClones: true,
            // Ensure that both implementations are used during tests.
            features.ForceGoGitImplementation: false,
        },
        Libgit2TransportInitialized: transport.Enabled,
    }).SetupWithManager(testEnv); err != nil {
        panic(fmt.Sprintf("Failed to start GitRepositoryReconciler: %v", err))
    }

@@ -328,9 +261,7 @@ func TestMain(m *testing.M) {
        EventRecorder: record.NewFakeRecorder(32),
        Metrics:       testMetricsH,
        Storage:       testStorage,
    }).SetupWithManagerAndOptions(testEnv, BucketReconcilerOptions{
        RateLimiter: controller.GetDefaultRateLimiter(),
    }); err != nil {
    }).SetupWithManager(testEnv); err != nil {
        panic(fmt.Sprintf("Failed to start BucketReconciler: %v", err))
    }

@@ -342,9 +273,7 @@ func TestMain(m *testing.M) {
        EventRecorder: record.NewFakeRecorder(32),
        Metrics:       testMetricsH,
        Storage:       testStorage,
    }).SetupWithManagerAndOptions(testEnv, OCIRepositoryReconcilerOptions{
        RateLimiter: controller.GetDefaultRateLimiter(),
    }); err != nil {
    }).SetupWithManager(testEnv); err != nil {
        panic(fmt.Sprintf("Failed to start OCIRepositoryReconciler: %v", err))
    }

@@ -357,12 +286,20 @@ func TestMain(m *testing.M) {
        Cache:         testCache,
        TTL:           1 * time.Second,
        CacheRecorder: cacheRecorder,
    }).SetupWithManagerAndOptions(testEnv, HelmRepositoryReconcilerOptions{
        RateLimiter: controller.GetDefaultRateLimiter(),
    }); err != nil {
    }).SetupWithManager(testEnv); err != nil {
        panic(fmt.Sprintf("Failed to start HelmRepositoryReconciler: %v", err))
    }

    if err = (&HelmRepositoryOCIReconciler{
        Client:                  testEnv,
        EventRecorder:           record.NewFakeRecorder(32),
        Metrics:                 testMetricsH,
        Getters:                 testGetters,
        RegistryClientGenerator: registry.ClientGenerator,
    }).SetupWithManager(testEnv); err != nil {
        panic(fmt.Sprintf("Failed to start HelmRepositoryOCIReconciler: %v", err))
    }

    if err := (&HelmChartReconciler{
        Client:        testEnv,
        EventRecorder: record.NewFakeRecorder(32),

@@ -372,10 +309,8 @@ func TestMain(m *testing.M) {
        Cache:         testCache,
        TTL:           1 * time.Second,
        CacheRecorder: cacheRecorder,
    }).SetupWithManagerAndOptions(ctx, testEnv, HelmChartReconcilerOptions{
        RateLimiter: controller.GetDefaultRateLimiter(),
    }); err != nil {
        panic(fmt.Sprintf("Failed to start HelmChartReconciler: %v", err))
    }).SetupWithManager(testEnv); err != nil {
        panic(fmt.Sprintf("Failed to start HelmRepositoryReconciler: %v", err))
    }

    go func() {

@@ -420,14 +355,6 @@ func initTestTLS() {
    if err != nil {
        panic(err)
    }
    clientPrivateKey, err = os.ReadFile("testdata/certs/client-key.pem")
    if err != nil {
        panic(err)
    }
    clientPublicKey, err = os.ReadFile("testdata/certs/client.pem")
    if err != nil {
        panic(err)
    }
}

func newTestStorage(s *testserver.HTTPServer) (*Storage, error) {

@@ -452,7 +379,21 @@ func int64p(i int64) *int64 {
    return &i
}

func logOCIRepoStatus(t *testing.T, obj *sourcev1.OCIRepository) {
    sts, _ := yaml.Marshal(obj.Status)
    t.Log(string(sts))
// This provides a regression assurance for image-automation-controller/#339.
// Validates that:
// - libgit2 was built with no support for threads.
// - git2go accepts libgit2 built with no support for threads.
//
// The logic below does the validation of the former, whilst
// referring to git2go forces its init() execution, which is
// where any validation to that effect resides.
//
// git2go does not support threadless libgit2 by default,
// hence a fork is being used which disables such validation.
//
// TODO: extract logic into pkg.
func mustHaveNoThreadSupport() {
    if git2go.Features()&git2go.FeatureThreads != 0 {
        panic("libgit2 must not be built with thread support")
    }
}

@@ -12,7 +12,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.

all: server-key.pem client-key.pem
all: server-key.pem

ca-key.pem: ca-csr.json
    cfssl gencert -initca ca-csr.json | cfssljson -bare ca -

@@ -28,13 +28,3 @@ server-key.pem: server-csr.json ca-config.json ca-key.pem
        server-csr.json | cfssljson -bare server
server.pem: server-key.pem
server.csr: server-key.pem

client-key.pem: client-csr.json ca-config.json ca-key.pem
    cfssl gencert \
        -ca=ca.pem \
        -ca-key=ca-key.pem \
        -config=ca-config.json \
        -profile=web-servers \
        client-csr.json | cfssljson -bare client
client.pem: client-key.pem
client.csr: client-key.pem

@@ -0,0 +1,5 @@
-----BEGIN EC PRIVATE KEY-----
MHcCAQEEIOH/u9dMcpVcZ0+X9Fc78dCTj8SHuXawhLjhu/ej64WToAoGCCqGSM49
AwEHoUQDQgAEruH/kPxtX3cyYR2G7TYmxLq6AHyzo/NGXc9XjGzdJutE2SQzn37H
dvSJbH+Lvqo9ik0uiJVRVdCYD1j7gNszGA==
-----END EC PRIVATE KEY-----

@@ -0,0 +1,9 @@
-----BEGIN CERTIFICATE REQUEST-----
MIIBIDCBxgIBADAZMRcwFQYDVQQDEw5leGFtcGxlLmNvbSBDQTBZMBMGByqGSM49
AgEGCCqGSM49AwEHA0IABK7h/5D8bV93MmEdhu02JsS6ugB8s6PzRl3PV4xs3Sbr
RNkkM59+x3b0iWx/i76qPYpNLoiVUVXQmA9Y+4DbMxigSzBJBgkqhkiG9w0BCQ4x
PDA6MDgGA1UdEQQxMC+CCWxvY2FsaG9zdIILZXhhbXBsZS5jb22CD3d3dy5leGFt
cGxlLmNvbYcEfwAAATAKBggqhkjOPQQDAgNJADBGAiEAkw85nyLhJssyCYsaFvRU
EErhu66xHPJug/nG50uV5OoCIQCUorrflOSxfChPeCe4xfwcPv7FpcCYbKVYtGzz
b34Wow==
-----END CERTIFICATE REQUEST-----
Some files were not shown because too many files have changed in this diff.