Compare commits

..

3 Commits

Author SHA1 Message Date
Hasan Turken e0fe649153
Merge pull request #393 from lsviben/release-0.17
[Backport release-0.17] Max index value validation
2023-03-09 18:51:36 +03:00
Philippe Scorsolini 08b0fc76bf
fix: properly validate max index
Signed-off-by: Philippe Scorsolini <p.scorsolini@gmail.com>
(cherry picked from commit 0aac4ba546)
2023-03-09 15:50:08 +01:00
Philippe Scorsolini d4fccb39a4
fix: enforce max index value for paths
Signed-off-by: Philippe Scorsolini <p.scorsolini@gmail.com>
(cherry picked from commit 7560fbc041)
2023-03-09 15:47:51 +01:00
184 changed files with 12830 additions and 19656 deletions

View File

@ -1,32 +1,35 @@
<!--
Thank you for helping to improve Crossplane! Please read the contribution docs
(linked below) if this is your first Crossplane pull request.
Thank you for helping to improve Crossplane!
Please read through https://git.io/fj2m9 if this is your first time opening a
Crossplane pull request. Find us in https://slack.crossplane.io/messages/dev if
you need any help contributing.
-->
### Description of your changes
<!--
Briefly describe what this pull request does, and how it is covered by tests.
Be proactive - direct your reviewers' attention to anything that needs special
consideration.
Briefly describe what this pull request does. Be sure to direct your reviewers'
attention to anything that needs special consideration.
We love pull requests that resolve an open Crossplane issue. If yours does, you
can uncomment the below line to indicate which issue your PR fixes, for example
"Fixes #500":
We love pull requests that fix an open issue. If yours does, use the below line
to indicate which issue it fixes, for example "Fixes #500".
-->
Fixes #
Fixes #
I have: <!--You MUST either [x] check or [ ] ~strike through~ every item.-->
I have:
- [ ] Read and followed Crossplane's [contribution process].
- [ ] Run `earthly +reviewable` to ensure this PR is ready for review.
- [ ] Added or updated unit tests.
- [ ] Linked a PR or a [docs tracking issue] to [document this change].
- [ ] Added `backport release-x.y` labels to auto-backport this PR.
- [ ] Run `make reviewable test` to ensure this PR is ready for review.
Need help with this checklist? See the [cheat sheet].
### How has this code been tested
[contribution process]: https://github.com/crossplane/crossplane/tree/main/contributing
[docs tracking issue]: https://github.com/crossplane/docs/issues/new
[document this change]: https://docs.crossplane.io/contribute/contribute
[cheat sheet]: https://github.com/crossplane/crossplane/tree/main/contributing#checklist-cheat-sheet
<!--
Before reviewers can be confident in the correctness of this pull request, it
needs to be tested and shown to be correct. Briefly describe the testing that has
already been done or which is planned for this change.
-->
[contribution process]: https://git.io/fj2m9

View File

@ -1,7 +0,0 @@
#!/bin/sh
curl -fsSLo /usr/local/bin/earthly https://github.com/earthly/earthly/releases/latest/download/earthly-linux-amd64
chmod +x /usr/local/bin/earthly
/usr/local/bin/earthly bootstrap
renovate

262
.github/renovate.json5 vendored
View File

@ -1,262 +0,0 @@
{
"$schema": "https://docs.renovatebot.com/renovate-schema.json",
"extends": [
"config:recommended",
"helpers:pinGitHubActionDigests",
":semanticCommits"
],
// We only want renovate to rebase PRs when they have conflicts, default
// "auto" mode is not required.
"rebaseWhen": "conflicted",
// The maximum number of PRs to be created in parallel
"prConcurrentLimit": 5,
// The branches renovate should target
// PLEASE UPDATE THIS WHEN RELEASING.
"baseBranches": [
'main',
'release-1.18',
'release-1.19',
'release-1.20',
],
"ignorePaths": [
"design/**",
// We test upgrades, so leave it on an older version on purpose.
"test/e2e/manifests/pkg/provider/provider-initial.yaml",
],
"postUpdateOptions": [
"gomodTidy"
],
// All PRs should have a label
"labels": [
"automated"
],
"customManagers": [
{
"customType": "regex",
"description": "Bump Earthly version in GitHub workflows",
"fileMatch": [
"^\\.github\\/workflows\\/[^/]+\\.ya?ml$"
],
"matchStrings": [
"EARTHLY_VERSION: '(?<currentValue>.*?)'\\n"
],
"datasourceTemplate": "github-releases",
"depNameTemplate": "earthly/earthly",
"extractVersionTemplate": "^v(?<version>.*)$"
},
{
"customType": "regex",
"description": "Bump Go version in Earthfile",
"fileMatch": [
"^Earthfile$"
],
"matchStrings": [
"ARG --global GO_VERSION=(?<currentValue>.*?)\\n"
],
"datasourceTemplate": "golang-version",
"depNameTemplate": "golang"
},
{
"customType": "regex",
"description": "Bump golangci-lint version in the Earthfile",
"fileMatch": [
"^Earthfile$"
],
"matchStrings": [
"ARG GOLANGCI_LINT_VERSION=(?<currentValue>.*?)\\n"
],
"datasourceTemplate": "github-releases",
"depNameTemplate": "golangci/golangci-lint"
},
{
"customType": "regex",
"description": "Bump codeql version in the Earthfile",
"fileMatch": [
"^Earthfile$"
],
"matchStrings": [
"ARG CODEQL_VERSION=(?<currentValue>.*?)\\n"
],
"datasourceTemplate": "github-releases",
"depNameTemplate": "github/codeql-action",
"extractVersionTemplate": "^codeql-bundle-(?<version>.*)$"
},
],
// Renovate doesn't have native Earthfile support, but because Earthfile
// syntax is a superset of Dockerfile syntax this works to update FROM images.
// https://github.com/renovatebot/renovate/issues/15975
"dockerfile": {
"fileMatch": [
"(^|/)Earthfile$"
]
},
// PackageRules disabled below should be enabled in case of vulnerabilities
"vulnerabilityAlerts": {
"enabled": true
},
"osvVulnerabilityAlerts": true,
// Renovate evaluates all packageRules in order, so low priority rules should
// be at the beginning, high priority at the end
"packageRules": [
{
"description": "Generate code after upgrading go dependencies (main)",
"matchDatasources": [
"go"
],
// Currently we only have an Earthfile on main and some release branches, so we ignore the ones we know don't have it.
matchBaseBranches: [
'!/release-1\.16/',
],
postUpgradeTasks: {
// Post-upgrade tasks that are executed before a commit is made by Renovate.
"commands": [
"earthly --strict +go-generate",
],
fileFilters: [
"**/*"
],
executionMode: "update",
},
},
{
"description": "Generate code after upgrading go dependencies (release branch)",
"matchDatasources": [
"go"
],
// Currently we only have an Earthfile on main and some release branches, so we only run this on older release branches.
matchBaseBranches: [
'release-1.16',
],
postUpgradeTasks: {
// Post-upgrade tasks that are executed before a commit is made by Renovate.
"commands": [
"make go.generate",
],
fileFilters: [
"**/*"
],
executionMode: "update",
},
},
{
"description": "Lint code after upgrading golangci-lint (main)",
"matchDepNames": [
"golangci/golangci-lint"
],
// Currently we only have an Earthfile on main and some release branches, so we ignore the ones we know don't have it.
matchBaseBranches: [
'!/release-1\.16/',
],
postUpgradeTasks: {
// Post-upgrade tasks that are executed before a commit is made by Renovate.
"commands": [
"earthly --strict +go-lint",
],
fileFilters: [
"**/*"
],
executionMode: "update",
},
},
{
"description": "Lint code after upgrading golangci-lint (release branch)",
"matchDepNames": [
"golangci/golangci-lint"
],
// Currently we only have an Earthfile on main and some release branches, so we only run this on older release branches.
matchBaseBranches: [
'release-1.16',
],
postUpgradeTasks: {
// Post-upgrade tasks that are executed before a commit is made by Renovate.
"commands": [
"make go.lint",
],
fileFilters: [
"**/*"
],
executionMode: "update",
},
},
{
"description": "Ignore non-security related updates to release branches",
matchBaseBranches: [
"/^release-.*/"
],
enabled: false,
},
{
"description": "Still update Docker images on release branches though",
"matchDatasources": [
"docker"
],
matchBaseBranches: [
"/^release-.*/"
],
enabled: true,
},
{
"description": "Only get Docker image updates every 2 weeks to reduce noise",
"matchDatasources": [
"docker"
],
"schedule": [
"every 2 weeks on monday"
],
enabled: true,
},
{
"description": "Ignore k8s.io/client-go older versions, they switched to semantic version and old tags are still available in the repo",
"matchDatasources": [
"go"
],
"matchDepNames": [
"k8s.io/client-go"
],
"allowedVersions": "<1.0",
},
{
"description": "Ignore k8s dependencies, should be updated on crossplane-runtime",
"matchDatasources": [
"go"
],
"matchPackagePrefixes": [
"k8s.io",
"sigs.k8s.io"
],
"enabled": false,
},
{
"description": "Only get dependency digest updates every month to reduce noise, except crossplane-runtime",
"excludePackageNames": [
"github.com/crossplane/crossplane-runtime"
],
"matchDatasources": [
"go"
],
"matchUpdateTypes": [
"digest",
],
"extends": [
"schedule:monthly"
],
},
{
"description": "Ignore oss-fuzz, it's not using tags, we'll stick to master",
"matchDepTypes": [
"action"
],
"matchDepNames": [
"google/oss-fuzz"
],
"enabled": false
},
{
"description": "Group all go version updates",
"matchDatasources": [
"golang-version"
],
"groupName": "golang version",
}
],
}

38
.github/stale.yml vendored Normal file
View File

@ -0,0 +1,38 @@
# Configuration for probot-stale - https://github.com/probot/stale
# Number of days of inactivity before an Issue or Pull Request becomes stale
daysUntilStale: 90
# Number of days of inactivity before a stale Issue or Pull Request is closed.
# Set to false to disable. If disabled, issues still need to be closed manually, but will remain marked as stale.
daysUntilClose: 7
# Issues or Pull Requests with these labels will never be considered stale. Set to `[]` to disable
exemptLabels:
- security
# Set to true to ignore issues in a project (defaults to false)
exemptProjects: false
# Set to true to ignore issues in a milestone (defaults to false)
exemptMilestones: false
# Label to use when marking as stale
staleLabel: wontfix
# Comment to post when marking as stale. Set to `false` to disable
markComment: >
This issue has been automatically marked as stale because it has not had
recent activity. It will be closed if no further activity occurs. Thank you
for your contributions.
# Comment to post when closing a stale Issue or Pull Request.
closeComment: >
This issue has been automatically closed due to inactivity. Please re-open
if this still requires investigation.
# Limit the number of actions per hour, from 1-30. Default is 30
limitPerRun: 30
# Limit to only `issues` or `pulls`
only: issues

View File

@ -18,16 +18,17 @@ jobs:
# The main gotchas with this action are that it _only_ supports merge commits,
# and that PRs _must_ be labelled before they're merged to trigger a backport.
open-pr:
runs-on: ubuntu-22.04
runs-on: ubuntu-18.04
if: github.event.pull_request.merged
steps:
- name: Checkout
uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4
uses: actions/checkout@v2
with:
fetch-depth: 0
- name: Open Backport PR
uses: zeebe-io/backport-action@ef20d86abccbac3ee3a73cb2efbdc06344c390e5 # v2.5.0
uses: zeebe-io/backport-action@v0.0.4
with:
github_token: ${{ secrets.GITHUB_TOKEN }}
github_workspace: ${{ github.workspace }}
version: v0.0.4

View File

@ -3,243 +3,322 @@ name: CI
on:
push:
branches:
- main
- master
- release-*
pull_request: {}
workflow_dispatch: {}
env:
# Common versions
EARTHLY_VERSION: '0.8.15'
# Force Earthly to use color output
FORCE_COLOR: "1"
GO_VERSION: '1.17'
GOLANGCI_VERSION: 'v1.31'
DOCKER_BUILDX_VERSION: 'v0.4.2'
# Common users. We can't run a step 'if secrets.AWS_USR != ""' but we can run
# a step 'if env.AWS_USR != ""', so we copy these to succinctly test whether
# credentials have been provided before trying to run steps that need them.
DOCKER_USR: ${{ secrets.DOCKER_USR }}
AWS_USR: ${{ secrets.AWS_USR }}
jobs:
check-diff:
runs-on: ubuntu-22.04
detect-noop:
runs-on: ubuntu-18.04
outputs:
noop: ${{ steps.noop.outputs.should_skip }}
steps:
- name: Checkout
uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4
- name: Setup Earthly
uses: earthly/actions-setup@v1
with:
github-token: ${{ secrets.GITHUB_TOKEN }}
version: ${{ env.EARTHLY_VERSION }}
- name: Login to DockerHub
uses: docker/login-action@9780b0c442fbb1117ed29e0efdff1e18412f7567 # v3
if: env.DOCKER_USR != ''
with:
username: ${{ secrets.DOCKER_USR }}
password: ${{ secrets.DOCKER_PSW }}
- name: Login to GitHub Container Registry
uses: docker/login-action@9780b0c442fbb1117ed29e0efdff1e18412f7567 # v3
with:
registry: ghcr.io
username: ${{ github.actor }}
password: ${{ secrets.GITHUB_TOKEN }}
- name: Configure Earthly to Push Cache to GitHub Container Registry
if: github.ref == 'refs/heads/main'
run: |
echo "EARTHLY_PUSH=true" >> $GITHUB_ENV
echo "EARTHLY_MAX_REMOTE_CACHE=true" >> $GITHUB_ENV
- name: Generate Files
run: earthly --strict --remote-cache ghcr.io/crossplane/crossplane-runtime-earthly-cache:${{ github.job }} +generate
- name: Count Changed Files
id: changed_files
run: echo "count=$(git status --porcelain | wc -l)" >> $GITHUB_OUTPUT
- name: Fail if Files Changed
if: steps.changed_files.outputs.count != 0
uses: actions/github-script@60a0d83039c74a4aee543508d2ffcb1c3799cdea # v7
with:
script: core.setFailed('Found changed files after running earthly +generate.')
lint:
runs-on: ubuntu-22.04
steps:
- name: Checkout
uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4
- name: Setup Earthly
uses: earthly/actions-setup@v1
with:
github-token: ${{ secrets.GITHUB_TOKEN }}
version: ${{ env.EARTHLY_VERSION }}
- name: Login to DockerHub
uses: docker/login-action@9780b0c442fbb1117ed29e0efdff1e18412f7567 # v3
if: env.DOCKER_USR != ''
with:
username: ${{ secrets.DOCKER_USR }}
password: ${{ secrets.DOCKER_PSW }}
- name: Login to GitHub Container Registry
uses: docker/login-action@9780b0c442fbb1117ed29e0efdff1e18412f7567 # v3
with:
registry: ghcr.io
username: ${{ github.actor }}
password: ${{ secrets.GITHUB_TOKEN }}
- name: Configure Earthly to Push Cache to GitHub Container Registry
if: github.ref == 'refs/heads/main'
run: |
echo "EARTHLY_PUSH=true" >> $GITHUB_ENV
echo "EARTHLY_MAX_REMOTE_CACHE=true" >> $GITHUB_ENV
- name: Lint
run: earthly --strict --remote-cache ghcr.io/crossplane/crossplane-runtime-earthly-cache:${{ github.job }} +lint
codeql:
runs-on: ubuntu-22.04
steps:
- name: Checkout
uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4
- name: Setup Earthly
uses: earthly/actions-setup@v1
with:
github-token: ${{ secrets.GITHUB_TOKEN }}
version: ${{ env.EARTHLY_VERSION }}
- name: Login to DockerHub
uses: docker/login-action@9780b0c442fbb1117ed29e0efdff1e18412f7567 # v3
if: env.DOCKER_USR != ''
with:
username: ${{ secrets.DOCKER_USR }}
password: ${{ secrets.DOCKER_PSW }}
- name: Login to GitHub Container Registry
uses: docker/login-action@9780b0c442fbb1117ed29e0efdff1e18412f7567 # v3
with:
registry: ghcr.io
username: ${{ github.actor }}
password: ${{ secrets.GITHUB_TOKEN }}
- name: Configure Earthly to Push Cache to GitHub Container Registry
if: github.ref == 'refs/heads/main'
run: |
echo "EARTHLY_PUSH=true" >> $GITHUB_ENV
echo "EARTHLY_MAX_REMOTE_CACHE=true" >> $GITHUB_ENV
- name: Run CodeQL
run: earthly --strict --remote-cache ghcr.io/crossplane/crossplane-runtime-earthly-cache:${{ github.job }} +ci-codeql
- name: Upload CodeQL Results to GitHub
uses: github/codeql-action/upload-sarif@b56ba49b26e50535fa1e7f7db0f4f7b4bf65d80d # v3
with:
sarif_file: '_output/codeql/go.sarif'
trivy-scan-fs:
runs-on: ubuntu-22.04
steps:
- name: Checkout
uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4
- name: Run Trivy vulnerability scanner in fs mode
uses: aquasecurity/trivy-action@18f2510ee396bbf400402947b394f2dd8c87dbb0 # 0.29.0
with:
scan-type: 'fs'
ignore-unfixed: true
skip-dirs: design
scan-ref: '.'
severity: 'CRITICAL,HIGH'
format: sarif
output: 'trivy-results.sarif'
- name: Upload Trivy Results to GitHub
uses: github/codeql-action/upload-sarif@b56ba49b26e50535fa1e7f7db0f4f7b4bf65d80d # v3
with:
sarif_file: 'trivy-results.sarif'
unit-tests:
runs-on: ubuntu-22.04
steps:
- name: Checkout
uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4
- name: Setup Earthly
uses: earthly/actions-setup@v1
with:
github-token: ${{ secrets.GITHUB_TOKEN }}
version: ${{ env.EARTHLY_VERSION }}
- name: Login to DockerHub
uses: docker/login-action@9780b0c442fbb1117ed29e0efdff1e18412f7567 # v3
if: env.DOCKER_USR != ''
with:
username: ${{ secrets.DOCKER_USR }}
password: ${{ secrets.DOCKER_PSW }}
- name: Login to GitHub Container Registry
uses: docker/login-action@9780b0c442fbb1117ed29e0efdff1e18412f7567 # v3
with:
registry: ghcr.io
username: ${{ github.actor }}
password: ${{ secrets.GITHUB_TOKEN }}
- name: Configure Earthly to Push Cache to GitHub Container Registry
if: github.ref == 'refs/heads/main'
run: |
echo "EARTHLY_PUSH=true" >> $GITHUB_ENV
echo "EARTHLY_MAX_REMOTE_CACHE=true" >> $GITHUB_ENV
- name: Run Unit Tests
run: earthly --strict --remote-cache ghcr.io/crossplane/crossplane-runtime-earthly-cache:${{ github.job }} +test
- name: Publish Unit Test Coverage
uses: codecov/codecov-action@b9fd7d16f6d7d1b5d2bec1a2887e65ceed900238 # v4
with:
flags: unittests
file: _output/tests/coverage.txt
token: ${{ secrets.CODECOV_TOKEN }}
protobuf-schemas:
runs-on: ubuntu-22.04
steps:
- name: Checkout
uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4
- name: Setup Buf
uses: bufbuild/buf-setup-action@v1
- name: Detect No-op Changes
id: noop
uses: fkirc/skip-duplicate-actions@v2.0.0
with:
github_token: ${{ secrets.GITHUB_TOKEN }}
paths_ignore: '["**.md", "**.png", "**.jpg"]'
do_not_skip: '["workflow_dispatch", "schedule", "push"]'
- name: Lint Protocol Buffers
uses: bufbuild/buf-lint-action@v1
with:
input: apis
# buf-breaking-action doesn't support branches
# https://github.com/bufbuild/buf-push-action/issues/34
- name: Detect Breaking Changes in Protocol Buffers
uses: bufbuild/buf-breaking-action@a074e988ee34efcd4927079e79c611f428354c01 # v1
# We want to run this for the main branch, and PRs against main.
if: ${{ github.ref == 'refs/heads/main' || github.base_ref == 'main' }}
with:
input: apis
against: "https://github.com/${GITHUB_REPOSITORY}.git#branch=main,subdir=apis"
lint:
runs-on: ubuntu-18.04
needs: detect-noop
if: needs.detect-noop.outputs.noop != 'true'
- name: Push Protocol Buffers to Buf Schema Registry
if: ${{ github.repository == 'crossplane/crossplane-runtime' && github.ref == 'refs/heads/main' }}
uses: bufbuild/buf-push-action@v1
steps:
- name: Checkout
uses: actions/checkout@v2
with:
input: apis
buf_token: ${{ secrets.BUF_TOKEN }}
submodules: true
- name: Find the Go Build Cache
id: go
run: echo "::set-output name=cache::$(go env GOCACHE)"
- name: Cache the Go Build Cache
uses: actions/cache@v2
with:
path: ${{ steps.go.outputs.cache }}
key: ${{ runner.os }}-build-lint-${{ hashFiles('**/go.sum') }}
restore-keys: ${{ runner.os }}-build-lint-
- name: Cache Go Dependencies
uses: actions/cache@v2
with:
path: .work/pkg
key: ${{ runner.os }}-pkg-${{ hashFiles('**/go.sum') }}
restore-keys: ${{ runner.os }}-pkg-
- name: Vendor Dependencies
run: make vendor vendor.check
# Go version coming with golangci-lint-action may not be our desired
# go version. We deploy our desired go version and then skip go
# installation in golangci-lint-action in the next step as suggested
# in https://github.com/golangci/golangci-lint-action/issues/183
- uses: actions/setup-go@v2
with:
go-version: ${{ env.GO_VERSION }}
# We could run 'make lint' to ensure our desired Go version, but we
# prefer this action because it leaves 'annotations' (i.e. it comments
# on PRs to point out linter violations).
- name: Lint
uses: golangci/golangci-lint-action@v2
with:
version: ${{ env.GOLANGCI_VERSION }}
skip-go-installation: true
check-diff:
runs-on: ubuntu-18.04
needs: detect-noop
if: needs.detect-noop.outputs.noop != 'true'
steps:
- name: Checkout
uses: actions/checkout@v2
with:
submodules: true
- name: Setup Go
uses: actions/setup-go@v2
with:
go-version: ${{ env.GO_VERSION }}
- name: Find the Go Build Cache
id: go
run: echo "::set-output name=cache::$(go env GOCACHE)"
- name: Cache the Go Build Cache
uses: actions/cache@v2
with:
path: ${{ steps.go.outputs.cache }}
key: ${{ runner.os }}-build-check-diff-${{ hashFiles('**/go.sum') }}
restore-keys: ${{ runner.os }}-build-check-diff-
- name: Cache Go Dependencies
uses: actions/cache@v2
with:
path: .work/pkg
key: ${{ runner.os }}-pkg-${{ hashFiles('**/go.sum') }}
restore-keys: ${{ runner.os }}-pkg-
- name: Vendor Dependencies
run: make vendor vendor.check
- name: Check Diff
run: make check-diff
unit-tests:
runs-on: ubuntu-18.04
needs: detect-noop
if: needs.detect-noop.outputs.noop != 'true'
steps:
- name: Checkout
uses: actions/checkout@v2
with:
submodules: true
- name: Fetch History
run: git fetch --prune --unshallow
- name: Setup Go
uses: actions/setup-go@v2
with:
go-version: ${{ env.GO_VERSION }}
- name: Find the Go Build Cache
id: go
run: echo "::set-output name=cache::$(go env GOCACHE)"
- name: Cache the Go Build Cache
uses: actions/cache@v2
with:
path: ${{ steps.go.outputs.cache }}
key: ${{ runner.os }}-build-unit-tests-${{ hashFiles('**/go.sum') }}
restore-keys: ${{ runner.os }}-build-unit-tests-
- name: Cache Go Dependencies
uses: actions/cache@v2
with:
path: .work/pkg
key: ${{ runner.os }}-pkg-${{ hashFiles('**/go.sum') }}
restore-keys: ${{ runner.os }}-pkg-
- name: Vendor Dependencies
run: make vendor vendor.check
- name: Run Unit Tests
run: make -j2 test
- name: Publish Unit Test Coverage
uses: codecov/codecov-action@v1
with:
flags: unittests
file: _output/tests/linux_amd64/coverage.txt
e2e-tests:
runs-on: ubuntu-18.04
needs: detect-noop
if: needs.detect-noop.outputs.noop != 'true'
steps:
- name: Setup QEMU
uses: docker/setup-qemu-action@v1
with:
platforms: all
- name: Setup Docker Buildx
uses: docker/setup-buildx-action@v1
with:
version: ${{ env.DOCKER_BUILDX_VERSION }}
install: true
- name: Checkout
uses: actions/checkout@v2
with:
submodules: true
- name: Fetch History
run: git fetch --prune --unshallow
- name: Setup Go
uses: actions/setup-go@v2
with:
go-version: ${{ env.GO_VERSION }}
- name: Find the Go Build Cache
id: go
run: echo "::set-output name=cache::$(go env GOCACHE)"
- name: Cache the Go Build Cache
uses: actions/cache@v2
with:
path: ${{ steps.go.outputs.cache }}
key: ${{ runner.os }}-build-e2e-tests-${{ hashFiles('**/go.sum') }}
restore-keys: ${{ runner.os }}-build-e2e-tests-
- name: Cache Go Dependencies
uses: actions/cache@v2
with:
path: .work/pkg
key: ${{ runner.os }}-pkg-${{ hashFiles('**/go.sum') }}
restore-keys: |
${{ runner.os }}-pkg-
- name: Vendor Dependencies
run: make vendor vendor.check
- name: Build Helm Chart
run: make -j2 build
env:
# We're using docker buildx, which doesn't actually load the images it
# builds by default. Specifying --load does so.
BUILD_ARGS: "--load"
- name: Run E2E Tests
run: make e2e USE_HELM3=true
publish-artifacts:
runs-on: ubuntu-18.04
needs: detect-noop
if: needs.detect-noop.outputs.noop != 'true'
steps:
- name: Setup QEMU
uses: docker/setup-qemu-action@v1
with:
platforms: all
- name: Setup Docker Buildx
uses: docker/setup-buildx-action@v1
with:
version: ${{ env.DOCKER_BUILDX_VERSION }}
install: true
- name: Checkout
uses: actions/checkout@v2
with:
submodules: true
- name: Fetch History
run: git fetch --prune --unshallow
- name: Setup Go
uses: actions/setup-go@v2
with:
go-version: ${{ env.GO_VERSION }}
- name: Find the Go Build Cache
id: go
run: echo "::set-output name=cache::$(go env GOCACHE)"
- name: Cache the Go Build Cache
uses: actions/cache@v2
with:
path: ${{ steps.go.outputs.cache }}
key: ${{ runner.os }}-build-publish-artifacts-${{ hashFiles('**/go.sum') }}
restore-keys: ${{ runner.os }}-build-publish-artifacts-
- name: Cache Go Dependencies
uses: actions/cache@v2
with:
path: .work/pkg
key: ${{ runner.os }}-pkg-${{ hashFiles('**/go.sum') }}
restore-keys: ${{ runner.os }}-pkg-
- name: Vendor Dependencies
run: make vendor vendor.check
- name: Build Artifacts
run: make -j2 build.all
env:
# We're using docker buildx, which doesn't actually load the images it
# builds by default. Specifying --load does so.
BUILD_ARGS: "--load"
- name: Publish Artifacts to GitHub
uses: actions/upload-artifact@v2
with:
name: output
path: _output/**
- name: Login to Docker
uses: docker/login-action@v1
if: env.DOCKER_USR != ''
with:
username: ${{ secrets.DOCKER_USR }}
password: ${{ secrets.DOCKER_PSW }}
- name: Publish Artifacts to S3 and Docker Hub
run: make -j2 publish BRANCH_NAME=${GITHUB_REF##*/}
if: env.AWS_USR != '' && env.DOCKER_USR != ''
env:
AWS_ACCESS_KEY_ID: ${{ secrets.AWS_USR }}
AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_PSW }}
GIT_API_TOKEN: ${{ secrets.GITHUB_TOKEN }}
- name: Promote Artifacts in S3 and Docker Hub
if: github.ref == 'refs/heads/master' && env.AWS_USR != '' && env.DOCKER_USR != ''
run: make -j2 promote
env:
BRANCH_NAME: master
CHANNEL: master
AWS_ACCESS_KEY_ID: ${{ secrets.AWS_USR }}
AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_PSW }}

View File

@ -4,13 +4,13 @@ on: issue_comment
jobs:
points:
runs-on: ubuntu-22.04
runs-on: ubuntu-18.04
if: startsWith(github.event.comment.body, '/points')
steps:
- name: Extract Command
id: command
uses: xt0rted/slash-command-action@bf51f8f5f4ea3d58abc7eca58f77104182b23e88 # v2
uses: xt0rted/slash-command-action@v1
with:
repo-token: ${{ secrets.GITHUB_TOKEN }}
command: points
@ -19,7 +19,7 @@ jobs:
allow-edits: "false"
permission-level: write
- name: Handle Command
uses: actions/github-script@60a0d83039c74a4aee543508d2ffcb1c3799cdea # v7
uses: actions/github-script@v4
env:
POINTS: ${{ steps.command.outputs.command-arguments }}
with:
@ -65,12 +65,12 @@ jobs:
# NOTE(negz): See also backport.yml, which is the variant that triggers on PR
# merge rather than on comment.
backport:
runs-on: ubuntu-22.04
runs-on: ubuntu-18.04
if: github.event.issue.pull_request && startsWith(github.event.comment.body, '/backport')
steps:
- name: Extract Command
id: command
uses: xt0rted/slash-command-action@bf51f8f5f4ea3d58abc7eca58f77104182b23e88 # v2
uses: xt0rted/slash-command-action@v1
with:
repo-token: ${{ secrets.GITHUB_TOKEN }}
command: backport
@ -80,32 +80,13 @@ jobs:
permission-level: write
- name: Checkout
uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4
uses: actions/checkout@v2
with:
fetch-depth: 0
- name: Open Backport PR
uses: zeebe-io/backport-action@ef20d86abccbac3ee3a73cb2efbdc06344c390e5 # v2.5.0
uses: zeebe-io/backport-action@v0.0.4
with:
github_token: ${{ secrets.GITHUB_TOKEN }}
github_workspace: ${{ github.workspace }}
fresh:
runs-on: ubuntu-22.04
if: startsWith(github.event.comment.body, '/fresh')
steps:
- name: Extract Command
id: command
uses: xt0rted/slash-command-action@bf51f8f5f4ea3d58abc7eca58f77104182b23e88 # v2
with:
repo-token: ${{ secrets.GITHUB_TOKEN }}
command: fresh
reaction: "true"
reaction-type: "eyes"
allow-edits: "false"
permission-level: read
- name: Handle Command
uses: actions-ecosystem/action-remove-labels@2ce5d41b4b6aa8503e285553f75ed56e0a40bae0 # v1
with:
github_token: ${{ secrets.GITHUB_TOKEN }}
labels: stale
version: v0.0.4

49
.github/workflows/promote.yml vendored Normal file
View File

@ -0,0 +1,49 @@
name: Promote
on:
workflow_dispatch:
inputs:
version:
description: 'Release version (e.g. v0.1.0)'
required: true
channel:
description: 'Release channel'
required: true
default: 'alpha'
env:
# Common users. We can't run a step 'if secrets.AWS_USR != ""' but we can run
# a step 'if env.AWS_USR != ""', so we copy these to succinctly test whether
# credentials have been provided before trying to run steps that need them.
DOCKER_USR: ${{ secrets.DOCKER_USR }}
AWS_USR: ${{ secrets.AWS_USR }}
jobs:
promote-artifacts:
runs-on: ubuntu-18.04
steps:
- name: Checkout
uses: actions/checkout@v2
with:
submodules: true
- name: Fetch History
run: git fetch --prune --unshallow
- name: Login to Docker
uses: docker/login-action@v1
if: env.DOCKER_USR != ''
with:
username: ${{ secrets.DOCKER_USR }}
password: ${{ secrets.DOCKER_PSW }}
- name: Promote Artifacts in S3 and Docker Hub
if: env.AWS_USR != '' && env.DOCKER_USR != ''
run: make -j2 promote BRANCH_NAME=${GITHUB_REF##*/}
env:
VERSION: ${{ github.event.inputs.version }}
CHANNEL: ${{ github.event.inputs.channel }}
AWS_ACCESS_KEY_ID: ${{ secrets.AWS_USR }}
AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_PSW }}

View File

@ -1,54 +0,0 @@
name: Renovate
on:
# Allows manual/automated trigger for debugging purposes
workflow_dispatch:
inputs:
logLevel:
description: "Renovate's log level"
required: true
default: "info"
type: string
schedule:
- cron: '0 8 * * *'
env:
# Common versions
EARTHLY_VERSION: '0.8.15'
LOG_LEVEL: "info"
jobs:
renovate:
runs-on: ubuntu-latest
if: |
!github.event.repository.fork &&
!github.event.pull_request.head.repo.fork
steps:
- name: Checkout
uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
# Don't waste time starting Renovate if JSON is invalid
- name: Validate Renovate JSON
run: npx --yes --package renovate -- renovate-config-validator
- name: Get token
id: get-github-app-token
uses: actions/create-github-app-token@d72941d797fd3113feb6b93fd0dec494b13a2547 # v1
with:
app-id: ${{ secrets.RENOVATE_GITHUB_APP_ID }}
private-key: ${{ secrets.RENOVATE_GITHUB_APP_PRIVATE_KEY }}
- name: Self-hosted Renovate
uses: renovatebot/github-action@0984fb80fc633b17e57f3e8b6c007fe0dc3e0d62 # v40.3.6
env:
RENOVATE_REPOSITORIES: ${{ github.repository }}
# Use GitHub API to create commits
RENOVATE_PLATFORM_COMMIT: "true"
LOG_LEVEL: ${{ github.event.inputs.logLevel || env.LOG_LEVEL }}
RENOVATE_ALLOWED_POST_UPGRADE_COMMANDS: '["^earthly .+"]'
with:
configurationFile: .github/renovate.json5
token: '${{ steps.get-github-app-token.outputs.token }}'
mount-docker-socket: true
docker-user: root
docker-cmd-file: .github/renovate-entrypoint.sh

View File

@ -1,47 +0,0 @@
name: Stale Issues and PRs
on:
schedule:
# Process new stale issues once a day. Folks can /fresh for a fast un-stale
# per the commands workflow. Run at 1:15 mostly as a somewhat unique time to
# help correlate any issues with this workflow.
- cron: '15 1 * * *'
workflow_dispatch: {}
permissions:
issues: write
pull-requests: write
jobs:
stale:
runs-on: ubuntu-22.04
steps:
- uses: actions/stale@5bef64f19d7facfb25b37b414482c7164d639639 # v9
with:
# This action uses ~2 operations per stale issue per run to determine
# whether it's still stale. It also uses 2-3 operations to mark an issue
# stale or not. During steady state (no issues to mark stale, check, or
# close) we seem to use less than 10 operations with ~150 issues and PRs
# open.
#
# Our hourly rate-limit budget for all workflows that use GITHUB_TOKEN
# is 1,000 requests per the below docs.
# https://docs.github.com/en/rest/overview/resources-in-the-rest-api#requests-from-github-actions
operations-per-run: 100
days-before-stale: 90
days-before-close: 14
stale-issue-label: stale
exempt-issue-labels: exempt-from-stale
stale-issue-message: >
Crossplane does not currently have enough maintainers to address every
issue and pull request. This issue has been automatically marked as
`stale` because it has had no activity in the last 90 days. It will be
closed in 14 days if no further activity occurs. Leaving a comment
**starting with** `/fresh` will mark this issue as not stale.
stale-pr-label: stale
exempt-pr-labels: exempt-from-stale
stale-pr-message:
Crossplane does not currently have enough maintainers to address every
issue and pull request. This pull request has been automatically
marked as `stale` because it has had no activity in the last 90 days.
It will be closed in 14 days if no further activity occurs.
Adding a comment **starting with** `/fresh` will mark this PR as not stale.

View File

@ -12,15 +12,15 @@ on:
jobs:
create-tag:
runs-on: ubuntu-22.04
runs-on: ubuntu-18.04
steps:
- name: Checkout
uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4
uses: actions/checkout@v2
- name: Create Tag
uses: negz/create-tag@39bae1e0932567a58c20dea5a1a0d18358503320 # v1
uses: negz/create-tag@v1
with:
version: ${{ github.event.inputs.version }}
message: ${{ github.event.inputs.message }}
token: ${{ secrets.GITHUB_TOKEN }}
token: ${{ secrets.GITHUB_TOKEN }}

3
.gitmodules vendored Normal file
View File

@ -0,0 +1,3 @@
[submodule "build"]
path = build
url = https://github.com/upbound/build

View File

@ -1,265 +1,202 @@
version: "2"
run:
timeout: 10m
skip-files:
- "zz_generated\\..+\\.go$"
output:
formats:
text:
path: stderr
# colored-line-number|line-number|json|tab|checkstyle|code-climate, default is "colored-line-number"
format: colored-line-number
linters-settings:
errcheck:
# report about not checking of errors in type assetions: `a := b.(MyStruct)`;
# default is false: such cases aren't reported by default.
check-type-assertions: false
# report about assignment of errors to blank identifier: `num, _ := strconv.Atoi(numStr)`;
# default is false: such cases aren't reported by default.
check-blank: false
# [deprecated] comma-separated list of pairs of the form pkg:regex
# the regex is used to ignore names within pkg. (default "fmt:.*").
# see https://github.com/kisielk/errcheck#the-deprecated-method for details
ignore: fmt:.*,io/ioutil:^Read.*
govet:
# report about shadowed variables
check-shadowing: false
golint:
# minimal confidence for issues, default is 0.8
min-confidence: 0.8
gofmt:
# simplify code: gofmt with `-s` option, true by default
simplify: true
goimports:
# put imports beginning with prefix after 3rd-party packages;
# it's a comma-separated list of prefixes
local-prefixes: github.com/crossplane/crossplane
gocyclo:
# minimal code complexity to report, 30 by default (but we recommend 10-20)
min-complexity: 10
maligned:
# print struct with more effective memory layout or not, false by default
suggest-new: true
dupl:
# tokens count to trigger issue, 150 by default
threshold: 100
goconst:
# minimal length of string constant, 3 by default
min-len: 3
# minimal occurrences count to trigger, 3 by default
min-occurrences: 5
lll:
# tab width in spaces. Default to 1.
tab-width: 1
unused:
# treat code as a program (not a library) and report unused exported identifiers; default is false.
# XXX: if you enable this setting, unused will report a lot of false-positives in text editors:
# if it's called for subdir of a project it can't find funcs usages. All text editor integrations
# with golangci-lint call it on a directory with the changed file.
check-exported: false
unparam:
# Inspect exported functions, default is false. Set to true if no external program/library imports your code.
# XXX: if you enable this setting, unparam will report a lot of false-positives in text editors:
# if it's called for subdir of a project it can't find external interfaces. All text editor integrations
# with golangci-lint call it on a directory with the changed file.
check-exported: false
nakedret:
# make an issue if func has more lines of code than this setting and it has naked returns; default is 30
max-func-lines: 30
prealloc:
# XXX: we don't recommend using this linter before doing performance profiling.
# For most programs usage of prealloc will be a premature optimization.
# Report preallocation suggestions only on simple loops that have no returns/breaks/continues/gotos in them.
# True by default.
simple: true
range-loops: true # Report preallocation suggestions on range loops, true by default
for-loops: false # Report preallocation suggestions on for loops, false by default
gocritic:
# Enable multiple checks by tags, run `GL_DEBUG=gocritic golangci-lint` run to see all tags and checks.
# Empty list by default. See https://github.com/go-critic/go-critic#usage -> section "Tags".
enabled-tags:
- performance
settings: # settings passed to gocritic
captLocal: # must be valid enabled check name
paramsOnly: true
rangeValCopy:
sizeThreshold: 32
linters:
default: all
disable:
# These are linters we'd like to enable, but that will be labor intensive to
# make existing code compliant.
- wrapcheck
- varnamelen
- testpackage
- paralleltest
- nilnil
- funcorder
# Below are linters that lint for things we don't value. Each entry below
# this line must have a comment explaining the rationale.
# These linters add whitespace in an attempt to make code more readable.
# This isn't a widely accepted Go best practice, and would be laborious to
# apply to existing code.
- wsl
- nlreturn
# Warns about uses of fmt.Sprintf that are less performant than alternatives
# such as string concatenation. We value readability more than performance
# unless performance is measured to be an issue.
- perfsprint
# This linter:
#
# 1. Requires errors.Is/errors.As to test equality.
# 2. Requires all errors be wrapped with fmt.Errorf specifically.
# 3. Disallows errors.New inline - requires package level errors.
#
# 1 is covered by other linters. 2 is covered by wrapcheck, which can also
# handle our use of crossplane-runtime's errors package. 3 is more strict
# than we need. Not every error needs to be tested for equality.
- err113
# These linters duplicate gocognit, but calculate complexity differently.
enable:
- megacheck
- govet
- gocyclo
- cyclop
- nestif
- funlen
- maintidx
- gocritic
- interfacer
- goconst
- goimports
- gofmt # We enable this as well as goimports for its simplify mode.
- prealloc
- golint
- unconvert
- misspell
- nakedret
# Enforces max line length. It's not idiomatic to enforce a strict limit on
# line length in Go. We'd prefer to lint for things that often cause long
# lines, like functions with too many parameters or long parameter names
# that duplicate their types.
- lll
presets:
- bugs
- unused
fast: false
# Warns about struct instantiations that don't specify every field. Could be
# useful in theory to catch fields that are accidentally omitted. Seems like
# it would have many more false positives than useful catches, though.
- exhaustruct
# Warns about TODO comments. The rationale being they should be issues
# instead. We're okay with using TODO to track minor cleanups for next time
# we touch a particular file.
- godox
# Warns about duplicated code blocks within the same file. Could be useful
# to prompt folks to think about whether code should be broken out into a
# function, but generally we're less worried about DRY and fine with a
# little copying. We don't want to give folks the impression that we require
# every duplicated code block to be factored out into a function.
- dupl
# Warns about returning interfaces rather than concrete types. We do think
# it's best to avoid returning interfaces where possible. However, at the
# time of writing enabling this linter would only catch the (many) cases
# where we must return an interface.
- ireturn
# Warns about returning named variables. We do think it's best to avoid
# returning named variables where possible. However, at the time of writing
# enabling this linter would only catch the (many) cases where returning
# named variables is useful to document what the variables are. For example
# we believe it makes sense to return (ready bool) rather than just (bool)
# to communicate what the bool means.
- nonamedreturns
# Warns about using magic numbers. We do think it's best to avoid magic
# numbers, but we should not be strict about it.
- mnd
# Warns about if err := Foo(); err != nil style error checks. Seems to go
# against idiomatic Go programming, which encourages this approach - e.g.
# to scope errors.
- noinlineerr
settings:
depguard:
rules:
no_third_party_test_libraries:
list-mode: lax
files:
- $test
deny:
- pkg: github.com/stretchr/testify
desc: See https://go.dev/wiki/TestComments#assert-libraries
- pkg: github.com/onsi/ginkgo
desc: See https://go.dev/wiki/TestComments#assert-libraries
- pkg: github.com/onsi/gomega
desc: See https://go.dev/wiki/TestComments#assert-libraries
dupl:
threshold: 100
errcheck:
check-type-assertions: false
check-blank: false
goconst:
min-len: 3
min-occurrences: 5
gocritic:
enabled-tags:
- performance
settings:
captLocal:
paramsOnly: true
rangeValCopy:
sizeThreshold: 32
govet:
disable:
- shadow
interfacebloat:
max: 5
lll:
tab-width: 1
nakedret:
max-func-lines: 30
nolintlint:
require-explanation: true
require-specific: true
prealloc:
simple: true
range-loops: true
for-loops: false
tagliatelle:
case:
rules:
json: goCamel
unparam:
check-exported: false
unused:
exported-fields-are-used: true
exclusions:
generated: lax
rules:
- linters:
- containedctx
- errcheck
- forcetypeassert
- gochecknoglobals
- gochecknoinits
- gocognit
- gosec
- scopelint
- unparam
- embeddedstructfieldcheck
path: _test(ing)?\.go
- linters:
- gocritic
path: _test\.go
text: (unnamedResult|exitAfterDefer)
# It's idiomatic to register Kubernetes types with a package scoped
# SchemeBuilder using an init function.
- linters:
- gochecknoglobals
- gochecknoinits
path: apis/
# These are performance optimisations rather than style issues per se.
# They warn when function arguments or range values copy a lot of memory
# rather than using a pointer.
- linters:
- gocritic
text: '(hugeParam|rangeValCopy):'
# This "TestMain should call os.Exit to set exit code" warning is not clever
# enough to notice that we call a helper method that calls os.Exit.
- linters:
- staticcheck
text: 'SA3000:'
# This is a "potential hardcoded credentials" warning. It's triggered by
# any variable with 'secret' in the same, and thus hits a lot of false
# positives in Kubernetes land where a Secret is an object type.
- linters:
- gosec
text: 'G101:'
# This is an 'errors unhandled' warning that duplicates errcheck.
- linters:
- gosec
text: 'G104:'
# This is about implicit memory aliasing in a range loop.
# This is a false positive with Go v1.22 and above.
- linters:
- gosec
text: 'G601:'
# Some k8s dependencies do not have JSON tags on all fields in structs.
- linters:
- musttag
path: k8s.io/
# Various fields related to native patch and transform Composition are
# deprecated, but we can't drop support from Crossplane 1.x. We ignore the
# warnings globally instead of suppressing them with comments everywhere.
- linters:
- staticcheck
text: 'SA1019: .+ is deprecated: Use Composition Functions instead.'
# Some shared structs in apis/common/v1 are moved to
# apis/common. To preserve a backward-compatible directory structure
# package had to be named common, which we suppress.
- linters:
- revive
text: "var-naming: avoid meaningless package names"
path: apis/common
paths:
- zz_generated\..+\.go$
- .+\.pb.go$
- third_party$
- builtin$
- examples$
issues:
max-issues-per-linter: 0
max-same-issues: 0
# Excluding configuration per-path and per-linter
exclude-rules:
# Exclude some linters from running on tests files.
- path: _test(ing)?\.go
linters:
- gocyclo
- errcheck
- dupl
- gosec
- scopelint
- unparam
# Ease some gocritic warnings on test files.
- path: _test\.go
text: "(unnamedResult|exitAfterDefer)"
linters:
- gocritic
# These are performance optimisations rather than style issues per se.
# They warn when function arguments or range values copy a lot of memory
# rather than using a pointer.
- text: "(hugeParam|rangeValCopy):"
linters:
- gocritic
# This "TestMain should call os.Exit to set exit code" warning is not clever
# enough to notice that we call a helper method that calls os.Exit.
- text: "SA3000:"
linters:
- staticcheck
- text: "k8s.io/api/core/v1"
linters:
- goimports
# This is a "potential hardcoded credentials" warning. It's triggered by
# any variable with 'secret' in the same, and thus hits a lot of false
# positives in Kubernetes land where a Secret is an object type.
- text: "G101:"
linters:
- gosec
- gas
# This is an 'errors unhandled' warning that duplicates errcheck.
- text: "G104:"
linters:
- gosec
- gas
# The Azure AddToUserAgent method appends to the existing user agent string.
# It returns an error if you pass it an empty string lettinga you know the
# user agent did not change, making it more of a warning.
- text: \.AddToUserAgent
linters:
- errcheck
# Independently from option `exclude` we use default exclude patterns,
# it can be disabled by this option. To list all
# excluded by default patterns execute `golangci-lint run --help`.
# Default value for this option is true.
exclude-use-default: false
# Show only new issues: if there are unstaged changes or untracked files,
# only those changes are analyzed, else only changes in HEAD~ are analyzed.
# It's a super-useful option for integration of golangci-lint into existing
# large codebase. It's not practical to fix all existing issues at the moment
# of integration: much better don't allow issues in new code.
# Default is false.
new: false
formatters:
enable:
- gci
- gofmt
- gofumpt
- goimports
settings:
gci:
sections:
- standard
- default
- prefix(github.com/crossplane/crossplane-runtime)
- blank
- dot
custom-order: true
gofmt:
simplify: true
exclusions:
generated: lax
paths:
- zz_generated\..+\.go$
- .+\.pb.go$
- third_party$
- builtin$
- examples$
# Maximum issues count per one linter. Set to 0 to disable. Default is 50.
max-per-linter: 0
# Maximum count of issues with the same text. Set to 0 to disable. Default is 3.
max-same-issues: 0

View File

@ -1,30 +0,0 @@
# This file controls automatic PR reviewer assignment. See the following docs:
#
# * https://docs.github.com/en/repositories/managing-your-repositorys-settings-and-features/customizing-your-repository/about-code-owners
# * https://docs.github.com/en/organizations/organizing-members-into-teams/managing-code-review-settings-for-your-team
#
# The goal of this file is for most PRs to automatically and fairly have one
# maintainer and two reviewers set as PR reviewers. All maintainers have
# permission to approve and merge PRs, but reviewers do not. Most PRs should be
# reviewed by members of the reviewers group before being passed to a maintainer
# for final review.
#
# This in part depends on how the groups in this file are configured.
#
# @crossplane/steering-committee - Assigns 3 members. Admin perms to this repo.
# @crossplane/crossplane-maintainers - Assigns 1 member. Maintain perms to this repo.
# @crossplane/crossplane-reviewers - Assigns 2 members. Write perms to this repo.
#
# Where possible, prefer explicitly specifying a maintainer who is a subject
# matter expert for a particular part of the codebase rather than using the
# @crossplane/crossplane-maintainers group.
#
# See also OWNERS.md for governance details
# Fallback owners
* @crossplane/crossplane-maintainers
# Governance owners - steering committee
/README.md @crossplane/steering-committee
/OWNERS.md @crossplane/steering-committee
/LICENSE @crossplane/steering-committee

153
Earthfile
View File

@ -1,153 +0,0 @@
# See https://docs.earthly.dev/docs/earthfile/features
VERSION --try --raw-output 0.8
PROJECT crossplane/crossplane-runtime
ARG --global GO_VERSION=1.24.4
# reviewable checks that a branch is ready for review. Run it before opening a
# pull request. It will catch a lot of the things our CI workflow will catch.
reviewable:
WAIT
BUILD +generate
END
BUILD +lint
BUILD +test
# test runs unit tests.
test:
BUILD +go-test
# lint runs linters.
lint:
BUILD +go-lint
# build builds Crossplane for your native OS and architecture.
build:
BUILD +go-build
# multiplatform-build builds Crossplane for all supported OS and architectures.
multiplatform-build:
BUILD +go-multiplatform-build
# generate runs code generation. To keep builds fast, it doesn't run as part of
# the build target. It's important to run it explicitly when code needs to be
# generated, for example when you update an API type.
generate:
BUILD +go-modules-tidy
BUILD +go-generate
# go-modules downloads Crossplane's go modules. It's the base target of most Go
# related target (go-build, etc).
go-modules:
ARG NATIVEPLATFORM
FROM --platform=${NATIVEPLATFORM} golang:${GO_VERSION}
WORKDIR /crossplane
CACHE --id go-build --sharing shared /root/.cache/go-build
COPY go.mod go.sum ./
RUN go mod download
SAVE ARTIFACT go.mod AS LOCAL go.mod
SAVE ARTIFACT go.sum AS LOCAL go.sum
# go-modules-tidy tidies and verifies go.mod and go.sum.
go-modules-tidy:
FROM +go-modules
CACHE --id go-build --sharing shared /root/.cache/go-build
COPY --dir apis/ pkg/ .
RUN go mod tidy
RUN go mod verify
SAVE ARTIFACT go.mod AS LOCAL go.mod
SAVE ARTIFACT go.sum AS LOCAL go.sum
# go-generate runs Go code generation.
go-generate:
FROM +go-modules
CACHE --id go-build --sharing shared /root/.cache/go-build
COPY --dir apis/ hack/ .
RUN go generate -tags 'generate' ./apis/...
SAVE ARTIFACT apis/ AS LOCAL apis
# go-build builds Crossplane binaries for your native OS and architecture.
go-build:
ARG TARGETARCH
ARG TARGETOS
ARG GOARCH=${TARGETARCH}
ARG GOOS=${TARGETOS}
ARG CGO_ENABLED=0
FROM +go-modules
CACHE --id go-build --sharing shared /root/.cache/go-build
COPY --dir apis/ pkg/ .
RUN go build ./...
# go-multiplatform-build builds Crossplane binaries for all supported OS
# and architectures.
go-multiplatform-build:
BUILD \
--platform=linux/amd64 \
--platform=linux/arm64 \
--platform=linux/arm \
--platform=linux/ppc64le \
--platform=darwin/arm64 \
--platform=darwin/amd64 \
--platform=windows/amd64 \
+go-build
# go-test runs Go unit tests.
go-test:
FROM +go-modules
CACHE --id go-build --sharing shared /root/.cache/go-build
COPY --dir apis/ pkg/ .
RUN go test -covermode=count -coverprofile=coverage.txt ./...
SAVE ARTIFACT coverage.txt AS LOCAL _output/tests/coverage.txt
# go-lint lints Go code.
go-lint:
ARG GOLANGCI_LINT_VERSION=v2.2.1
FROM +go-modules
# This cache is private because golangci-lint doesn't support concurrent runs.
CACHE --id go-lint --sharing private /root/.cache/golangci-lint
CACHE --id go-build --sharing shared /root/.cache/go-build
RUN curl -sSfL https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh | sh -s -- -b $(go env GOPATH)/bin ${GOLANGCI_LINT_VERSION}
COPY .golangci.yml .
COPY --dir apis/ pkg/ .
RUN golangci-lint run --fix
SAVE ARTIFACT apis AS LOCAL apis
SAVE ARTIFACT pkg AS LOCAL pkg
# Targets below this point are intended only for use in GitHub Actions CI. They
# may not work outside of that environment. For example they may depend on
# secrets that are only availble in the CI environment. Targets below this point
# must be prefixed with ci-.
# TODO(negz): Is there a better way to determine the Crossplane version?
# This versioning approach maintains compatibility with the build submodule. See
# https://github.com/crossplane/build/blob/231258/makelib/common.mk#L205. This
# approach is problematic in Earthly because computing it inside a containerized
# target requires copying the entire git repository into the container. Doing so
# would invalidate all dependent target caches any time any file in git changed.
# ci-codeql-setup sets up CodeQL for the ci-codeql target.
ci-codeql-setup:
ARG CODEQL_VERSION=v2.20.5
FROM curlimages/curl:8.8.0
RUN curl -fsSL https://github.com/github/codeql-action/releases/download/codeql-bundle-${CODEQL_VERSION}/codeql-bundle-linux64.tar.gz|tar zx
SAVE ARTIFACT codeql
# ci-codeql is used by CI to build Crossplane with CodeQL scanning enabled.
ci-codeql:
ARG CGO_ENABLED=0
ARG TARGETOS
ARG TARGETARCH
# Using a static CROSSPLANE_VERSION allows Earthly to cache E2E runs as long
# as no code changed. If the version contains a git commit (the default) the
# build layer cache is invalidated on every commit.
FROM +go-modules --CROSSPLANE_VERSION=v0.0.0-codeql
IF [ "${TARGETARCH}" = "arm64" ] && [ "${TARGETOS}" = "linux" ]
RUN --no-cache echo "CodeQL doesn't support Linux on Apple Silicon" && false
END
COPY --dir +ci-codeql-setup/codeql /codeql
CACHE --id go-build --sharing shared /root/.cache/go-build
COPY --dir apis/ pkg/ .
RUN /codeql/codeql database create /codeqldb --language=go
RUN /codeql/codeql database analyze /codeqldb --threads=0 --format=sarif-latest --output=go.sarif --sarif-add-baseline-file-info
SAVE ARTIFACT go.sarif AS LOCAL _output/codeql/go.sarif

13
INSTALL.md Normal file
View File

@ -0,0 +1,13 @@
# Crossplane-Runtime Setup
## Requirements
An Intel-based machine (recommend 2+ cores, 2+ GB of memory and 128GB of SSD). Inside your build environment (Docker for Mac or a VM), 6+ GB memory is also recommended.
The following tools are need on the host:
- curl
- git
- make
- golang
- kubebuilder (v1.0.4+)

84
Makefile Normal file
View File

@ -0,0 +1,84 @@
# ====================================================================================
# Setup Project
PROJECT_NAME := crossplane-runtime
PROJECT_REPO := github.com/crossplane/$(PROJECT_NAME)
PLATFORMS ?= linux_amd64 linux_arm64
# -include will silently skip missing files, which allows us
# to load those files with a target in the Makefile. If only
# "include" was used, the make command would fail and refuse
# to run a target until the include commands succeeded.
-include build/makelib/common.mk
# ====================================================================================
# Setup Images
# even though this repo doesn't build images (note the no-op img.build target below),
# some of the init is needed for the cross build container, e.g. setting BUILD_REGISTRY
-include build/makelib/image.mk
img.build:
# ====================================================================================
# Setup Go
# Set a sane default so that the nprocs calculation below is less noisy on the initial
# loading of this file
NPROCS ?= 1
# each of our test suites starts a kube-apiserver and running many test suites in
# parallel can lead to high CPU utilization. by default we reduce the parallelism
# to half the number of CPU cores.
GO_TEST_PARALLEL := $(shell echo $$(( $(NPROCS) / 2 )))
GO_LDFLAGS += -X $(GO_PROJECT)/pkg/version.Version=$(VERSION)
GO_SUBDIRS += pkg apis
GO111MODULE = on
-include build/makelib/golang.mk
# ====================================================================================
# Targets
# run `make help` to see the targets and options
# We want submodules to be set up the first time `make` is run.
# We manage the build/ folder and its Makefiles as a submodule.
# The first time `make` is run, the includes of build/*.mk files will
# all fail, and this target will be run. The next time, the default as defined
# by the includes will be run instead.
fallthrough: submodules
@echo Initial setup complete. Running make again . . .
@make
# Generate a coverage report for cobertura applying exclusions on
# - generated file
cobertura:
@cat $(GO_TEST_OUTPUT)/coverage.txt | \
grep -v zz_generated.deepcopy | \
$(GOCOVER_COBERTURA) > $(GO_TEST_OUTPUT)/cobertura-coverage.xml
# Update the submodules, such as the common build scripts.
submodules:
@git submodule sync
@git submodule update --init --recursive
.PHONY: cobertura reviewable submodules fallthrough
# ====================================================================================
# Special Targets
define CROSSPLANE_RUNTIME_HELP
Crossplane Runtime Targets:
cobertura Generate a coverage report for cobertura applying exclusions on generated files.
reviewable Ensure a PR is ready for review.
submodules Update the submodules, such as the common build scripts.
endef
export CROSSPLANE_RUNTIME_HELP
crossplane-runtime.help:
@echo "$$CROSSPLANE_RUNTIME_HELP"
help-special: crossplane-runtime.help
.PHONY: crossplane-runtime.help help-special

View File

@ -1,30 +1,16 @@
# Crossplane Maintainers
# OWNERS
This page lists all active maintainers and reviewers for **this** repository.
Each repository in the [Crossplane organization](https://github.com/crossplane/)
will list their repository maintainers and reviewers in their own `OWNERS.md`
file.
Please see [GOVERNANCE.md](https://github.com/crossplane/crossplane/blob/main/GOVERNANCE.md)
for governance guidelines and responsibilities for maintainers, and reviewers.
See [CODEOWNERS](CODEOWNERS) for automatic PR assignment.
This page lists all maintainers for **this** repository. Each repository in the [Crossplane
organization](https://github.com/crossplane/) will list their repository maintainers in their own
`OWNERS.md` file.
Please see the Crossplane
[GOVERNANCE.md](https://github.com/crossplane/crossplane/blob/master/GOVERNANCE.md) for governance
guidelines and responsibilities for the steering committee and maintainers.
## Maintainers
* Nic Cope <negz@upbound.io> ([negz](https://github.com/negz))
* Hasan Turken <hasan@upbound.io> ([turkenh](https://github.com/turkenh))
* Bob Haddleton <bob.haddleton@nokia.com> ([bobh66](https://github.com/bobh66))
* Philippe Scorsolini <philippe.scorsolini@upbound.io> ([phisco](https://github.com/phisco))
## Reviewers
* Yury Tsarev <yury@upbound.io> ([ytsarev](https://github.com/ytsarev))
* Ezgi Demirel <ezgi@upbound.io> ([ezgidemirel](https://github.com/ezgidemirel))
* Max Blatt ([MisterMX](https://github.com/MisterMX))
## Emeritus maintainers
* Jared Watts <jared@upbound.io> ([jbw976](https://github.com/jbw976))
* Illya Chekrygin <illya.chekrygin@gmail.com> ([ichekrygin](https://github.com/ichekrygin))
* Daniel Mangum <dan@upbound.io> ([hasheddan](https://github.com/hasheddan))
* Muvaffak Onuş <monus@upbound.io> ([muvaf](https://github.com/muvaf))
* Hasan Türken <hasan@upbound.io> ([turkenh](https://github.com/turkenh))

View File

@ -1,5 +1,4 @@
# crossplane-runtime
[![CI](https://github.com/crossplane/crossplane-runtime/actions/workflows/ci.yml/badge.svg)](https://github.com/crossplane/crossplane-runtime/actions/workflows/ci.yml) ![GitHub release (latest SemVer)](https://img.shields.io/github/v/release/crossplane/crossplane-runtime) [![Godoc](https://img.shields.io/badge/godoc-reference-blue.svg)](https://godoc.org/github.com/crossplane/crossplane-runtime)
# crossplane-runtime [![Godoc](https://img.shields.io/badge/godoc-reference-blue.svg)](https://godoc.org/github.com/crossplane/crossplane-runtime)
## Overview
@ -47,15 +46,15 @@ crossplane-runtime is under the Apache 2.0 license.
[![FOSSA Status](https://app.fossa.io/api/projects/git%2Bgithub.com%2Fcrossplane%2Fcrossplane-runtime.svg?type=large)](https://app.fossa.io/projects/git%2Bgithub.com%2Fcrossplane%2Fcrossplane-runtime?ref=badge_large)
[developer guide]: https://github.com/crossplane/crossplane/tree/main/contributing
[developer guide]: https://crossplane.io/docs/master/contributing/overview.html
[API documentation]: https://godoc.org/github.com/crossplane/crossplane-runtime
[contributing]: https://github.com/crossplane/crossplane/blob/main/CONTRIBUTING.md
[contributing]: https://github.com/crossplane/crossplane/blob/master/CONTRIBUTING.md
[issue]: https://github.com/crossplane/crossplane-runtime/issues
[slack channel]: https://slack.crossplane.io
[crossplane-dev]: https://groups.google.com/forum/#!forum/crossplane-dev
[@crossplane_io]: https://twitter.com/crossplane_io
[info@crossplane.io]: mailto:info@crossplane.io
[roadmap]: https://github.com/crossplane/crossplane/blob/main/ROADMAP.md
[governance]: https://github.com/crossplane/crossplane/blob/main/GOVERNANCE.md
[ownership]: https://github.com/crossplane/crossplane/blob/main/OWNERS.md
[code of conduct]: https://github.com/crossplane/crossplane/blob/main/CODE_OF_CONDUCT.md
[roadmap]: https://github.com/crossplane/crossplane/blob/master/ROADMAP.md
[governance]: https://github.com/crossplane/crossplane/blob/master/GOVERNANCE.md
[ownership]: https://github.com/crossplane/crossplane/blob/master/OWNERS.md
[code of conduct]: https://github.com/crossplane/crossplane/blob/master/CODE_OF_CONDUCT.md

View File

@ -1,25 +0,0 @@
# Release Process
## New Patch Release (vX.Y.Z)
In order to cut a new patch release from an existing release branch `release-X.Y`, follow these steps:
- Run the [Tag workflow][tag-workflow] on the `release-X.Y` branch with the proper release version, `vX.Y.Z`. Message suggested, but not required: `Release vX.Y.Z`.
- Draft the [new release notes], and share them with the rest of the team to ensure that all the required information is included.
- Publish the above release notes.
## New Minor Release (vX.Y.0)
In order to cut a new minor release, follow these steps:
- Create a new release branch `release-X.Y` from `main`, using the [GitHub UI][create-branch].
- Create and merge an empty commit to the `main` branch, if required to have it at least one commit ahead of the release branch.
- Run the [Tag workflow][tag-workflow] on the `main` branch with the release candidate tag for the next release, so `vX.<Y+1>.0-rc.0`.
- Run the [Tag workflow][tag-workflow] on the `release-X.Y` branch with the proper release version, `vX.Y.0`. Message suggested, but not required: `Release vX.Y.0`.
- Draft the [new release notes], and share them with the rest of the team to ensure that all the required information is included.
- Publish the above release notes.
<!-- Named Links -->
[create-branch]: https://help.github.com/en/github/collaborating-with-issues-and-pull-requests/creating-and-deleting-branches-within-your-repository
[new release notes]: https://github.com/crossplane/crossplane-runtime/releases/new
[tag-workflow]: https://github.com/crossplane/crossplane-runtime/actions/workflows/tag.yml

View File

@ -1,7 +0,0 @@
# Security Policy
## Reporting a Vulnerability
Instructions for reporting a vulnerability can be found on the
[crossplane repository](https://github.com/crossplane/crossplane/blob/main/SECURITY.md).

View File

@ -1,4 +1,3 @@
//go:build generate
// +build generate
/*
@ -23,25 +22,9 @@ limitations under the License.
// Generate deepcopy methodsets
//go:generate go run -tags generate sigs.k8s.io/controller-tools/cmd/controller-gen object:headerFile=../hack/boilerplate.go.txt paths=./...
// Generate External Secret Store gRPC types and stubs.
//
// We use buf rather than the traditional protoc because it's pure go and can
// thus be invoked using go run from a pinned dependency. If we used protoc we'd
// need to install it via the Makefile, and there are not currently statically
// compiled binaries available for download (the release binaries for Linux are
// dynamically linked). See buf.gen.yaml for buf's configuration.
//
// We go install the required plugins because they need to be in $PATH for buf
// (or protoc) to invoke them.
//go:generate go install google.golang.org/protobuf/cmd/protoc-gen-go google.golang.org/grpc/cmd/protoc-gen-go-grpc
//go:generate go run github.com/bufbuild/buf/cmd/buf@v1.36.0 generate
// Package apis contains Kubernetes API groups
package apis
import (
_ "google.golang.org/grpc/cmd/protoc-gen-go-grpc" //nolint:typecheck
_ "google.golang.org/protobuf/cmd/protoc-gen-go" //nolint:typecheck
_ "sigs.k8s.io/controller-tools/cmd/controller-gen" //nolint:typecheck
)

View File

@ -1,10 +0,0 @@
# This file contains configuration for the `buf generate` command.
# See generate.go for more details.
version: v1
plugins:
- plugin: go
out: .
opt: paths=source_relative
- plugin: go-grpc
out: .
opt: paths=source_relative

View File

@ -1,9 +0,0 @@
version: v1
name: buf.build/crossplane/crossplane-runtime
breaking:
use:
- FILE
lint:
use:
- DEFAULT
allow_comment_ignores: true

View File

@ -1,453 +0,0 @@
//
//Copyright 2024 The Crossplane Authors.
//Licensed under the Apache License, Version 2.0 (the "License");
//you may not use this file except in compliance with the License.
//You may obtain a copy of the License at
//http://www.apache.org/licenses/LICENSE-2.0
//Unless required by applicable law or agreed to in writing, software
//distributed under the License is distributed on an "AS IS" BASIS,
//WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
//See the License for the specific language governing permissions and
//limitations under the License.
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
// protoc-gen-go v1.36.5
// protoc (unknown)
// source: changelogs/proto/v1alpha1/changelog.proto
// buf:lint:ignore PACKAGE_DIRECTORY_MATCH
package v1alpha1
import (
protoreflect "google.golang.org/protobuf/reflect/protoreflect"
protoimpl "google.golang.org/protobuf/runtime/protoimpl"
structpb "google.golang.org/protobuf/types/known/structpb"
timestamppb "google.golang.org/protobuf/types/known/timestamppb"
reflect "reflect"
sync "sync"
unsafe "unsafe"
)
const (
// Verify that this generated code is sufficiently up-to-date.
_ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
// Verify that runtime/protoimpl is sufficiently up-to-date.
_ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
)
// OperationType represents the type of operation that was performed on a
// resource.
type OperationType int32
const (
OperationType_OPERATION_TYPE_UNSPECIFIED OperationType = 0
OperationType_OPERATION_TYPE_CREATE OperationType = 1
OperationType_OPERATION_TYPE_UPDATE OperationType = 2
OperationType_OPERATION_TYPE_DELETE OperationType = 3
)
// Enum value maps for OperationType.
var (
OperationType_name = map[int32]string{
0: "OPERATION_TYPE_UNSPECIFIED",
1: "OPERATION_TYPE_CREATE",
2: "OPERATION_TYPE_UPDATE",
3: "OPERATION_TYPE_DELETE",
}
OperationType_value = map[string]int32{
"OPERATION_TYPE_UNSPECIFIED": 0,
"OPERATION_TYPE_CREATE": 1,
"OPERATION_TYPE_UPDATE": 2,
"OPERATION_TYPE_DELETE": 3,
}
)
func (x OperationType) Enum() *OperationType {
p := new(OperationType)
*p = x
return p
}
func (x OperationType) String() string {
return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
}
func (OperationType) Descriptor() protoreflect.EnumDescriptor {
return file_changelogs_proto_v1alpha1_changelog_proto_enumTypes[0].Descriptor()
}
func (OperationType) Type() protoreflect.EnumType {
return &file_changelogs_proto_v1alpha1_changelog_proto_enumTypes[0]
}
func (x OperationType) Number() protoreflect.EnumNumber {
return protoreflect.EnumNumber(x)
}
// Deprecated: Use OperationType.Descriptor instead.
func (OperationType) EnumDescriptor() ([]byte, []int) {
return file_changelogs_proto_v1alpha1_changelog_proto_rawDescGZIP(), []int{0}
}
// SendChangeLogRequest represents a request to send a single change log entry.
type SendChangeLogRequest struct {
state protoimpl.MessageState `protogen:"open.v1"`
// The change log entry to send as part of this request.
Entry *ChangeLogEntry `protobuf:"bytes,1,opt,name=entry,proto3" json:"entry,omitempty"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *SendChangeLogRequest) Reset() {
*x = SendChangeLogRequest{}
mi := &file_changelogs_proto_v1alpha1_changelog_proto_msgTypes[0]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *SendChangeLogRequest) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*SendChangeLogRequest) ProtoMessage() {}
func (x *SendChangeLogRequest) ProtoReflect() protoreflect.Message {
mi := &file_changelogs_proto_v1alpha1_changelog_proto_msgTypes[0]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use SendChangeLogRequest.ProtoReflect.Descriptor instead.
func (*SendChangeLogRequest) Descriptor() ([]byte, []int) {
return file_changelogs_proto_v1alpha1_changelog_proto_rawDescGZIP(), []int{0}
}
func (x *SendChangeLogRequest) GetEntry() *ChangeLogEntry {
if x != nil {
return x.Entry
}
return nil
}
// ChangeLogEntry represents a single change log entry, with detailed information
// about the resource that was changed.
type ChangeLogEntry struct {
state protoimpl.MessageState `protogen:"open.v1"`
// The timestamp at which the change occurred.
Timestamp *timestamppb.Timestamp `protobuf:"bytes,1,opt,name=timestamp,proto3" json:"timestamp,omitempty"`
// The name and version of the provider that is making the change to the
// resource.
Provider string `protobuf:"bytes,2,opt,name=provider,proto3" json:"provider,omitempty"`
// The API version of the resource that was changed, e.g. Group/Version.
ApiVersion string `protobuf:"bytes,3,opt,name=api_version,json=apiVersion,proto3" json:"api_version,omitempty"`
// The kind of the resource that was changed.
Kind string `protobuf:"bytes,4,opt,name=kind,proto3" json:"kind,omitempty"`
// The name of the resource that was changed.
Name string `protobuf:"bytes,5,opt,name=name,proto3" json:"name,omitempty"`
// The external name of the resource that was changed.
ExternalName string `protobuf:"bytes,6,opt,name=external_name,json=externalName,proto3" json:"external_name,omitempty"`
// The type of operation that was performed on the resource, e.g. Create,
// Update, or Delete.
Operation OperationType `protobuf:"varint,7,opt,name=operation,proto3,enum=changelogs.proto.v1alpha1.OperationType" json:"operation,omitempty"`
// A full snapshot of the resource's state, as observed directly before the
// resource was changed.
Snapshot *structpb.Struct `protobuf:"bytes,8,opt,name=snapshot,proto3" json:"snapshot,omitempty"`
// An optional error message that describes any error encountered while
// performing the operation on the resource.
ErrorMessage *string `protobuf:"bytes,9,opt,name=error_message,json=errorMessage,proto3,oneof" json:"error_message,omitempty"`
// An optional additional details that can be provided for further context
// about the change.
AdditionalDetails map[string]string `protobuf:"bytes,10,rep,name=additional_details,json=additionalDetails,proto3" json:"additional_details,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *ChangeLogEntry) Reset() {
*x = ChangeLogEntry{}
mi := &file_changelogs_proto_v1alpha1_changelog_proto_msgTypes[1]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *ChangeLogEntry) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*ChangeLogEntry) ProtoMessage() {}
func (x *ChangeLogEntry) ProtoReflect() protoreflect.Message {
mi := &file_changelogs_proto_v1alpha1_changelog_proto_msgTypes[1]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use ChangeLogEntry.ProtoReflect.Descriptor instead.
func (*ChangeLogEntry) Descriptor() ([]byte, []int) {
return file_changelogs_proto_v1alpha1_changelog_proto_rawDescGZIP(), []int{1}
}
func (x *ChangeLogEntry) GetTimestamp() *timestamppb.Timestamp {
if x != nil {
return x.Timestamp
}
return nil
}
func (x *ChangeLogEntry) GetProvider() string {
if x != nil {
return x.Provider
}
return ""
}
func (x *ChangeLogEntry) GetApiVersion() string {
if x != nil {
return x.ApiVersion
}
return ""
}
func (x *ChangeLogEntry) GetKind() string {
if x != nil {
return x.Kind
}
return ""
}
func (x *ChangeLogEntry) GetName() string {
if x != nil {
return x.Name
}
return ""
}
func (x *ChangeLogEntry) GetExternalName() string {
if x != nil {
return x.ExternalName
}
return ""
}
func (x *ChangeLogEntry) GetOperation() OperationType {
if x != nil {
return x.Operation
}
return OperationType_OPERATION_TYPE_UNSPECIFIED
}
func (x *ChangeLogEntry) GetSnapshot() *structpb.Struct {
if x != nil {
return x.Snapshot
}
return nil
}
func (x *ChangeLogEntry) GetErrorMessage() string {
if x != nil && x.ErrorMessage != nil {
return *x.ErrorMessage
}
return ""
}
func (x *ChangeLogEntry) GetAdditionalDetails() map[string]string {
if x != nil {
return x.AdditionalDetails
}
return nil
}
// SendChangeLogResponse is the response returned by the ChangeLogService after
// a change log entry is sent. Currently, this is an empty message as the only
// useful information expected to sent back at this time will be through errors.
type SendChangeLogResponse struct {
state protoimpl.MessageState `protogen:"open.v1"`
unknownFields protoimpl.UnknownFields
sizeCache protoimpl.SizeCache
}
func (x *SendChangeLogResponse) Reset() {
*x = SendChangeLogResponse{}
mi := &file_changelogs_proto_v1alpha1_changelog_proto_msgTypes[2]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
func (x *SendChangeLogResponse) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*SendChangeLogResponse) ProtoMessage() {}
func (x *SendChangeLogResponse) ProtoReflect() protoreflect.Message {
mi := &file_changelogs_proto_v1alpha1_changelog_proto_msgTypes[2]
if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use SendChangeLogResponse.ProtoReflect.Descriptor instead.
func (*SendChangeLogResponse) Descriptor() ([]byte, []int) {
return file_changelogs_proto_v1alpha1_changelog_proto_rawDescGZIP(), []int{2}
}
var File_changelogs_proto_v1alpha1_changelog_proto protoreflect.FileDescriptor
var file_changelogs_proto_v1alpha1_changelog_proto_rawDesc = string([]byte{
0x0a, 0x29, 0x63, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x6c, 0x6f, 0x67, 0x73, 0x2f, 0x70, 0x72, 0x6f,
0x74, 0x6f, 0x2f, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2f, 0x63, 0x68, 0x61, 0x6e,
0x67, 0x65, 0x6c, 0x6f, 0x67, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x19, 0x63, 0x68, 0x61,
0x6e, 0x67, 0x65, 0x6c, 0x6f, 0x67, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x31,
0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x1a, 0x1c, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70,
0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x73, 0x74, 0x72, 0x75, 0x63, 0x74, 0x2e, 0x70,
0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f,
0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x2e,
0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x57, 0x0a, 0x14, 0x53, 0x65, 0x6e, 0x64, 0x43, 0x68, 0x61,
0x6e, 0x67, 0x65, 0x4c, 0x6f, 0x67, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x3f, 0x0a,
0x05, 0x65, 0x6e, 0x74, 0x72, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x29, 0x2e, 0x63,
0x68, 0x61, 0x6e, 0x67, 0x65, 0x6c, 0x6f, 0x67, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e,
0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x43, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x4c,
0x6f, 0x67, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x05, 0x65, 0x6e, 0x74, 0x72, 0x79, 0x22, 0xc4,
0x04, 0x0a, 0x0e, 0x43, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x4c, 0x6f, 0x67, 0x45, 0x6e, 0x74, 0x72,
0x79, 0x12, 0x38, 0x0a, 0x09, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x18, 0x01,
0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72,
0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70,
0x52, 0x09, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x12, 0x1a, 0x0a, 0x08, 0x70,
0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x70,
0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x12, 0x1f, 0x0a, 0x0b, 0x61, 0x70, 0x69, 0x5f, 0x76,
0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x61, 0x70,
0x69, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x12, 0x0a, 0x04, 0x6b, 0x69, 0x6e, 0x64,
0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6b, 0x69, 0x6e, 0x64, 0x12, 0x12, 0x0a, 0x04,
0x6e, 0x61, 0x6d, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65,
0x12, 0x23, 0x0a, 0x0d, 0x65, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x5f, 0x6e, 0x61, 0x6d,
0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x65, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61,
0x6c, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x46, 0x0a, 0x09, 0x6f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69,
0x6f, 0x6e, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x28, 0x2e, 0x63, 0x68, 0x61, 0x6e, 0x67,
0x65, 0x6c, 0x6f, 0x67, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x31, 0x61, 0x6c,
0x70, 0x68, 0x61, 0x31, 0x2e, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x54, 0x79,
0x70, 0x65, 0x52, 0x09, 0x6f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x33, 0x0a,
0x08, 0x73, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32,
0x17, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75,
0x66, 0x2e, 0x53, 0x74, 0x72, 0x75, 0x63, 0x74, 0x52, 0x08, 0x73, 0x6e, 0x61, 0x70, 0x73, 0x68,
0x6f, 0x74, 0x12, 0x28, 0x0a, 0x0d, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x5f, 0x6d, 0x65, 0x73, 0x73,
0x61, 0x67, 0x65, 0x18, 0x09, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x0c, 0x65, 0x72, 0x72,
0x6f, 0x72, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x88, 0x01, 0x01, 0x12, 0x6f, 0x0a, 0x12,
0x61, 0x64, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x5f, 0x64, 0x65, 0x74, 0x61, 0x69,
0x6c, 0x73, 0x18, 0x0a, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x40, 0x2e, 0x63, 0x68, 0x61, 0x6e, 0x67,
0x65, 0x6c, 0x6f, 0x67, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x31, 0x61, 0x6c,
0x70, 0x68, 0x61, 0x31, 0x2e, 0x43, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x4c, 0x6f, 0x67, 0x45, 0x6e,
0x74, 0x72, 0x79, 0x2e, 0x41, 0x64, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x44, 0x65,
0x74, 0x61, 0x69, 0x6c, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x11, 0x61, 0x64, 0x64, 0x69,
0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x44, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x1a, 0x44, 0x0a,
0x16, 0x41, 0x64, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x44, 0x65, 0x74, 0x61, 0x69,
0x6c, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01,
0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c,
0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a,
0x02, 0x38, 0x01, 0x42, 0x10, 0x0a, 0x0e, 0x5f, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x5f, 0x6d, 0x65,
0x73, 0x73, 0x61, 0x67, 0x65, 0x22, 0x17, 0x0a, 0x15, 0x53, 0x65, 0x6e, 0x64, 0x43, 0x68, 0x61,
0x6e, 0x67, 0x65, 0x4c, 0x6f, 0x67, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2a, 0x80,
0x01, 0x0a, 0x0d, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x54, 0x79, 0x70, 0x65,
0x12, 0x1e, 0x0a, 0x1a, 0x4f, 0x50, 0x45, 0x52, 0x41, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x54, 0x59,
0x50, 0x45, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00,
0x12, 0x19, 0x0a, 0x15, 0x4f, 0x50, 0x45, 0x52, 0x41, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x54, 0x59,
0x50, 0x45, 0x5f, 0x43, 0x52, 0x45, 0x41, 0x54, 0x45, 0x10, 0x01, 0x12, 0x19, 0x0a, 0x15, 0x4f,
0x50, 0x45, 0x52, 0x41, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x55, 0x50,
0x44, 0x41, 0x54, 0x45, 0x10, 0x02, 0x12, 0x19, 0x0a, 0x15, 0x4f, 0x50, 0x45, 0x52, 0x41, 0x54,
0x49, 0x4f, 0x4e, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x44, 0x45, 0x4c, 0x45, 0x54, 0x45, 0x10,
0x03, 0x32, 0x88, 0x01, 0x0a, 0x10, 0x43, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x4c, 0x6f, 0x67, 0x53,
0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x74, 0x0a, 0x0d, 0x53, 0x65, 0x6e, 0x64, 0x43, 0x68,
0x61, 0x6e, 0x67, 0x65, 0x4c, 0x6f, 0x67, 0x12, 0x2f, 0x2e, 0x63, 0x68, 0x61, 0x6e, 0x67, 0x65,
0x6c, 0x6f, 0x67, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70,
0x68, 0x61, 0x31, 0x2e, 0x53, 0x65, 0x6e, 0x64, 0x43, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x4c, 0x6f,
0x67, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x30, 0x2e, 0x63, 0x68, 0x61, 0x6e, 0x67,
0x65, 0x6c, 0x6f, 0x67, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x31, 0x61, 0x6c,
0x70, 0x68, 0x61, 0x31, 0x2e, 0x53, 0x65, 0x6e, 0x64, 0x43, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x4c,
0x6f, 0x67, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x42, 0x49, 0x5a, 0x47,
0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x63, 0x72, 0x6f, 0x73, 0x73,
0x70, 0x6c, 0x61, 0x6e, 0x65, 0x2f, 0x63, 0x72, 0x6f, 0x73, 0x73, 0x70, 0x6c, 0x61, 0x6e, 0x65,
0x2d, 0x72, 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x73, 0x2f, 0x63, 0x68,
0x61, 0x6e, 0x67, 0x65, 0x6c, 0x6f, 0x67, 0x73, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x76,
0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
})
var (
file_changelogs_proto_v1alpha1_changelog_proto_rawDescOnce sync.Once
file_changelogs_proto_v1alpha1_changelog_proto_rawDescData []byte
)
func file_changelogs_proto_v1alpha1_changelog_proto_rawDescGZIP() []byte {
file_changelogs_proto_v1alpha1_changelog_proto_rawDescOnce.Do(func() {
file_changelogs_proto_v1alpha1_changelog_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_changelogs_proto_v1alpha1_changelog_proto_rawDesc), len(file_changelogs_proto_v1alpha1_changelog_proto_rawDesc)))
})
return file_changelogs_proto_v1alpha1_changelog_proto_rawDescData
}
var file_changelogs_proto_v1alpha1_changelog_proto_enumTypes = make([]protoimpl.EnumInfo, 1)
var file_changelogs_proto_v1alpha1_changelog_proto_msgTypes = make([]protoimpl.MessageInfo, 4)
var file_changelogs_proto_v1alpha1_changelog_proto_goTypes = []any{
(OperationType)(0), // 0: changelogs.proto.v1alpha1.OperationType
(*SendChangeLogRequest)(nil), // 1: changelogs.proto.v1alpha1.SendChangeLogRequest
(*ChangeLogEntry)(nil), // 2: changelogs.proto.v1alpha1.ChangeLogEntry
(*SendChangeLogResponse)(nil), // 3: changelogs.proto.v1alpha1.SendChangeLogResponse
nil, // 4: changelogs.proto.v1alpha1.ChangeLogEntry.AdditionalDetailsEntry
(*timestamppb.Timestamp)(nil), // 5: google.protobuf.Timestamp
(*structpb.Struct)(nil), // 6: google.protobuf.Struct
}
var file_changelogs_proto_v1alpha1_changelog_proto_depIdxs = []int32{
2, // 0: changelogs.proto.v1alpha1.SendChangeLogRequest.entry:type_name -> changelogs.proto.v1alpha1.ChangeLogEntry
5, // 1: changelogs.proto.v1alpha1.ChangeLogEntry.timestamp:type_name -> google.protobuf.Timestamp
0, // 2: changelogs.proto.v1alpha1.ChangeLogEntry.operation:type_name -> changelogs.proto.v1alpha1.OperationType
6, // 3: changelogs.proto.v1alpha1.ChangeLogEntry.snapshot:type_name -> google.protobuf.Struct
4, // 4: changelogs.proto.v1alpha1.ChangeLogEntry.additional_details:type_name -> changelogs.proto.v1alpha1.ChangeLogEntry.AdditionalDetailsEntry
1, // 5: changelogs.proto.v1alpha1.ChangeLogService.SendChangeLog:input_type -> changelogs.proto.v1alpha1.SendChangeLogRequest
3, // 6: changelogs.proto.v1alpha1.ChangeLogService.SendChangeLog:output_type -> changelogs.proto.v1alpha1.SendChangeLogResponse
6, // [6:7] is the sub-list for method output_type
5, // [5:6] is the sub-list for method input_type
5, // [5:5] is the sub-list for extension type_name
5, // [5:5] is the sub-list for extension extendee
0, // [0:5] is the sub-list for field type_name
}
func init() { file_changelogs_proto_v1alpha1_changelog_proto_init() }
func file_changelogs_proto_v1alpha1_changelog_proto_init() {
if File_changelogs_proto_v1alpha1_changelog_proto != nil {
return
}
file_changelogs_proto_v1alpha1_changelog_proto_msgTypes[1].OneofWrappers = []any{}
type x struct{}
out := protoimpl.TypeBuilder{
File: protoimpl.DescBuilder{
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
RawDescriptor: unsafe.Slice(unsafe.StringData(file_changelogs_proto_v1alpha1_changelog_proto_rawDesc), len(file_changelogs_proto_v1alpha1_changelog_proto_rawDesc)),
NumEnums: 1,
NumMessages: 4,
NumExtensions: 0,
NumServices: 1,
},
GoTypes: file_changelogs_proto_v1alpha1_changelog_proto_goTypes,
DependencyIndexes: file_changelogs_proto_v1alpha1_changelog_proto_depIdxs,
EnumInfos: file_changelogs_proto_v1alpha1_changelog_proto_enumTypes,
MessageInfos: file_changelogs_proto_v1alpha1_changelog_proto_msgTypes,
}.Build()
File_changelogs_proto_v1alpha1_changelog_proto = out.File
file_changelogs_proto_v1alpha1_changelog_proto_goTypes = nil
file_changelogs_proto_v1alpha1_changelog_proto_depIdxs = nil
}

View File

@ -1,88 +0,0 @@
/*
Copyright 2024 The Crossplane Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
syntax = "proto3";
import "google/protobuf/struct.proto";
import "google/protobuf/timestamp.proto";
// buf:lint:ignore PACKAGE_DIRECTORY_MATCH
package changelogs.proto.v1alpha1;
option go_package = "github.com/crossplane/crossplane-runtime/apis/changelogs/proto/v1alpha1";
// ChangeLogService is a service that provides the ability to send change log
// entries.
service ChangeLogService {
  // SendChangeLog sends a change log entry to the change log service. The
  // response carries no data today; failures are surfaced as RPC errors.
  rpc SendChangeLog (SendChangeLogRequest) returns (SendChangeLogResponse) {}
}
// SendChangeLogRequest represents a request to send a single change log entry.
message SendChangeLogRequest {
// The change log entry to send as part of this request.
ChangeLogEntry entry = 1;
}
// ChangeLogEntry represents a single change log entry, with detailed information
// about the resource that was changed.
message ChangeLogEntry {
// The timestamp at which the change occurred.
google.protobuf.Timestamp timestamp = 1;
// The name and version of the provider that is making the change to the
// resource.
string provider = 2;
// The API version of the resource that was changed, e.g. Group/Version.
string api_version = 3;
// The kind of the resource that was changed.
string kind = 4;
// The name of the resource that was changed.
string name = 5;
// The external name of the resource that was changed.
string external_name = 6;
// The type of operation that was performed on the resource, e.g. Create,
// Update, or Delete.
OperationType operation = 7;
// A full snapshot of the resource's state, as observed directly before the
// resource was changed.
google.protobuf.Struct snapshot = 8;
// An optional error message that describes any error encountered while
// performing the operation on the resource.
optional string error_message = 9;
// An optional additional details that can be provided for further context
// about the change.
map<string, string> additional_details = 10;
}
// OperationType represents the type of operation that was performed on a
// resource.
enum OperationType {
  // The type of operation was not specified.
  OPERATION_TYPE_UNSPECIFIED = 0;

  // The resource was created.
  OPERATION_TYPE_CREATE = 1;

  // The resource was updated.
  OPERATION_TYPE_UPDATE = 2;

  // The resource was deleted.
  OPERATION_TYPE_DELETE = 3;
}
// SendChangeLogResponse is the response returned by the ChangeLogService after
// a change log entry is sent. Currently, this is an empty message as the only
// useful information expected to sent back at this time will be through errors.
message SendChangeLogResponse {}

View File

@ -1,125 +0,0 @@
//
//Copyright 2024 The Crossplane Authors.
//Licensed under the Apache License, Version 2.0 (the "License");
//you may not use this file except in compliance with the License.
//You may obtain a copy of the License at
//http://www.apache.org/licenses/LICENSE-2.0
//Unless required by applicable law or agreed to in writing, software
//distributed under the License is distributed on an "AS IS" BASIS,
//WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
//See the License for the specific language governing permissions and
//limitations under the License.
// Code generated by protoc-gen-go-grpc. DO NOT EDIT.
// versions:
// - protoc-gen-go-grpc v1.3.0
// - protoc (unknown)
// source: changelogs/proto/v1alpha1/changelog.proto
// buf:lint:ignore PACKAGE_DIRECTORY_MATCH
package v1alpha1
import (
context "context"
grpc "google.golang.org/grpc"
codes "google.golang.org/grpc/codes"
status "google.golang.org/grpc/status"
)
// This is a compile-time assertion to ensure that this generated file
// is compatible with the grpc package it is being compiled against.
// Requires gRPC-Go v1.32.0 or later.
const _ = grpc.SupportPackageIsVersion7
const (
ChangeLogService_SendChangeLog_FullMethodName = "/changelogs.proto.v1alpha1.ChangeLogService/SendChangeLog"
)
// ChangeLogServiceClient is the client API for ChangeLogService service.
//
// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream.
type ChangeLogServiceClient interface {
// SendChangeLog sends a change log entry to the change log service.
SendChangeLog(ctx context.Context, in *SendChangeLogRequest, opts ...grpc.CallOption) (*SendChangeLogResponse, error)
}
type changeLogServiceClient struct {
cc grpc.ClientConnInterface
}
func NewChangeLogServiceClient(cc grpc.ClientConnInterface) ChangeLogServiceClient {
return &changeLogServiceClient{cc}
}
func (c *changeLogServiceClient) SendChangeLog(ctx context.Context, in *SendChangeLogRequest, opts ...grpc.CallOption) (*SendChangeLogResponse, error) {
out := new(SendChangeLogResponse)
err := c.cc.Invoke(ctx, ChangeLogService_SendChangeLog_FullMethodName, in, out, opts...)
if err != nil {
return nil, err
}
return out, nil
}
// ChangeLogServiceServer is the server API for ChangeLogService service.
// All implementations must embed UnimplementedChangeLogServiceServer
// for forward compatibility
type ChangeLogServiceServer interface {
// SendChangeLog sends a change log entry to the change log service.
SendChangeLog(context.Context, *SendChangeLogRequest) (*SendChangeLogResponse, error)
mustEmbedUnimplementedChangeLogServiceServer()
}
// UnimplementedChangeLogServiceServer must be embedded to have forward compatible implementations.
type UnimplementedChangeLogServiceServer struct {
}
func (UnimplementedChangeLogServiceServer) SendChangeLog(context.Context, *SendChangeLogRequest) (*SendChangeLogResponse, error) {
return nil, status.Errorf(codes.Unimplemented, "method SendChangeLog not implemented")
}
func (UnimplementedChangeLogServiceServer) mustEmbedUnimplementedChangeLogServiceServer() {}
// UnsafeChangeLogServiceServer may be embedded to opt out of forward compatibility for this service.
// Use of this interface is not recommended, as added methods to ChangeLogServiceServer will
// result in compilation errors.
type UnsafeChangeLogServiceServer interface {
mustEmbedUnimplementedChangeLogServiceServer()
}
func RegisterChangeLogServiceServer(s grpc.ServiceRegistrar, srv ChangeLogServiceServer) {
s.RegisterService(&ChangeLogService_ServiceDesc, srv)
}
func _ChangeLogService_SendChangeLog_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(SendChangeLogRequest)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
return srv.(ChangeLogServiceServer).SendChangeLog(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: ChangeLogService_SendChangeLog_FullMethodName,
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(ChangeLogServiceServer).SendChangeLog(ctx, req.(*SendChangeLogRequest))
}
return interceptor(ctx, in, info, handler)
}
// ChangeLogService_ServiceDesc is the grpc.ServiceDesc for ChangeLogService service.
// It's only intended for direct use with grpc.RegisterService,
// and not to be introspected or modified (even as a copy)
var ChangeLogService_ServiceDesc = grpc.ServiceDesc{
ServiceName: "changelogs.proto.v1alpha1.ChangeLogService",
HandlerType: (*ChangeLogServiceServer)(nil),
Methods: []grpc.MethodDesc{
{
MethodName: "SendChangeLog",
Handler: _ChangeLogService_SendChangeLog_Handler,
},
},
Streams: []grpc.StreamDesc{},
Metadata: "changelogs/proto/v1alpha1/changelog.proto",
}

View File

@ -1,307 +0,0 @@
/*
Copyright 2019 The Crossplane Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package common
import (
"sort"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
// A ConditionType represents a condition a resource could be in.
type ConditionType string

// Condition types.
const (
	// TypeReady resources are believed to be ready to handle work.
	TypeReady ConditionType = "Ready"

	// TypeSynced resources are believed to be in sync with the
	// Kubernetes resources that manage their lifecycle.
	TypeSynced ConditionType = "Synced"

	// TypeHealthy resources are believed to be in a healthy state and to have all
	// of their child resources in a healthy state. For example, a claim is
	// healthy when the claim is synced and the underlying composite resource is
	// both synced and healthy. A composite resource is healthy when the composite
	// resource is synced and all composed resources are synced and, if
	// applicable, healthy (e.g., the composed resource is a composite resource).
	// TODO: This condition is not yet implemented. It is currently just reserved
	// as a system condition. See the tracking issue for more details
	// https://github.com/crossplane/crossplane/issues/5643.
	TypeHealthy ConditionType = "Healthy"
)
// A ConditionReason represents the reason a resource is in a condition.
type ConditionReason string

// Reasons a resource is or is not ready.
const (
	// ReasonAvailable indicates the resource is available.
	ReasonAvailable ConditionReason = "Available"
	// ReasonUnavailable indicates the resource is not available.
	ReasonUnavailable ConditionReason = "Unavailable"
	// ReasonCreating indicates the resource is being created.
	ReasonCreating ConditionReason = "Creating"
	// ReasonDeleting indicates the resource is being deleted.
	ReasonDeleting ConditionReason = "Deleting"
)

// Reasons a resource is or is not synced.
const (
	// ReasonReconcileSuccess indicates the most recent reconciliation succeeded.
	ReasonReconcileSuccess ConditionReason = "ReconcileSuccess"
	// ReasonReconcileError indicates the most recent reconciliation encountered
	// an error.
	ReasonReconcileError ConditionReason = "ReconcileError"
	// ReasonReconcilePaused indicates reconciliation of the resource is paused.
	ReasonReconcilePaused ConditionReason = "ReconcilePaused"
)
// See https://github.com/kubernetes/community/blob/master/contributors/devel/sig-architecture/api-conventions.md#typical-status-properties

// A Condition that may apply to a resource.
type Condition struct { //nolint:recvcheck // False positive - only has non-pointer methods AFAICT.
	// Type of this condition. At most one of each condition type may apply to
	// a resource at any point in time.
	Type ConditionType `json:"type"`

	// Status of this condition; is it currently True, False, or Unknown?
	Status corev1.ConditionStatus `json:"status"`

	// LastTransitionTime is the last time this condition transitioned from one
	// status to another.
	LastTransitionTime metav1.Time `json:"lastTransitionTime"`

	// A Reason for this condition's last transition from one status to another.
	Reason ConditionReason `json:"reason"`

	// A Message containing details about this condition's last transition from
	// one status to another, if any.
	// +optional
	Message string `json:"message,omitempty"`

	// ObservedGeneration represents the .metadata.generation that the condition was set based upon.
	// For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date
	// with respect to the current state of the instance.
	// +optional
	ObservedGeneration int64 `json:"observedGeneration,omitempty"`
}
// Equal returns true if the condition is identical to the supplied condition,
// ignoring the LastTransitionTime.
func (c Condition) Equal(other Condition) bool {
	switch {
	case c.Type != other.Type:
		return false
	case c.Status != other.Status:
		return false
	case c.Reason != other.Reason:
		return false
	case c.Message != other.Message:
		return false
	default:
		// LastTransitionTime is deliberately excluded from the comparison.
		return c.ObservedGeneration == other.ObservedGeneration
	}
}
// WithMessage returns a copy of this condition with the supplied message set.
// The receiver is a value, so the original condition is not mutated.
func (c Condition) WithMessage(msg string) Condition {
	out := c
	out.Message = msg
	return out
}
// WithObservedGeneration returns a copy of this condition with the supplied
// observed generation set. The receiver is a value, so the original condition
// is not mutated.
func (c Condition) WithObservedGeneration(gen int64) Condition {
	out := c
	out.ObservedGeneration = gen
	return out
}
// IsSystemConditionType returns true if the condition is owned by the
// Crossplane system (e.g, Ready, Synced, Healthy).
func IsSystemConditionType(t ConditionType) bool {
	return t == TypeReady || t == TypeSynced || t == TypeHealthy
}
// NOTE(negz): Conditions are implemented as a slice rather than a map to comply
// with Kubernetes API conventions. Ideally we'd comply by using a map that
// marshalled to a JSON array, but doing so confuses the CRD schema generator.
// https://github.com/kubernetes/community/blob/9bf8cd/contributors/devel/sig-architecture/api-conventions.md#lists-of-named-subobjects-preferred-over-maps

// NOTE(negz): Do not manipulate Conditions directly. Use the Set method.

// A ConditionedStatus reflects the observed status of a resource. Only
// one condition of each type may exist.
type ConditionedStatus struct {
	// Conditions of the resource.
	// +listType=map
	// +listMapKey=type
	// +optional
	Conditions []Condition `json:"conditions,omitempty"`
}
// NewConditionedStatus returns a ConditionedStatus with the supplied
// conditions set.
func NewConditionedStatus(c ...Condition) *ConditionedStatus {
	out := new(ConditionedStatus)
	out.SetConditions(c...)
	return out
}
// GetCondition returns the condition for the given ConditionType if it
// exists. Otherwise it returns a condition of that type with status
// ConditionUnknown; it never returns a "missing" sentinel such as nil.
func (s *ConditionedStatus) GetCondition(ct ConditionType) Condition {
	for _, c := range s.Conditions {
		if c.Type == ct {
			return c
		}
	}
	// No condition of this type is set; report it as Unknown.
	return Condition{Type: ct, Status: corev1.ConditionUnknown}
}
// SetConditions sets the supplied conditions, replacing any existing
// conditions of the same type. This is a no-op if all supplied conditions are
// identical (ignoring the last transition time) to those already set.
func (s *ConditionedStatus) SetConditions(c ...Condition) {
	for _, cond := range c {
		found := false
		for i := range s.Conditions {
			if s.Conditions[i].Type != cond.Type {
				continue
			}
			found = true
			// Leave identical conditions untouched so their
			// LastTransitionTime is preserved.
			if !s.Conditions[i].Equal(cond) {
				s.Conditions[i] = cond
			}
		}
		if !found {
			s.Conditions = append(s.Conditions, cond)
		}
	}
}
// Equal reports whether the status is identical to the supplied status,
// ignoring LastTransitionTimes and the order of conditions.
func (s *ConditionedStatus) Equal(other *ConditionedStatus) bool {
	if s == nil || other == nil {
		return s == nil && other == nil
	}
	if len(s.Conditions) != len(other.Conditions) {
		return false
	}

	// Sort copies so we don't mutate the receivers' condition order.
	ours := append([]Condition(nil), s.Conditions...)
	theirs := append([]Condition(nil), other.Conditions...)

	// At most one condition of each type may exist, so sorting by type
	// yields a canonical order on both sides.
	sort.Slice(ours, func(i, j int) bool { return ours[i].Type < ours[j].Type })
	sort.Slice(theirs, func(i, j int) bool { return theirs[i].Type < theirs[j].Type })

	for i := range ours {
		if !ours[i].Equal(theirs[i]) {
			return false
		}
	}
	return true
}
// Creating returns a condition indicating the resource is currently being
// created.
func Creating() Condition {
	c := Condition{
		Type:   TypeReady,
		Status: corev1.ConditionFalse,
		Reason: ReasonCreating,
	}
	c.LastTransitionTime = metav1.Now()
	return c
}
// Deleting returns a condition indicating the resource is currently being
// deleted.
func Deleting() Condition {
	c := Condition{
		Type:   TypeReady,
		Status: corev1.ConditionFalse,
		Reason: ReasonDeleting,
	}
	c.LastTransitionTime = metav1.Now()
	return c
}
// Available returns a condition indicating the resource is currently observed
// to be available for use.
func Available() Condition {
	c := Condition{
		Type:   TypeReady,
		Status: corev1.ConditionTrue,
		Reason: ReasonAvailable,
	}
	c.LastTransitionTime = metav1.Now()
	return c
}
// Unavailable returns a condition indicating the resource is not currently
// available for use. Unavailable should be set only when Crossplane expects
// the resource to be available but knows it is not, for example because its
// API reports it is unhealthy.
func Unavailable() Condition {
	c := Condition{
		Type:   TypeReady,
		Status: corev1.ConditionFalse,
		Reason: ReasonUnavailable,
	}
	c.LastTransitionTime = metav1.Now()
	return c
}
// ReconcileSuccess returns a condition indicating that Crossplane
// successfully completed the most recent reconciliation of the resource.
func ReconcileSuccess() Condition {
	c := Condition{
		Type:   TypeSynced,
		Status: corev1.ConditionTrue,
		Reason: ReasonReconcileSuccess,
	}
	c.LastTransitionTime = metav1.Now()
	return c
}
// ReconcileError returns a condition indicating that Crossplane encountered
// an error while reconciling the resource. This could mean Crossplane was
// unable to update the resource to reflect its desired state, or that it was
// unable to determine the current actual state of the resource.
func ReconcileError(err error) Condition {
	c := Condition{
		Type:    TypeSynced,
		Status:  corev1.ConditionFalse,
		Reason:  ReasonReconcileError,
		Message: err.Error(),
	}
	c.LastTransitionTime = metav1.Now()
	return c
}
// ReconcilePaused returns a condition indicating that reconciliation of the
// managed resource is paused via the pause annotation.
func ReconcilePaused() Condition {
	c := Condition{
		Type:   TypeSynced,
		Status: corev1.ConditionFalse,
		Reason: ReasonReconcilePaused,
	}
	c.LastTransitionTime = metav1.Now()
	return c
}

View File

@ -1,3 +0,0 @@
// Package common contains core API types used by most Crossplane resources.
// +kubebuilder:object:generate=true
package common

View File

@ -1,54 +0,0 @@
/*
Copyright 2021 The Crossplane Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package common
import (
"dario.cat/mergo"
)
// MergeOptions specifies merge options on a field path. A nil *MergeOptions
// is valid and selects the default behavior (replace maps and slices).
type MergeOptions struct { // TODO(aru): add more options that control merging behavior
	// Specifies that already existing values in a merged map should be preserved.
	// +optional
	KeepMapValues *bool `json:"keepMapValues,omitempty"`

	// Specifies that already existing elements in a merged slice should be preserved.
	// +optional
	AppendSlice *bool `json:"appendSlice,omitempty"`
}
// MergoConfiguration translates the merge options into mergo configuration
// functions. The default behavior is to replace maps and slices
// (mergo.WithOverride); a nil receiver selects that default.
func (mo *MergeOptions) MergoConfiguration() []func(*mergo.Config) {
	config := []func(*mergo.Config){mergo.WithOverride}
	if mo == nil {
		return config
	}

	keepMaps := mo.KeepMapValues != nil && *mo.KeepMapValues
	appendSlices := mo.AppendSlice != nil && *mo.AppendSlice

	if keepMaps {
		// Drop the override option so existing map values survive the merge.
		config = config[:0]
	}
	if appendSlices {
		config = append(config, mergo.WithAppendSlice)
	}
	return config
}
// IsAppendSlice reports whether mo.AppendSlice is set to true. A nil receiver
// or unset field reports false.
func (mo *MergeOptions) IsAppendSlice() bool {
	if mo == nil || mo.AppendSlice == nil {
		return false
	}
	return *mo.AppendSlice
}

View File

@ -1,37 +0,0 @@
/*
Copyright 2024 The Crossplane Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package common
// ObservedStatus contains the recent reconciliation state.
type ObservedStatus struct {
	// ObservedGeneration is the latest metadata.generation
	// which resulted in either a ready state, or stalled due to error
	// it can not recover from without human intervention.
	// +optional
	ObservedGeneration int64 `json:"observedGeneration,omitempty"`
}
// SetObservedGeneration records the metadata.generation of the main resource
// that was seen during the last reconciliation.
func (s *ObservedStatus) SetObservedGeneration(generation int64) {
	s.ObservedGeneration = generation
}
// GetObservedGeneration returns the last observed metadata.generation of the
// main resource.
func (s *ObservedStatus) GetObservedGeneration() int64 {
	return s.ObservedGeneration
}

View File

@ -1,120 +0,0 @@
/*
Copyright 2019 The Crossplane Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package common
// ManagementPolicies determine how the Crossplane controllers should manage
// an external resource, expressed as an array of ManagementActions.
type ManagementPolicies []ManagementAction

// A ManagementAction represents an action that the Crossplane controllers
// can take on an external resource.
// +kubebuilder:validation:Enum=Observe;Create;Update;Delete;LateInitialize;*
type ManagementAction string

const (
	// ManagementActionObserve means that the managed resource status.atProvider
	// will be updated with the external resource state.
	ManagementActionObserve ManagementAction = "Observe"

	// ManagementActionCreate means that the external resource will be created
	// using the managed resource spec.initProvider and spec.forProvider.
	ManagementActionCreate ManagementAction = "Create"

	// ManagementActionUpdate means that the external resource will be updated
	// using the managed resource spec.forProvider.
	ManagementActionUpdate ManagementAction = "Update"

	// ManagementActionDelete means that the external resource will be deleted
	// when the managed resource is deleted.
	ManagementActionDelete ManagementAction = "Delete"

	// ManagementActionLateInitialize means that unspecified fields of the managed
	// resource spec.forProvider will be updated with the external resource state.
	ManagementActionLateInitialize ManagementAction = "LateInitialize"

	// ManagementActionAll means that all of the above actions will be taken
	// by the Crossplane controllers.
	ManagementActionAll ManagementAction = "*"
)

// A DeletionPolicy determines what should happen to the underlying external
// resource when a managed resource is deleted.
// +kubebuilder:validation:Enum=Orphan;Delete
type DeletionPolicy string

const (
	// DeletionOrphan means the external resource will be orphaned (left in
	// place) when its managed resource is deleted.
	DeletionOrphan DeletionPolicy = "Orphan"

	// DeletionDelete means the external resource will be deleted when its
	// managed resource is deleted.
	DeletionDelete DeletionPolicy = "Delete"
)

// A CompositeDeletePolicy determines how the composite resource should be deleted
// when the corresponding claim is deleted.
// +kubebuilder:validation:Enum=Background;Foreground
type CompositeDeletePolicy string

const (
	// CompositeDeleteBackground means the composite resource will be deleted using
	// the Background Propagation Policy when the claim is deleted.
	CompositeDeleteBackground CompositeDeletePolicy = "Background"

	// CompositeDeleteForeground means the composite resource will be deleted using
	// the Foreground Propagation Policy when the claim is deleted.
	CompositeDeleteForeground CompositeDeletePolicy = "Foreground"
)

// An UpdatePolicy determines how something should be updated - either
// automatically (without human intervention) or manually.
// +kubebuilder:validation:Enum=Automatic;Manual
type UpdatePolicy string

const (
	// UpdateAutomatic means the resource should be updated automatically,
	// without any human intervention.
	UpdateAutomatic UpdatePolicy = "Automatic"

	// UpdateManual means the resource requires human intervention to
	// update.
	UpdateManual UpdatePolicy = "Manual"
)

// ResolvePolicy is a type for reference resolve policy.
type ResolvePolicy string

// ResolutionPolicy is a type for reference resolution policy.
type ResolutionPolicy string

const (
	// ResolvePolicyAlways is a resolve option.
	// When the ResolvePolicy is set to ResolvePolicyAlways the reference will
	// be resolved on every reconcile loop.
	ResolvePolicyAlways ResolvePolicy = "Always"

	// ResolutionPolicyRequired is a resolution option.
	// When the ResolutionPolicy is set to ResolutionPolicyRequired the execution
	// cannot continue if the reference cannot be resolved.
	ResolutionPolicyRequired ResolutionPolicy = "Required"

	// ResolutionPolicyOptional is a resolution option.
	// When the ResolutionPolicy is set to ResolutionPolicyOptional the
	// execution can continue even if the reference cannot be resolved.
	ResolutionPolicyOptional ResolutionPolicy = "Optional"
)

View File

@ -1,328 +0,0 @@
/*
Copyright 2019 The Crossplane Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package common
import (
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/apimachinery/pkg/types"
)
const (
	// ResourceCredentialsSecretEndpointKey is the key inside a connection secret for the connection endpoint.
	ResourceCredentialsSecretEndpointKey = "endpoint"
	// ResourceCredentialsSecretPortKey is the key inside a connection secret for the connection port.
	ResourceCredentialsSecretPortKey = "port"
	// ResourceCredentialsSecretUserKey is the key inside a connection secret for the connection user.
	ResourceCredentialsSecretUserKey = "username"
	// ResourceCredentialsSecretPasswordKey is the key inside a connection secret for the connection password.
	ResourceCredentialsSecretPasswordKey = "password"
	// ResourceCredentialsSecretCAKey is the key inside a connection secret for the server CA certificate.
	ResourceCredentialsSecretCAKey = "clusterCA"
	// ResourceCredentialsSecretClientCertKey is the key inside a connection secret for the client certificate.
	ResourceCredentialsSecretClientCertKey = "clientCert"
	// ResourceCredentialsSecretClientKeyKey is the key inside a connection secret for the client key.
	ResourceCredentialsSecretClientKeyKey = "clientKey"
	// ResourceCredentialsSecretTokenKey is the key inside a connection secret for the bearer token value.
	ResourceCredentialsSecretTokenKey = "token"
	// ResourceCredentialsSecretKubeconfigKey is the key inside a connection secret for the raw kubeconfig yaml.
	ResourceCredentialsSecretKubeconfigKey = "kubeconfig"
)

// LabelKeyProviderKind is added to ProviderConfigUsages to relate them to their
// ProviderConfig.
const LabelKeyProviderKind = "crossplane.io/provider-config-kind"

// LabelKeyProviderName is added to ProviderConfigUsages to relate them to their
// ProviderConfig.
const LabelKeyProviderName = "crossplane.io/provider-config"

// NOTE(negz): The below secret references differ from ObjectReference and
// LocalObjectReference in that they include only the fields Crossplane needs to
// reference a secret, and make those fields required. This reduces ambiguity in
// the API for resource authors.

// A LocalSecretReference is a reference to a secret in the same namespace as
// the referencer.
type LocalSecretReference struct {
	// Name of the secret.
	Name string `json:"name"`
}

// A SecretReference is a reference to a secret in an arbitrary namespace.
type SecretReference struct {
	// Name of the secret.
	Name string `json:"name"`

	// Namespace of the secret.
	Namespace string `json:"namespace"`
}

// A SecretKeySelector is a reference to a secret key in an arbitrary namespace.
type SecretKeySelector struct {
	SecretReference `json:",inline"`

	// The key to select.
	Key string `json:"key"`
}

// A LocalSecretKeySelector is a reference to a secret key
// in the same namespace with the referencing object.
type LocalSecretKeySelector struct {
	LocalSecretReference `json:",inline"`

	// The key to select.
	Key string `json:"key"`
}
// ToSecretKeySelector is a convenience method for converting the
// LocalSecretKeySelector to a SecretKeySelector with the given namespace.
func (ls *LocalSecretKeySelector) ToSecretKeySelector(namespace string) *SecretKeySelector {
	ref := SecretReference{Name: ls.Name, Namespace: namespace}
	return &SecretKeySelector{SecretReference: ref, Key: ls.Key}
}
// Policy represents the Resolve and Resolution policies of a Reference
// instance.
type Policy struct {
	// Resolve specifies when this reference should be resolved. The default
	// is 'IfNotPresent', which will attempt to resolve the reference only when
	// the corresponding field is not present. Use 'Always' to resolve the
	// reference on every reconcile.
	// +optional
	// +kubebuilder:validation:Enum=Always;IfNotPresent
	Resolve *ResolvePolicy `json:"resolve,omitempty"`

	// Resolution specifies whether resolution of this reference is required.
	// The default is 'Required', which means the reconcile will fail if the
	// reference cannot be resolved. 'Optional' means this reference will be
	// a no-op if it cannot be resolved.
	// +optional
	// +kubebuilder:default=Required
	// +kubebuilder:validation:Enum=Required;Optional
	Resolution *ResolutionPolicy `json:"resolution,omitempty"`
}
// IsResolutionPolicyOptional reports whether the resolution policy of the
// relevant reference is Optional. A nil policy or unset field reports false.
func (p *Policy) IsResolutionPolicyOptional() bool {
	return p != nil && p.Resolution != nil && *p.Resolution == ResolutionPolicyOptional
}
// IsResolvePolicyAlways reports whether the resolve policy of the relevant
// reference is Always. A nil policy or unset field reports false.
func (p *Policy) IsResolvePolicyAlways() bool {
	return p != nil && p.Resolve != nil && *p.Resolve == ResolvePolicyAlways
}
// A Reference to a named object.
type Reference struct {
	// Name of the referenced object.
	Name string `json:"name"`

	// Policies for referencing.
	// +optional
	Policy *Policy `json:"policy,omitempty"`
}

// A NamespacedReference to a named object.
type NamespacedReference struct {
	// Name of the referenced object.
	Name string `json:"name"`

	// Namespace of the referenced object.
	// +optional
	Namespace string `json:"namespace,omitempty"`

	// Policies for referencing.
	// +optional
	Policy *Policy `json:"policy,omitempty"`
}

// A TypedReference refers to an object by Name, Kind, and APIVersion. It is
// commonly used to reference cluster-scoped objects or objects where the
// namespace is already known.
type TypedReference struct {
	// APIVersion of the referenced object.
	APIVersion string `json:"apiVersion"`

	// Kind of the referenced object.
	Kind string `json:"kind"`

	// Name of the referenced object.
	Name string `json:"name"`

	// UID of the referenced object.
	// +optional
	UID types.UID `json:"uid,omitempty"`
}

// A Selector selects an object.
type Selector struct {
	// MatchLabels ensures an object with matching labels is selected.
	MatchLabels map[string]string `json:"matchLabels,omitempty"`

	// MatchControllerRef ensures an object with the same controller reference
	// as the selecting object is selected.
	MatchControllerRef *bool `json:"matchControllerRef,omitempty"`

	// Policies for selection.
	// +optional
	Policy *Policy `json:"policy,omitempty"`
}

// NamespacedSelector selects a namespaced object.
type NamespacedSelector struct {
	// MatchLabels ensures an object with matching labels is selected.
	MatchLabels map[string]string `json:"matchLabels,omitempty"`

	// MatchControllerRef ensures an object with the same controller reference
	// as the selecting object is selected.
	MatchControllerRef *bool `json:"matchControllerRef,omitempty"`

	// Policies for selection.
	// +optional
	Policy *Policy `json:"policy,omitempty"`

	// Namespace for the selector.
	// +optional
	Namespace string `json:"namespace,omitempty"`
}

// ProviderConfigReference is a typed reference to a ProviderConfig
// object, with a known api group.
type ProviderConfigReference struct {
	// Kind of the referenced object.
	Kind string `json:"kind"`

	// Name of the referenced object.
	Name string `json:"name"`
}
// SetGroupVersionKind sets the Kind and APIVersion of a TypedReference from
// the supplied GroupVersionKind.
func (obj *TypedReference) SetGroupVersionKind(gvk schema.GroupVersionKind) {
	apiVersion, kind := gvk.ToAPIVersionAndKind()
	obj.APIVersion = apiVersion
	obj.Kind = kind
}
// GroupVersionKind gets the GroupVersionKind of a TypedReference, parsed from
// its APIVersion and Kind fields.
func (obj *TypedReference) GroupVersionKind() schema.GroupVersionKind {
	gvk := schema.FromAPIVersionAndKind(obj.APIVersion, obj.Kind)
	return gvk
}
// GetObjectKind gets the ObjectKind of a TypedReference; the reference itself
// satisfies schema.ObjectKind.
func (obj *TypedReference) GetObjectKind() schema.ObjectKind { return obj }
// ResourceStatus represents the observed state of a managed resource.
type ResourceStatus struct {
	ConditionedStatus `json:",inline"`
	ObservedStatus    `json:",inline"`
}

// A CredentialsSource is a source from which provider credentials may be
// acquired.
type CredentialsSource string

const (
	// CredentialsSourceNone indicates that a provider does not require
	// credentials.
	CredentialsSourceNone CredentialsSource = "None"

	// CredentialsSourceSecret indicates that a provider should acquire
	// credentials from a secret.
	CredentialsSourceSecret CredentialsSource = "Secret"

	// CredentialsSourceInjectedIdentity indicates that a provider should use
	// credentials via its (pod's) identity; i.e. via IRSA for AWS,
	// Workload Identity for GCP, Pod Identity for Azure, or in-cluster
	// authentication for the Kubernetes API.
	CredentialsSourceInjectedIdentity CredentialsSource = "InjectedIdentity"

	// CredentialsSourceEnvironment indicates that a provider should acquire
	// credentials from an environment variable.
	CredentialsSourceEnvironment CredentialsSource = "Environment"

	// CredentialsSourceFilesystem indicates that a provider should acquire
	// credentials from the filesystem.
	CredentialsSourceFilesystem CredentialsSource = "Filesystem"
)

// CommonCredentialSelectors provides common selectors for extracting
// credentials.
//
//nolint:revive // preserve backward-compatibility
type CommonCredentialSelectors struct {
	// Fs is a reference to a filesystem location that contains credentials that
	// must be used to connect to the provider.
	// +optional
	Fs *FsSelector `json:"fs,omitempty"`

	// Env is a reference to an environment variable that contains credentials
	// that must be used to connect to the provider.
	// +optional
	Env *EnvSelector `json:"env,omitempty"`

	// A SecretRef is a reference to a secret key that contains the credentials
	// that must be used to connect to the provider.
	// +optional
	SecretRef *SecretKeySelector `json:"secretRef,omitempty"`
}

// EnvSelector selects an environment variable.
type EnvSelector struct {
	// Name is the name of an environment variable.
	Name string `json:"name"`
}

// FsSelector selects a filesystem location.
type FsSelector struct {
	// Path is a filesystem path.
	Path string `json:"path"`
}

// A ProviderConfigStatus defines the observed status of a ProviderConfig.
type ProviderConfigStatus struct {
	ConditionedStatus `json:",inline"`

	// Users of this provider configuration.
	Users int64 `json:"users,omitempty"`
}

// A ProviderConfigUsage is a record that a particular managed resource is using
// a particular provider configuration.
type ProviderConfigUsage struct {
	// ProviderConfigReference to the provider config being used.
	ProviderConfigReference Reference `json:"providerConfigRef"`

	// ResourceReference to the managed resource using the provider config.
	ResourceReference TypedReference `json:"resourceRef"`
}

// A TypedProviderConfigUsage is a record that a particular managed resource is using
// a particular provider configuration.
type TypedProviderConfigUsage struct {
	// ProviderConfigReference to the provider config being used.
	ProviderConfigReference ProviderConfigReference `json:"providerConfigRef"`

	// ResourceReference to the managed resource using the provider config.
	ResourceReference TypedReference `json:"resourceRef"`
}

View File

@ -17,60 +17,78 @@ limitations under the License.
package v1
import (
"github.com/crossplane/crossplane-runtime/apis/common"
"sort"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
// A ConditionType represents a condition a resource could be in.
type ConditionType = common.ConditionType
type ConditionType string
// Condition types.
const (
// TypeReady resources are believed to be ready to handle work.
TypeReady ConditionType = common.TypeReady
TypeReady ConditionType = "Ready"
// TypeSynced resources are believed to be in sync with the
// Kubernetes resources that manage their lifecycle.
TypeSynced ConditionType = common.TypeSynced
// TypeHealthy resources are believed to be in a healthy state and to have all
// of their child resources in a healthy state. For example, a claim is
// healthy when the claim is synced and the underlying composite resource is
// both synced and healthy. A composite resource is healthy when the composite
// resource is synced and all composed resources are synced and, if
// applicable, healthy (e.g., the composed resource is a composite resource).
// TODO: This condition is not yet implemented. It is currently just reserved
// as a system condition. See the tracking issue for more details
// https://github.com/crossplane/crossplane/issues/5643.
TypeHealthy ConditionType = common.TypeHealthy
TypeSynced ConditionType = "Synced"
)
// A ConditionReason represents the reason a resource is in a condition.
type ConditionReason = common.ConditionReason
type ConditionReason string
// Reasons a resource is or is not ready.
const (
ReasonAvailable = common.ReasonAvailable
ReasonUnavailable = common.ReasonUnavailable
ReasonCreating = common.ReasonCreating
ReasonDeleting = common.ReasonDeleting
ReasonAvailable ConditionReason = "Available"
ReasonUnavailable ConditionReason = "Unavailable"
ReasonCreating ConditionReason = "Creating"
ReasonDeleting ConditionReason = "Deleting"
)
// Reasons a resource is or is not synced.
const (
ReasonReconcileSuccess = common.ReasonReconcileSuccess
ReasonReconcileError = common.ReasonReconcileError
ReasonReconcilePaused = common.ReasonReconcilePaused
ReasonReconcileSuccess ConditionReason = "ReconcileSuccess"
ReasonReconcileError ConditionReason = "ReconcileError"
)
// See https://github.com/kubernetes/community/blob/master/contributors/devel/sig-architecture/api-conventions.md#typical-status-properties
// A Condition that may apply to a resource.
type Condition = common.Condition
type Condition struct {
// Type of this condition. At most one of each condition type may apply to
// a resource at any point in time.
Type ConditionType `json:"type"`
// IsSystemConditionType returns true if the condition is owned by the
// Crossplane system (e.g, Ready, Synced, Healthy).
func IsSystemConditionType(t ConditionType) bool {
return common.IsSystemConditionType(t)
// Status of this condition; is it currently True, False, or Unknown?
Status corev1.ConditionStatus `json:"status"`
// LastTransitionTime is the last time this condition transitioned from one
// status to another.
LastTransitionTime metav1.Time `json:"lastTransitionTime"`
// A Reason for this condition's last transition from one status to another.
Reason ConditionReason `json:"reason"`
// A Message containing details about this condition's last transition from
// one status to another, if any.
// +optional
Message string `json:"message,omitempty"`
}
// Equal returns true if the condition is identical to the supplied condition,
// ignoring the LastTransitionTime.
func (c Condition) Equal(other Condition) bool {
return c.Type == other.Type &&
c.Status == other.Status &&
c.Reason == other.Reason &&
c.Message == other.Message
}
// WithMessage returns a condition by adding the provided message to existing
// condition.
func (c Condition) WithMessage(msg string) Condition {
c.Message = msg
return c
}
// NOTE(negz): Conditions are implemented as a slice rather than a map to comply
@ -82,29 +100,117 @@ func IsSystemConditionType(t ConditionType) bool {
// A ConditionedStatus reflects the observed status of a resource. Only
// one condition of each type may exist.
type ConditionedStatus = common.ConditionedStatus
type ConditionedStatus struct {
// Conditions of the resource.
// +optional
Conditions []Condition `json:"conditions,omitempty"`
}
// NewConditionedStatus returns a stat with the supplied conditions set.
func NewConditionedStatus(c ...Condition) *ConditionedStatus {
return common.NewConditionedStatus(c...)
s := &ConditionedStatus{}
s.SetConditions(c...)
return s
}
// GetCondition returns the condition for the given ConditionType if exists,
// otherwise returns nil
func (s *ConditionedStatus) GetCondition(ct ConditionType) Condition {
for _, c := range s.Conditions {
if c.Type == ct {
return c
}
}
return Condition{Type: ct, Status: corev1.ConditionUnknown}
}
// SetConditions sets the supplied conditions, replacing any existing conditions
// of the same type. This is a no-op if all supplied conditions are identical,
// ignoring the last transition time, to those already set.
func (s *ConditionedStatus) SetConditions(c ...Condition) {
for _, new := range c {
exists := false
for i, existing := range s.Conditions {
if existing.Type != new.Type {
continue
}
if existing.Equal(new) {
exists = true
continue
}
s.Conditions[i] = new
exists = true
}
if !exists {
s.Conditions = append(s.Conditions, new)
}
}
}
// Equal returns true if the status is identical to the supplied status,
// ignoring the LastTransitionTimes and order of statuses.
func (s *ConditionedStatus) Equal(other *ConditionedStatus) bool {
if s == nil || other == nil {
return s == nil && other == nil
}
if len(other.Conditions) != len(s.Conditions) {
return false
}
sc := make([]Condition, len(s.Conditions))
copy(sc, s.Conditions)
oc := make([]Condition, len(other.Conditions))
copy(oc, other.Conditions)
// We should not have more than one condition of each type.
sort.Slice(sc, func(i, j int) bool { return sc[i].Type < sc[j].Type })
sort.Slice(oc, func(i, j int) bool { return oc[i].Type < oc[j].Type })
for i := range sc {
if !sc[i].Equal(oc[i]) {
return false
}
}
return true
}
// Creating returns a condition that indicates the resource is currently
// being created.
func Creating() Condition {
return common.Creating()
return Condition{
Type: TypeReady,
Status: corev1.ConditionFalse,
LastTransitionTime: metav1.Now(),
Reason: ReasonCreating,
}
}
// Deleting returns a condition that indicates the resource is currently
// being deleted.
func Deleting() Condition {
return common.Deleting()
return Condition{
Type: TypeReady,
Status: corev1.ConditionFalse,
LastTransitionTime: metav1.Now(),
Reason: ReasonDeleting,
}
}
// Available returns a condition that indicates the resource is
// currently observed to be available for use.
func Available() Condition {
return common.Available()
return Condition{
Type: TypeReady,
Status: corev1.ConditionTrue,
LastTransitionTime: metav1.Now(),
Reason: ReasonAvailable,
}
}
// Unavailable returns a condition that indicates the resource is not
@ -112,13 +218,23 @@ func Available() Condition {
// expects the resource to be available but knows it is not, for example
// because its API reports it is unhealthy.
func Unavailable() Condition {
return common.Unavailable()
return Condition{
Type: TypeReady,
Status: corev1.ConditionFalse,
LastTransitionTime: metav1.Now(),
Reason: ReasonUnavailable,
}
}
// ReconcileSuccess returns a condition indicating that Crossplane successfully
// completed the most recent reconciliation of the resource.
func ReconcileSuccess() Condition {
return common.ReconcileSuccess()
return Condition{
Type: TypeSynced,
Status: corev1.ConditionTrue,
LastTransitionTime: metav1.Now(),
Reason: ReasonReconcileSuccess,
}
}
// ReconcileError returns a condition indicating that Crossplane encountered an
@ -126,11 +242,11 @@ func ReconcileSuccess() Condition {
// unable to update the resource to reflect its desired state, or that
// Crossplane was unable to determine the current actual state of the resource.
func ReconcileError(err error) Condition {
return common.ReconcileError(err)
}
// ReconcilePaused returns a condition that indicates reconciliation on
// the managed resource is paused via the pause annotation.
func ReconcilePaused() Condition {
return common.ReconcilePaused()
return Condition{
Type: TypeSynced,
Status: corev1.ConditionFalse,
LastTransitionTime: metav1.Now(),
Reason: ReasonReconcileError,
Message: err.Error(),
}
}

View File

@ -14,7 +14,7 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
package common
package v1
import (
"testing"
@ -32,25 +32,6 @@ func TestConditionEqual(t *testing.T) {
b Condition
want bool
}{
"Identical": {
a: Condition{
Type: TypeReady,
Status: corev1.ConditionTrue,
LastTransitionTime: metav1.Now(),
Reason: ReasonCreating,
Message: "UnitTest",
ObservedGeneration: 1,
},
b: Condition{
Type: TypeReady,
Status: corev1.ConditionTrue,
LastTransitionTime: metav1.Now(),
Reason: ReasonCreating,
Message: "UnitTest",
ObservedGeneration: 1,
},
want: true,
},
"IdenticalIgnoringTimestamp": {
a: Condition{Type: TypeReady, LastTransitionTime: metav1.Now()},
b: Condition{Type: TypeReady, LastTransitionTime: metav1.Now()},
@ -76,20 +57,6 @@ func TestConditionEqual(t *testing.T) {
b: Condition{Message: "uncool"},
want: false,
},
"DifferentObservedGeneration": {
a: Condition{ObservedGeneration: 1},
b: Condition{},
want: false,
},
"CheckReconcilePaused": {
a: ReconcilePaused(),
b: Condition{
Type: TypeSynced,
Status: corev1.ConditionFalse,
Reason: ReasonReconcilePaused,
},
want: true,
},
}
for name, tc := range cases {
@ -163,11 +130,6 @@ func TestSetConditions(t *testing.T) {
c: []Condition{Available()},
want: NewConditionedStatus(Available()),
},
"ObservedGenerationIsUpdated": {
cs: NewConditionedStatus(Available().WithObservedGeneration(1)),
c: []Condition{Available().WithObservedGeneration(2)},
want: NewConditionedStatus(Available().WithObservedGeneration(2)),
},
"TypeIsDifferent": {
cs: NewConditionedStatus(Creating()),
c: []Condition{Available()},
@ -257,64 +219,3 @@ func TestConditionWithMessage(t *testing.T) {
})
}
}
func TestConditionWithObservedGeneration(t *testing.T) {
cases := map[string]struct {
c Condition
observedGeneration int64
want Condition
}{
"Added": {
c: Condition{Type: TypeReady, Reason: ReasonUnavailable},
observedGeneration: 10,
want: Condition{Type: TypeReady, Reason: ReasonUnavailable, ObservedGeneration: 10},
},
"Changed": {
c: Condition{Type: TypeReady, Reason: ReasonUnavailable, ObservedGeneration: 3},
observedGeneration: 10,
want: Condition{Type: TypeReady, Reason: ReasonUnavailable, ObservedGeneration: 10},
},
}
for name, tc := range cases {
t.Run(name, func(t *testing.T) {
got := tc.c.WithObservedGeneration(tc.observedGeneration)
if diff := cmp.Diff(tc.want, got); diff != "" {
t.Errorf("a.Equal(b): -want, +got:\n%s", diff)
}
})
}
}
func TestIsSystemConditionType(t *testing.T) {
cases := map[string]struct {
c Condition
want bool
}{
"SystemReady": {
c: Condition{Type: ConditionType("Ready")},
want: true,
},
"SystemSynced": {
c: Condition{Type: ConditionType("Synced")},
want: true,
},
"SystemHealthy": {
c: Condition{Type: ConditionType("Healthy")},
want: true,
},
"Custom": {
c: Condition{Type: ConditionType("Custom")},
want: false,
},
}
for name, tc := range cases {
t.Run(name, func(t *testing.T) {
if diff := cmp.Diff(tc.want, IsSystemConditionType(tc.c.Type)); diff != "" {
t.Errorf("IsSystemConditionType(tc.c.Type): -want, +got:\n%s", diff)
}
})
}
}

View File

@ -0,0 +1,226 @@
/*
Copyright 2019 The Crossplane Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1
import (
corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/types"
)
const (
// LabelKeyOwnerUID is the UID of the owner resource of a connection secret.
// Kubernetes provides owner/controller references to track ownership of
// resources including secrets, however, this would only work for in cluster
// k8s secrets. We opted to use a label for this purpose to be consistent
// across Secret Store implementations and expect all to support
// setting/getting labels.
LabelKeyOwnerUID = "secret.crossplane.io/owner-uid"
)
// PublishConnectionDetailsTo represents configuration of a connection secret.
type PublishConnectionDetailsTo struct {
// Name is the name of the connection secret.
Name string `json:"name"`
// Metadata is the metadata for connection secret.
// +optional
Metadata *ConnectionSecretMetadata `json:"metadata,omitempty"`
// SecretStoreConfigRef specifies which secret store config should be used
// for this ConnectionSecret.
// +optional
// +kubebuilder:default={"name": "default"}
SecretStoreConfigRef *Reference `json:"configRef,omitempty"`
}
// ConnectionSecretMetadata represents metadata of a connection secret.
// Labels are used to track ownership of connection secrets and has to be
// supported for any secret store implementation.
type ConnectionSecretMetadata struct {
// Labels are the labels/tags to be added to connection secret.
// - For Kubernetes secrets, this will be used as "metadata.labels".
// - It is up to Secret Store implementation for others store types.
// +optional
Labels map[string]string `json:"labels,omitempty"`
// Annotations are the annotations to be added to connection secret.
// - For Kubernetes secrets, this will be used as "metadata.annotations".
// - It is up to Secret Store implementation for others store types.
// +optional
Annotations map[string]string `json:"annotations,omitempty"`
// Type is the SecretType for the connection secret.
// - Only valid for Kubernetes Secret Stores.
// +optional
Type *corev1.SecretType `json:"type,omitempty"`
}
// SetOwnerUID sets owner object uid label.
func (in *ConnectionSecretMetadata) SetOwnerUID(uid types.UID) {
if in.Labels == nil {
in.Labels = map[string]string{}
}
in.Labels[LabelKeyOwnerUID] = string(uid)
}
// GetOwnerUID gets owner object uid.
func (in *ConnectionSecretMetadata) GetOwnerUID() string {
if u, ok := in.Labels[LabelKeyOwnerUID]; ok {
return u
}
return ""
}
// SecretStoreType represents a secret store type.
type SecretStoreType string
const (
// SecretStoreKubernetes indicates that secret store type is
// Kubernetes. In other words, connection secrets will be stored as K8s
// Secrets.
SecretStoreKubernetes SecretStoreType = "Kubernetes"
// SecretStoreVault indicates that secret store type is Vault.
SecretStoreVault SecretStoreType = "Vault"
)
// SecretStoreConfig represents configuration of a Secret Store.
type SecretStoreConfig struct {
// Type configures which secret store to be used. Only the configuration
// block for this store will be used and others will be ignored if provided.
// Default is Kubernetes.
// +optional
// +kubebuilder:default=Kubernetes
Type *SecretStoreType `json:"type,omitempty"`
// DefaultScope used for scoping secrets for "cluster-scoped" resources.
// If store type is "Kubernetes", this would mean the default namespace to
// store connection secrets for cluster scoped resources.
// In case of "Vault", this would be used as the default parent path.
// Typically, should be set as Crossplane installation namespace.
DefaultScope string `json:"defaultScope"`
// Kubernetes configures a Kubernetes secret store.
// If the "type" is "Kubernetes" but no config provided, in cluster config
// will be used.
// +optional
Kubernetes *KubernetesSecretStoreConfig `json:"kubernetes,omitempty"`
// Vault configures a Vault secret store.
// +optional
Vault *VaultSecretStoreConfig `json:"vault,omitempty"`
}
// KubernetesAuthConfig required to authenticate to a K8s API. It expects
// a "kubeconfig" file to be provided.
type KubernetesAuthConfig struct {
// Source of the credentials.
// +kubebuilder:validation:Enum=None;Secret;Environment;Filesystem
Source CredentialsSource `json:"source"`
// CommonCredentialSelectors provides common selectors for extracting
// credentials.
CommonCredentialSelectors `json:",inline"`
}
// KubernetesSecretStoreConfig represents the required configuration
// for a Kubernetes secret store.
type KubernetesSecretStoreConfig struct {
// Credentials used to connect to the Kubernetes API.
Auth KubernetesAuthConfig `json:"auth"`
// TODO(turkenh): Support additional identities like
// https://github.com/crossplane-contrib/provider-kubernetes/blob/4d722ef914e6964e80e190317daca9872ae98738/apis/v1alpha1/types.go#L34
}
// VaultAuthMethod represent a Vault authentication method.
// https://www.vaultproject.io/docs/auth
type VaultAuthMethod string
const (
// VaultAuthToken indicates that "Token Auth" will be used to
// authenticate to Vault.
// https://www.vaultproject.io/docs/auth/token
VaultAuthToken VaultAuthMethod = "Token"
)
// VaultAuthTokenConfig represents configuration for Vault Token Auth Method.
// https://www.vaultproject.io/docs/auth/token
type VaultAuthTokenConfig struct {
// Source of the credentials.
// +kubebuilder:validation:Enum=None;Secret;Environment;Filesystem
Source CredentialsSource `json:"source"`
// CommonCredentialSelectors provides common selectors for extracting
// credentials.
CommonCredentialSelectors `json:",inline"`
}
// VaultAuthConfig required to authenticate to a Vault API.
type VaultAuthConfig struct {
// Method configures which auth method will be used.
Method VaultAuthMethod `json:"method"`
// Token configures Token Auth for Vault.
// +optional
Token *VaultAuthTokenConfig `json:"token,omitempty"`
}
// VaultCABundleConfig represents configuration for configuring a CA bundle.
type VaultCABundleConfig struct {
// Source of the credentials.
// +kubebuilder:validation:Enum=None;Secret;Environment;Filesystem
Source CredentialsSource `json:"source"`
// CommonCredentialSelectors provides common selectors for extracting
// credentials.
CommonCredentialSelectors `json:",inline"`
}
// VaultKVVersion represent API version of the Vault KV engine
// https://www.vaultproject.io/docs/secrets/kv
type VaultKVVersion string
const (
// VaultKVVersionV1 indicates that Secret API is KV Secrets Engine Version 1
// https://www.vaultproject.io/docs/secrets/kv/kv-v1
VaultKVVersionV1 VaultKVVersion = "v1"
// VaultKVVersionV2 indicates that Secret API is KV Secrets Engine Version 2
// https://www.vaultproject.io/docs/secrets/kv/kv-v2
VaultKVVersionV2 VaultKVVersion = "v2"
)
// VaultSecretStoreConfig represents the required configuration for a Vault
// secret store.
type VaultSecretStoreConfig struct {
// Server is the url of the Vault server, e.g. "https://vault.acme.org"
Server string `json:"server"`
// MountPath is the mount path of the KV secrets engine.
MountPath string `json:"mountPath"`
// Version of the KV Secrets engine of Vault.
// https://www.vaultproject.io/docs/secrets/kv
// +optional
// +kubebuilder:default=v2
Version *VaultKVVersion `json:"version,omitempty"`
// CABundle configures CA bundle for Vault Server.
// +optional
CABundle *VaultCABundleConfig `json:"caBundle,omitempty"`
// Auth configures an authentication method for Vault.
Auth VaultAuthConfig `json:"auth"`
}

View File

@ -17,8 +17,36 @@ limitations under the License.
package v1
import (
"github.com/crossplane/crossplane-runtime/apis/common"
"github.com/imdario/mergo"
)
// MergeOptions Specifies merge options on a field path.
type MergeOptions = common.MergeOptions
// MergeOptions Specifies merge options on a field path
type MergeOptions struct { // TODO(aru): add more options that control merging behavior
// Specifies that already existing values in a merged map should be preserved
// +optional
KeepMapValues *bool `json:"keepMapValues,omitempty"`
// Specifies that already existing elements in a merged slice should be preserved
// +optional
AppendSlice *bool `json:"appendSlice,omitempty"`
}
// MergoConfiguration the default behavior is to replace maps and slices
func (mo *MergeOptions) MergoConfiguration() []func(*mergo.Config) {
config := []func(*mergo.Config){mergo.WithOverride}
if mo == nil {
return config
}
if mo.KeepMapValues != nil && *mo.KeepMapValues {
config = config[:0]
}
if mo.AppendSlice != nil && *mo.AppendSlice {
config = append(config, mergo.WithAppendSlice)
}
return config
}
// IsAppendSlice returns true if mo.AppendSlice is set to true
func (mo *MergeOptions) IsAppendSlice() bool {
return mo != nil && mo.AppendSlice != nil && *mo.AppendSlice
}

View File

@ -14,7 +14,7 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
package common
package v1
import (
"reflect"
@ -22,8 +22,8 @@ import (
"sort"
"testing"
"dario.cat/mergo"
"github.com/google/go-cmp/cmp"
"github.com/imdario/mergo"
)
type mergoOptArr []func(*mergo.Config)
@ -33,15 +33,12 @@ func (arr mergoOptArr) names() []string {
for i, opt := range arr {
names[i] = runtime.FuncForPC(reflect.ValueOf(opt).Pointer()).Name()
}
sort.Strings(names)
return names
}
func TestMergoConfiguration(t *testing.T) {
valTrue := true
tests := map[string]struct {
mo *MergeOptions
want mergoOptArr
@ -87,6 +84,7 @@ func TestMergoConfiguration(t *testing.T) {
if diff := cmp.Diff(tc.want.names(), mergoOptArr(tc.mo.MergoConfiguration()).names()); diff != "" {
t.Errorf("\nmo.MergoConfiguration(): -want, +got:\n %s", diff)
}
})
}
}

View File

@ -1,24 +0,0 @@
/*
Copyright 2024 The Crossplane Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1
import (
"github.com/crossplane/crossplane-runtime/apis/common"
)
// ObservedStatus contains the recent reconciliation stats.
type ObservedStatus = common.ObservedStatus

View File

@ -16,52 +16,14 @@ limitations under the License.
package v1
import (
"github.com/crossplane/crossplane-runtime/apis/common"
)
// ManagementPolicies determine how should Crossplane controllers manage an
// external resource through an array of ManagementActions.
type ManagementPolicies = common.ManagementPolicies
// A ManagementAction represents an action that the Crossplane controllers
// can take on an external resource.
type ManagementAction = common.ManagementAction
const (
// ManagementActionObserve means that the managed resource status.atProvider
// will be updated with the external resource state.
ManagementActionObserve = common.ManagementActionObserve
// ManagementActionCreate means that the external resource will be created
// using the managed resource spec.initProvider and spec.forProvider.
ManagementActionCreate = common.ManagementActionCreate
// ManagementActionUpdate means that the external resource will be updated
// using the managed resource spec.forProvider.
ManagementActionUpdate = common.ManagementActionUpdate
// ManagementActionDelete means that the external resource will be deleted
// when the managed resource is deleted.
ManagementActionDelete = common.ManagementActionDelete
// ManagementActionLateInitialize means that unspecified fields of the managed
// resource spec.forProvider will be updated with the external resource state.
ManagementActionLateInitialize = common.ManagementActionLateInitialize
// ManagementActionAll means that all of the above actions will be taken
// by the Crossplane controllers.
ManagementActionAll = common.ManagementActionAll
)
// A DeletionPolicy determines what should happen to the underlying external
// resource when a managed resource is deleted.
// +kubebuilder:validation:Enum=Orphan;Delete
type DeletionPolicy string
const (
// DeletionOrphan means the external resource will be orphaned when its
// managed resource is deleted.
// DeletionOrphan means the external resource will orphaned when its managed
// resource is deleted.
DeletionOrphan DeletionPolicy = "Orphan"
// DeletionDelete means both the external resource will be deleted when its
@ -69,53 +31,40 @@ const (
DeletionDelete DeletionPolicy = "Delete"
)
// A CompositeDeletePolicy determines how the composite resource should be deleted
// when the corresponding claim is deleted.
type CompositeDeletePolicy = common.CompositeDeletePolicy
const (
// CompositeDeleteBackground means the composite resource will be deleted using
// the Background Propagation Policy when the claim is deleted.
CompositeDeleteBackground = common.CompositeDeleteBackground
// CompositeDeleteForeground means the composite resource will be deleted using
// the Foreground Propagation Policy when the claim is deleted.
CompositeDeleteForeground = common.CompositeDeleteForeground
)
// An UpdatePolicy determines how something should be updated - either
// automatically (without human intervention) or manually.
type UpdatePolicy = common.UpdatePolicy
// +kubebuilder:validation:Enum=Automatic;Manual
type UpdatePolicy string
const (
// UpdateAutomatic means the resource should be updated automatically,
// without any human intervention.
UpdateAutomatic = common.UpdateAutomatic
UpdateAutomatic UpdatePolicy = "Automatic"
// UpdateManual means the resource requires human intervention to
// update.
UpdateManual = common.UpdateManual
UpdateManual UpdatePolicy = "Manual"
)
// ResolvePolicy is a type for resolve policy.
type ResolvePolicy = common.ResolvePolicy
type ResolvePolicy string
// ResolutionPolicy is a type for resolution policy.
type ResolutionPolicy = common.ResolutionPolicy
type ResolutionPolicy string
const (
// ResolvePolicyAlways is a resolve option.
// When the ResolvePolicy is set to ResolvePolicyAlways the reference will
// be tried to resolve for every reconcile loop.
ResolvePolicyAlways = common.ResolvePolicyAlways
ResolvePolicyAlways ResolvePolicy = "Always"
// ResolutionPolicyRequired is a resolution option.
// When the ResolutionPolicy is set to ResolutionPolicyRequired the execution
// could not continue even if the reference cannot be resolved.
ResolutionPolicyRequired = common.ResolutionPolicyRequired
ResolutionPolicyRequired ResolutionPolicy = "Required"
// ResolutionPolicyOptional is a resolution option.
// When the ReferenceResolutionPolicy is set to ReferencePolicyOptional the
// execution could continue even if the reference cannot be resolved.
ResolutionPolicyOptional = common.ResolutionPolicyOptional
ResolutionPolicyOptional ResolutionPolicy = "Optional"
)

View File

@ -19,37 +19,34 @@ package v1
import (
corev1 "k8s.io/api/core/v1"
"github.com/crossplane/crossplane-runtime/apis/common"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/apimachinery/pkg/types"
)
const (
// ResourceCredentialsSecretEndpointKey is the key inside a connection secret for the connection endpoint.
// ResourceCredentialsSecretEndpointKey is the key inside a connection secret for the connection endpoint
ResourceCredentialsSecretEndpointKey = "endpoint"
// ResourceCredentialsSecretPortKey is the key inside a connection secret for the connection port.
// ResourceCredentialsSecretPortKey is the key inside a connection secret for the connection port
ResourceCredentialsSecretPortKey = "port"
// ResourceCredentialsSecretUserKey is the key inside a connection secret for the connection user.
// ResourceCredentialsSecretUserKey is the key inside a connection secret for the connection user
ResourceCredentialsSecretUserKey = "username"
// ResourceCredentialsSecretPasswordKey is the key inside a connection secret for the connection password.
// ResourceCredentialsSecretPasswordKey is the key inside a connection secret for the connection password
ResourceCredentialsSecretPasswordKey = "password"
// ResourceCredentialsSecretCAKey is the key inside a connection secret for the server CA certificate.
// ResourceCredentialsSecretCAKey is the key inside a connection secret for the server CA certificate
ResourceCredentialsSecretCAKey = "clusterCA"
// ResourceCredentialsSecretClientCertKey is the key inside a connection secret for the client certificate.
// ResourceCredentialsSecretClientCertKey is the key inside a connection secret for the client certificate
ResourceCredentialsSecretClientCertKey = "clientCert"
// ResourceCredentialsSecretClientKeyKey is the key inside a connection secret for the client key.
// ResourceCredentialsSecretClientKeyKey is the key inside a connection secret for the client key
ResourceCredentialsSecretClientKeyKey = "clientKey"
// ResourceCredentialsSecretTokenKey is the key inside a connection secret for the bearer token value.
// ResourceCredentialsSecretTokenKey is the key inside a connection secret for the bearer token value
ResourceCredentialsSecretTokenKey = "token"
// ResourceCredentialsSecretKubeconfigKey is the key inside a connection secret for the raw kubeconfig yaml.
// ResourceCredentialsSecretKubeconfigKey is the key inside a connection secret for the raw kubeconfig yaml
ResourceCredentialsSecretKubeconfigKey = "kubeconfig"
)
// LabelKeyProviderKind is added to ProviderConfigUsages to relate them to their
// ProviderConfig.
const LabelKeyProviderKind = common.LabelKeyProviderKind
// LabelKeyProviderName is added to ProviderConfigUsages to relate them to their
// ProviderConfig.
const LabelKeyProviderName = common.LabelKeyProviderName
const LabelKeyProviderName = "crossplane.io/provider-config"
// NOTE(negz): The below secret references differ from ObjectReference and
// LocalObjectReference in that they include only the fields Crossplane needs to
@ -58,41 +55,118 @@ const LabelKeyProviderName = common.LabelKeyProviderName
// A LocalSecretReference is a reference to a secret in the same namespace as
// the referencer.
type LocalSecretReference = common.LocalSecretReference
type LocalSecretReference struct {
// Name of the secret.
Name string `json:"name"`
}
// A SecretReference is a reference to a secret in an arbitrary namespace.
type SecretReference = common.SecretReference
type SecretReference struct {
// Name of the secret.
Name string `json:"name"`
// Namespace of the secret.
Namespace string `json:"namespace"`
}
// A SecretKeySelector is a reference to a secret key in an arbitrary namespace.
type SecretKeySelector = common.SecretKeySelector
type SecretKeySelector struct {
SecretReference `json:",inline"`
// A LocalSecretKeySelector is a reference to a secret key
// in the same namespace with the referencing object.
type LocalSecretKeySelector = common.LocalSecretKeySelector
// The key to select.
Key string `json:"key"`
}
// Policy represents the Resolve and Resolution policies of Reference instance.
type Policy = common.Policy
type Policy struct {
// Resolve specifies when this reference should be resolved. The default
// is 'IfNotPresent', which will attempt to resolve the reference only when
// the corresponding field is not present. Use 'Always' to resolve the
// reference on every reconcile.
// +optional
// +kubebuilder:validation:Enum=Always;IfNotPresent
Resolve *ResolvePolicy `json:"resolve,omitempty"`
// Resolution specifies whether resolution of this reference is required.
// The default is 'Required', which means the reconcile will fail if the
// reference cannot be resolved. 'Optional' means this reference will be
// a no-op if it cannot be resolved.
// +optional
// +kubebuilder:default=Required
// +kubebuilder:validation:Enum=Required;Optional
Resolution *ResolutionPolicy `json:"resolution,omitempty"`
}
// IsResolutionPolicyOptional checks whether the resolution policy of relevant reference is Optional.
func (p *Policy) IsResolutionPolicyOptional() bool {
if p == nil || p.Resolution == nil {
return false
}
return *p.Resolution == ResolutionPolicyOptional
}
// IsResolvePolicyAlways checks whether the resolution policy of relevant reference is Always.
func (p *Policy) IsResolvePolicyAlways() bool {
if p == nil || p.Resolve == nil {
return false
}
return *p.Resolve == ResolvePolicyAlways
}
// A Reference to a named object.
type Reference = common.Reference
type Reference struct {
// Name of the referenced object.
Name string `json:"name"`
// A NamespacedReference to a named object.
type NamespacedReference = common.NamespacedReference
// Policies for referencing.
// +optional
Policy *Policy `json:"policy,omitempty"`
}
// A TypedReference refers to an object by Name, Kind, and APIVersion. It is
// commonly used to reference cluster-scoped objects or objects where the
// namespace is already known.
type TypedReference = common.TypedReference
type TypedReference struct {
// APIVersion of the referenced object.
APIVersion string `json:"apiVersion"`
// Kind of the referenced object.
Kind string `json:"kind"`
// Name of the referenced object.
Name string `json:"name"`
// UID of the referenced object.
// +optional
UID types.UID `json:"uid,omitempty"`
}
// A Selector selects an object.
type Selector = common.Selector
type Selector struct {
// MatchLabels ensures an object with matching labels is selected.
MatchLabels map[string]string `json:"matchLabels,omitempty"`
// NamespacedSelector selects a namespaced object.
type NamespacedSelector = common.NamespacedSelector
// MatchControllerRef ensures an object with the same controller reference
// as the selecting object is selected.
MatchControllerRef *bool `json:"matchControllerRef,omitempty"`
// ProviderConfigReference is a typed reference to a ProviderConfig
// object, with a known api group.
type ProviderConfigReference = common.ProviderConfigReference
// Policies for selection.
// +optional
Policy *Policy `json:"policy,omitempty"`
}
// SetGroupVersionKind sets the Kind and APIVersion of a TypedReference.
func (obj *TypedReference) SetGroupVersionKind(gvk schema.GroupVersionKind) {
obj.APIVersion, obj.Kind = gvk.ToAPIVersionAndKind()
}
// GroupVersionKind gets the GroupVersionKind of a TypedReference.
func (obj *TypedReference) GroupVersionKind() schema.GroupVersionKind {
return schema.FromAPIVersionAndKind(obj.APIVersion, obj.Kind)
}
// GetObjectKind get the ObjectKind of a TypedReference.
func (obj *TypedReference) GetObjectKind() schema.ObjectKind { return obj }
// TODO(negz): Rename Resource* to Managed* to clarify that they enable the
// resource.Managed interface.
@ -103,36 +177,35 @@ type ResourceSpec struct {
// Secret to which any connection details for this managed resource should
// be written. Connection details frequently include the endpoint, username,
// and password required to connect to the managed resource.
// This field is planned to be replaced in a future release in favor of
// PublishConnectionDetailsTo. Currently, both could be set independently
// and connection details would be published to both without affecting
// each other.
// +optional
WriteConnectionSecretToReference *SecretReference `json:"writeConnectionSecretToRef,omitempty"`
// PublishConnectionDetailsTo specifies the connection secret config which
// contains a name, metadata and a reference to secret store config to
// which any connection details for this managed resource should be written.
// Connection details frequently include the endpoint, username,
// and password required to connect to the managed resource.
// +optional
PublishConnectionDetailsTo *PublishConnectionDetailsTo `json:"publishConnectionDetailsTo,omitempty"`
// ProviderConfigReference specifies how the provider that will be used to
// create, observe, update, and delete this managed resource should be
// configured.
// +kubebuilder:default={"name": "default"}
ProviderConfigReference *Reference `json:"providerConfigRef,omitempty"`
// THIS IS A BETA FIELD. It is on by default but can be opted out
// through a Crossplane feature flag.
// ManagementPolicies specify the array of actions Crossplane is allowed to
// take on the managed and external resources.
// This field is planned to replace the DeletionPolicy field in a future
// release. Currently, both could be set independently and non-default
// values would be honored if the feature flag is enabled. If both are
// custom, the DeletionPolicy field will be ignored.
// See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223
// and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md
// +optional
// +kubebuilder:default={"*"}
ManagementPolicies ManagementPolicies `json:"managementPolicies,omitempty"`
// ProviderReference specifies the provider that will be used to create,
// observe, update, and delete this managed resource.
// Deprecated: Please use ProviderConfigReference, i.e. `providerConfigRef`
ProviderReference *Reference `json:"providerRef,omitempty"`
// DeletionPolicy specifies what will happen to the underlying external
// when this managed resource is deleted - either "Delete" or "Orphan" the
// external resource.
// This field is planned to be deprecated in favor of the ManagementPolicies
// field in a future release. Currently, both could be set independently and
// non-default values would be honored if the feature flag is enabled.
// See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223
// +optional
// +kubebuilder:default=Delete
DeletionPolicy DeletionPolicy `json:"deletionPolicy,omitempty"`
@ -141,53 +214,84 @@ type ResourceSpec struct {
// ResourceStatus represents the observed state of a managed resource.
type ResourceStatus struct {
ConditionedStatus `json:",inline"`
ObservedStatus `json:",inline"`
}
// A CredentialsSource is a source from which provider credentials may be
// acquired.
type CredentialsSource = common.CredentialsSource
type CredentialsSource string
const (
// CredentialsSourceNone indicates that a provider does not require
// credentials.
CredentialsSourceNone = common.CredentialsSourceNone
CredentialsSourceNone CredentialsSource = "None"
// CredentialsSourceSecret indicates that a provider should acquire
// credentials from a secret.
CredentialsSourceSecret = common.CredentialsSourceSecret
CredentialsSourceSecret CredentialsSource = "Secret"
// CredentialsSourceInjectedIdentity indicates that a provider should use
// credentials via its (pod's) identity; i.e. via IRSA for AWS,
// Workload Identity for GCP, Pod Identity for Azure, or in-cluster
// authentication for the Kubernetes API.
CredentialsSourceInjectedIdentity = common.CredentialsSourceInjectedIdentity
CredentialsSourceInjectedIdentity CredentialsSource = "InjectedIdentity"
// CredentialsSourceEnvironment indicates that a provider should acquire
// credentials from an environment variable.
CredentialsSourceEnvironment = common.CredentialsSourceEnvironment
CredentialsSourceEnvironment CredentialsSource = "Environment"
// CredentialsSourceFilesystem indicates that a provider should acquire
// credentials from the filesystem.
CredentialsSourceFilesystem = common.CredentialsSourceFilesystem
CredentialsSourceFilesystem CredentialsSource = "Filesystem"
)
// CommonCredentialSelectors provides common selectors for extracting
// credentials.
type CommonCredentialSelectors = common.CommonCredentialSelectors
type CommonCredentialSelectors struct {
// Fs is a reference to a filesystem location that contains credentials that
// must be used to connect to the provider.
// +optional
Fs *FsSelector `json:"fs,omitempty"`
// Env is a reference to an environment variable that contains credentials
// that must be used to connect to the provider.
// +optional
Env *EnvSelector `json:"env,omitempty"`
// A SecretRef is a reference to a secret key that contains the credentials
// that must be used to connect to the provider.
// +optional
SecretRef *SecretKeySelector `json:"secretRef,omitempty"`
}
// EnvSelector selects an environment variable.
type EnvSelector = common.EnvSelector
type EnvSelector struct {
// Name is the name of an environment variable.
Name string `json:"name"`
}
// FsSelector selects a filesystem location.
type FsSelector = common.FsSelector
type FsSelector struct {
// Path is a filesystem path.
Path string `json:"path"`
}
// A ProviderConfigStatus defines the observed status of a ProviderConfig.
type ProviderConfigStatus = common.ProviderConfigStatus
type ProviderConfigStatus struct {
ConditionedStatus `json:",inline"`
// Users of this provider configuration.
Users int64 `json:"users,omitempty"`
}
// A ProviderConfigUsage is a record that a particular managed resource is using
// a particular provider configuration.
type ProviderConfigUsage = common.ProviderConfigUsage
type ProviderConfigUsage struct {
// ProviderConfigReference to the provider config being used.
ProviderConfigReference Reference `json:"providerConfigRef"`
// ResourceReference to the managed resource using the provider config.
ResourceReference TypedReference `json:"resourceRef"`
}
// A TargetSpec defines the common fields of objects used for exposing
// infrastructure to workloads that can be scheduled to.

View File

@ -1,7 +1,8 @@
//go:build !ignore_autogenerated
// +build !ignore_autogenerated
/*
Copyright 2025 The Crossplane Authors.
Copyright 2019 The Crossplane Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
@ -24,6 +25,313 @@ import (
corev1 "k8s.io/api/core/v1"
)
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
// It first takes a shallow copy of every field, then re-points each non-nil
// pointer field at a freshly allocated value so out shares no memory with in.
func (in *CommonCredentialSelectors) DeepCopyInto(out *CommonCredentialSelectors) {
	*out = *in
	if in.Fs != nil {
		// in and out are deliberately shadowed with pointers to the field
		// being copied; this is the uniform controller-gen pattern.
		in, out := &in.Fs, &out.Fs
		*out = new(FsSelector)
		// FsSelector holds only value fields, so a value copy is a deep copy.
		**out = **in
	}
	if in.Env != nil {
		in, out := &in.Env, &out.Env
		*out = new(EnvSelector)
		**out = **in
	}
	if in.SecretRef != nil {
		in, out := &in.SecretRef, &out.SecretRef
		*out = new(SecretKeySelector)
		**out = **in
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CommonCredentialSelectors.
// A nil receiver yields nil, letting callers chain copies without nil checks.
func (in *CommonCredentialSelectors) DeepCopy() *CommonCredentialSelectors {
	if in == nil {
		return nil
	}
	out := new(CommonCredentialSelectors)
	in.DeepCopyInto(out)
	return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *Condition) DeepCopyInto(out *Condition) {
	*out = *in
	// LastTransitionTime is copied via its own DeepCopyInto so no internal
	// state is shared between in and out.
	in.LastTransitionTime.DeepCopyInto(&out.LastTransitionTime)
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Condition.
// A nil receiver yields nil.
func (in *Condition) DeepCopy() *Condition {
	if in == nil {
		return nil
	}
	out := new(Condition)
	in.DeepCopyInto(out)
	return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ConditionedStatus) DeepCopyInto(out *ConditionedStatus) {
	*out = *in
	if in.Conditions != nil {
		in, out := &in.Conditions, &out.Conditions
		// Allocate a fresh backing array and deep-copy every element so the
		// two slices never alias each other.
		*out = make([]Condition, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConditionedStatus.
// A nil receiver yields nil.
func (in *ConditionedStatus) DeepCopy() *ConditionedStatus {
	if in == nil {
		return nil
	}
	out := new(ConditionedStatus)
	in.DeepCopyInto(out)
	return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ConnectionSecretMetadata) DeepCopyInto(out *ConnectionSecretMetadata) {
	*out = *in
	if in.Labels != nil {
		in, out := &in.Labels, &out.Labels
		// Maps are reference types: rebuild the map key by key so mutations
		// of one copy cannot leak into the other.
		*out = make(map[string]string, len(*in))
		for key, val := range *in {
			(*out)[key] = val
		}
	}
	if in.Annotations != nil {
		in, out := &in.Annotations, &out.Annotations
		*out = make(map[string]string, len(*in))
		for key, val := range *in {
			(*out)[key] = val
		}
	}
	if in.Type != nil {
		in, out := &in.Type, &out.Type
		// The pointed-to SecretType value is copied by plain dereference;
		// no deeper copy is performed for it.
		*out = new(corev1.SecretType)
		**out = **in
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConnectionSecretMetadata.
// A nil receiver yields nil.
func (in *ConnectionSecretMetadata) DeepCopy() *ConnectionSecretMetadata {
	if in == nil {
		return nil
	}
	out := new(ConnectionSecretMetadata)
	in.DeepCopyInto(out)
	return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *EnvSelector) DeepCopyInto(out *EnvSelector) {
*out = *in
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EnvSelector.
func (in *EnvSelector) DeepCopy() *EnvSelector {
if in == nil {
return nil
}
out := new(EnvSelector)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *FsSelector) DeepCopyInto(out *FsSelector) {
*out = *in
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FsSelector.
func (in *FsSelector) DeepCopy() *FsSelector {
if in == nil {
return nil
}
out := new(FsSelector)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *KubernetesAuthConfig) DeepCopyInto(out *KubernetesAuthConfig) {
*out = *in
in.CommonCredentialSelectors.DeepCopyInto(&out.CommonCredentialSelectors)
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubernetesAuthConfig.
func (in *KubernetesAuthConfig) DeepCopy() *KubernetesAuthConfig {
if in == nil {
return nil
}
out := new(KubernetesAuthConfig)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *KubernetesSecretStoreConfig) DeepCopyInto(out *KubernetesSecretStoreConfig) {
*out = *in
in.Auth.DeepCopyInto(&out.Auth)
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubernetesSecretStoreConfig.
func (in *KubernetesSecretStoreConfig) DeepCopy() *KubernetesSecretStoreConfig {
if in == nil {
return nil
}
out := new(KubernetesSecretStoreConfig)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *LocalSecretReference) DeepCopyInto(out *LocalSecretReference) {
*out = *in
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LocalSecretReference.
func (in *LocalSecretReference) DeepCopy() *LocalSecretReference {
if in == nil {
return nil
}
out := new(LocalSecretReference)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *MergeOptions) DeepCopyInto(out *MergeOptions) {
	*out = *in
	// Each non-nil *bool flag gets its own allocation so the two copies can
	// be toggled independently.
	if in.KeepMapValues != nil {
		in, out := &in.KeepMapValues, &out.KeepMapValues
		*out = new(bool)
		**out = **in
	}
	if in.AppendSlice != nil {
		in, out := &in.AppendSlice, &out.AppendSlice
		*out = new(bool)
		**out = **in
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MergeOptions.
// A nil receiver yields nil.
func (in *MergeOptions) DeepCopy() *MergeOptions {
	if in == nil {
		return nil
	}
	out := new(MergeOptions)
	in.DeepCopyInto(out)
	return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *Policy) DeepCopyInto(out *Policy) {
*out = *in
if in.Resolve != nil {
in, out := &in.Resolve, &out.Resolve
*out = new(ResolvePolicy)
**out = **in
}
if in.Resolution != nil {
in, out := &in.Resolution, &out.Resolution
*out = new(ResolutionPolicy)
**out = **in
}
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Policy.
func (in *Policy) DeepCopy() *Policy {
if in == nil {
return nil
}
out := new(Policy)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ProviderConfigStatus) DeepCopyInto(out *ProviderConfigStatus) {
*out = *in
in.ConditionedStatus.DeepCopyInto(&out.ConditionedStatus)
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProviderConfigStatus.
func (in *ProviderConfigStatus) DeepCopy() *ProviderConfigStatus {
if in == nil {
return nil
}
out := new(ProviderConfigStatus)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ProviderConfigUsage) DeepCopyInto(out *ProviderConfigUsage) {
*out = *in
in.ProviderConfigReference.DeepCopyInto(&out.ProviderConfigReference)
out.ResourceReference = in.ResourceReference
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProviderConfigUsage.
func (in *ProviderConfigUsage) DeepCopy() *ProviderConfigUsage {
if in == nil {
return nil
}
out := new(ProviderConfigUsage)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *PublishConnectionDetailsTo) DeepCopyInto(out *PublishConnectionDetailsTo) {
*out = *in
if in.Metadata != nil {
in, out := &in.Metadata, &out.Metadata
*out = new(ConnectionSecretMetadata)
(*in).DeepCopyInto(*out)
}
if in.SecretStoreConfigRef != nil {
in, out := &in.SecretStoreConfigRef, &out.SecretStoreConfigRef
*out = new(Reference)
(*in).DeepCopyInto(*out)
}
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PublishConnectionDetailsTo.
func (in *PublishConnectionDetailsTo) DeepCopy() *PublishConnectionDetailsTo {
if in == nil {
return nil
}
out := new(PublishConnectionDetailsTo)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *Reference) DeepCopyInto(out *Reference) {
*out = *in
if in.Policy != nil {
in, out := &in.Policy, &out.Policy
*out = new(Policy)
(*in).DeepCopyInto(*out)
}
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Reference.
func (in *Reference) DeepCopy() *Reference {
if in == nil {
return nil
}
out := new(Reference)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ResourceSpec) DeepCopyInto(out *ResourceSpec) {
*out = *in
@ -32,15 +340,20 @@ func (in *ResourceSpec) DeepCopyInto(out *ResourceSpec) {
*out = new(SecretReference)
**out = **in
}
if in.PublishConnectionDetailsTo != nil {
in, out := &in.PublishConnectionDetailsTo, &out.PublishConnectionDetailsTo
*out = new(PublishConnectionDetailsTo)
(*in).DeepCopyInto(*out)
}
if in.ProviderConfigReference != nil {
in, out := &in.ProviderConfigReference, &out.ProviderConfigReference
*out = new(Reference)
(*in).DeepCopyInto(*out)
}
if in.ManagementPolicies != nil {
in, out := &in.ManagementPolicies, &out.ManagementPolicies
*out = make(ManagementPolicies, len(*in))
copy(*out, *in)
if in.ProviderReference != nil {
in, out := &in.ProviderReference, &out.ProviderReference
*out = new(Reference)
(*in).DeepCopyInto(*out)
}
}
@ -58,7 +371,6 @@ func (in *ResourceSpec) DeepCopy() *ResourceSpec {
func (in *ResourceStatus) DeepCopyInto(out *ResourceStatus) {
*out = *in
in.ConditionedStatus.DeepCopyInto(&out.ConditionedStatus)
in.ObservedStatus.DeepCopyInto(&out.ObservedStatus)
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceStatus.
@ -71,6 +383,99 @@ func (in *ResourceStatus) DeepCopy() *ResourceStatus {
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *SecretKeySelector) DeepCopyInto(out *SecretKeySelector) {
*out = *in
out.SecretReference = in.SecretReference
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SecretKeySelector.
func (in *SecretKeySelector) DeepCopy() *SecretKeySelector {
if in == nil {
return nil
}
out := new(SecretKeySelector)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *SecretReference) DeepCopyInto(out *SecretReference) {
*out = *in
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SecretReference.
func (in *SecretReference) DeepCopy() *SecretReference {
if in == nil {
return nil
}
out := new(SecretReference)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *SecretStoreConfig) DeepCopyInto(out *SecretStoreConfig) {
	*out = *in
	if in.Type != nil {
		in, out := &in.Type, &out.Type
		*out = new(SecretStoreType)
		// The pointed-to value is copied by plain dereference.
		**out = **in
	}
	if in.Kubernetes != nil {
		in, out := &in.Kubernetes, &out.Kubernetes
		*out = new(KubernetesSecretStoreConfig)
		// Delegates to the field's own DeepCopyInto (rather than **out = **in)
		// because the struct contains fields that themselves need deep copies.
		(*in).DeepCopyInto(*out)
	}
	if in.Vault != nil {
		in, out := &in.Vault, &out.Vault
		*out = new(VaultSecretStoreConfig)
		(*in).DeepCopyInto(*out)
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SecretStoreConfig.
// A nil receiver yields nil.
func (in *SecretStoreConfig) DeepCopy() *SecretStoreConfig {
	if in == nil {
		return nil
	}
	out := new(SecretStoreConfig)
	in.DeepCopyInto(out)
	return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *Selector) DeepCopyInto(out *Selector) {
*out = *in
if in.MatchLabels != nil {
in, out := &in.MatchLabels, &out.MatchLabels
*out = make(map[string]string, len(*in))
for key, val := range *in {
(*out)[key] = val
}
}
if in.MatchControllerRef != nil {
in, out := &in.MatchControllerRef, &out.MatchControllerRef
*out = new(bool)
**out = **in
}
if in.Policy != nil {
in, out := &in.Policy, &out.Policy
*out = new(Policy)
(*in).DeepCopyInto(*out)
}
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Selector.
func (in *Selector) DeepCopy() *Selector {
if in == nil {
return nil
}
out := new(Selector)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *TargetSpec) DeepCopyInto(out *TargetSpec) {
*out = *in
@ -111,3 +516,96 @@ func (in *TargetStatus) DeepCopy() *TargetStatus {
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *TypedReference) DeepCopyInto(out *TypedReference) {
*out = *in
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TypedReference.
func (in *TypedReference) DeepCopy() *TypedReference {
if in == nil {
return nil
}
out := new(TypedReference)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *VaultAuthConfig) DeepCopyInto(out *VaultAuthConfig) {
*out = *in
if in.Token != nil {
in, out := &in.Token, &out.Token
*out = new(VaultAuthTokenConfig)
(*in).DeepCopyInto(*out)
}
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VaultAuthConfig.
func (in *VaultAuthConfig) DeepCopy() *VaultAuthConfig {
if in == nil {
return nil
}
out := new(VaultAuthConfig)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *VaultAuthTokenConfig) DeepCopyInto(out *VaultAuthTokenConfig) {
*out = *in
in.CommonCredentialSelectors.DeepCopyInto(&out.CommonCredentialSelectors)
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VaultAuthTokenConfig.
func (in *VaultAuthTokenConfig) DeepCopy() *VaultAuthTokenConfig {
if in == nil {
return nil
}
out := new(VaultAuthTokenConfig)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *VaultCABundleConfig) DeepCopyInto(out *VaultCABundleConfig) {
*out = *in
in.CommonCredentialSelectors.DeepCopyInto(&out.CommonCredentialSelectors)
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VaultCABundleConfig.
func (in *VaultCABundleConfig) DeepCopy() *VaultCABundleConfig {
if in == nil {
return nil
}
out := new(VaultCABundleConfig)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *VaultSecretStoreConfig) DeepCopyInto(out *VaultSecretStoreConfig) {
*out = *in
if in.Version != nil {
in, out := &in.Version, &out.Version
*out = new(VaultKVVersion)
**out = **in
}
if in.CABundle != nil {
in, out := &in.CABundle, &out.CABundle
*out = new(VaultCABundleConfig)
(*in).DeepCopyInto(*out)
}
in.Auth.DeepCopyInto(&out.Auth)
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VaultSecretStoreConfig.
func (in *VaultSecretStoreConfig) DeepCopy() *VaultSecretStoreConfig {
if in == nil {
return nil
}
out := new(VaultSecretStoreConfig)
in.DeepCopyInto(out)
return out
}

View File

@ -1,51 +0,0 @@
/*
Copyright 2019 The Crossplane Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v2
import (
"github.com/crossplane/crossplane-runtime/apis/common"
)
// A ManagedResourceSpec defines the desired state of a managed resource.
type ManagedResourceSpec struct {
	// WriteConnectionSecretToReference specifies the namespace and name of a
	// Secret to which any connection details for this managed resource should
	// be written. Connection details frequently include the endpoint, username,
	// and password required to connect to the managed resource.
	// +optional
	WriteConnectionSecretToReference *common.LocalSecretReference `json:"writeConnectionSecretToRef,omitempty"`
	// ProviderConfigReference specifies how the provider that will be used to
	// create, observe, update, and delete this managed resource should be
	// configured.
	// NOTE(review): this field has a defaulting marker but, unlike its
	// siblings, no +optional marker — confirm that asymmetry is intentional.
	// +kubebuilder:default={"kind": "ClusterProviderConfig", "name": "default"}
	ProviderConfigReference *common.ProviderConfigReference `json:"providerConfigRef,omitempty"`
	// THIS IS A BETA FIELD. It is on by default but can be opted out
	// through a Crossplane feature flag.
	// ManagementPolicies specify the array of actions Crossplane is allowed to
	// take on the managed and external resources.
	// See the design doc for more information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223
	// and this one: https://github.com/crossplane/crossplane/blob/444267e84783136daa93568b364a5f01228cacbe/design/one-pager-ignore-changes.md
	// +optional
	// +kubebuilder:default={"*"}
	ManagementPolicies common.ManagementPolicies `json:"managementPolicies,omitempty"`
}
// A TypedProviderConfigUsage is a record that a particular managed resource is using
// a particular provider configuration.
type TypedProviderConfigUsage = common.TypedProviderConfigUsage

View File

@ -1,55 +0,0 @@
//go:build !ignore_autogenerated
/*
Copyright 2025 The Crossplane Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by controller-gen. DO NOT EDIT.
package v2
import (
"github.com/crossplane/crossplane-runtime/apis/common"
)
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ManagedResourceSpec) DeepCopyInto(out *ManagedResourceSpec) {
	*out = *in
	if in.WriteConnectionSecretToReference != nil {
		in, out := &in.WriteConnectionSecretToReference, &out.WriteConnectionSecretToReference
		*out = new(common.LocalSecretReference)
		**out = **in
	}
	if in.ProviderConfigReference != nil {
		in, out := &in.ProviderConfigReference, &out.ProviderConfigReference
		*out = new(common.ProviderConfigReference)
		**out = **in
	}
	if in.ManagementPolicies != nil {
		in, out := &in.ManagementPolicies, &out.ManagementPolicies
		// ManagementPolicies is a slice: allocate a fresh backing array and
		// copy element-wise. copy() performs value copies, which the generator
		// treats as sufficient for this element type.
		*out = make(common.ManagementPolicies, len(*in))
		copy(*out, *in)
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ManagedResourceSpec.
// A nil receiver yields nil.
func (in *ManagedResourceSpec) DeepCopy() *ManagedResourceSpec {
	if in == nil {
		return nil
	}
	out := new(ManagedResourceSpec)
	in.DeepCopyInto(out)
	return out
}

View File

@ -1,468 +0,0 @@
//go:build !ignore_autogenerated
/*
Copyright 2025 The Crossplane Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by controller-gen. DO NOT EDIT.
package common
import ()
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *CommonCredentialSelectors) DeepCopyInto(out *CommonCredentialSelectors) {
*out = *in
if in.Fs != nil {
in, out := &in.Fs, &out.Fs
*out = new(FsSelector)
**out = **in
}
if in.Env != nil {
in, out := &in.Env, &out.Env
*out = new(EnvSelector)
**out = **in
}
if in.SecretRef != nil {
in, out := &in.SecretRef, &out.SecretRef
*out = new(SecretKeySelector)
**out = **in
}
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CommonCredentialSelectors.
func (in *CommonCredentialSelectors) DeepCopy() *CommonCredentialSelectors {
if in == nil {
return nil
}
out := new(CommonCredentialSelectors)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *Condition) DeepCopyInto(out *Condition) {
*out = *in
in.LastTransitionTime.DeepCopyInto(&out.LastTransitionTime)
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Condition.
func (in *Condition) DeepCopy() *Condition {
if in == nil {
return nil
}
out := new(Condition)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ConditionedStatus) DeepCopyInto(out *ConditionedStatus) {
*out = *in
if in.Conditions != nil {
in, out := &in.Conditions, &out.Conditions
*out = make([]Condition, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConditionedStatus.
func (in *ConditionedStatus) DeepCopy() *ConditionedStatus {
if in == nil {
return nil
}
out := new(ConditionedStatus)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *EnvSelector) DeepCopyInto(out *EnvSelector) {
*out = *in
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EnvSelector.
func (in *EnvSelector) DeepCopy() *EnvSelector {
if in == nil {
return nil
}
out := new(EnvSelector)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *FsSelector) DeepCopyInto(out *FsSelector) {
*out = *in
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FsSelector.
func (in *FsSelector) DeepCopy() *FsSelector {
if in == nil {
return nil
}
out := new(FsSelector)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *LocalSecretKeySelector) DeepCopyInto(out *LocalSecretKeySelector) {
*out = *in
out.LocalSecretReference = in.LocalSecretReference
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LocalSecretKeySelector.
func (in *LocalSecretKeySelector) DeepCopy() *LocalSecretKeySelector {
if in == nil {
return nil
}
out := new(LocalSecretKeySelector)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *LocalSecretReference) DeepCopyInto(out *LocalSecretReference) {
*out = *in
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LocalSecretReference.
func (in *LocalSecretReference) DeepCopy() *LocalSecretReference {
if in == nil {
return nil
}
out := new(LocalSecretReference)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
// Note the value receiver: ManagementPolicies is a slice type, so the
// receiver is the slice header itself.
func (in ManagementPolicies) DeepCopyInto(out *ManagementPolicies) {
	{
		// Shadow the receiver with a pointer so the body below can use the
		// same *in pattern the generator emits for pointer receivers.
		in := &in
		*out = make(ManagementPolicies, len(*in))
		copy(*out, *in)
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ManagementPolicies.
// A nil slice yields nil; otherwise the copy is dereferenced back to a
// slice value.
func (in ManagementPolicies) DeepCopy() ManagementPolicies {
	if in == nil {
		return nil
	}
	out := new(ManagementPolicies)
	in.DeepCopyInto(out)
	return *out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *MergeOptions) DeepCopyInto(out *MergeOptions) {
*out = *in
if in.KeepMapValues != nil {
in, out := &in.KeepMapValues, &out.KeepMapValues
*out = new(bool)
**out = **in
}
if in.AppendSlice != nil {
in, out := &in.AppendSlice, &out.AppendSlice
*out = new(bool)
**out = **in
}
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MergeOptions.
func (in *MergeOptions) DeepCopy() *MergeOptions {
if in == nil {
return nil
}
out := new(MergeOptions)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *NamespacedReference) DeepCopyInto(out *NamespacedReference) {
*out = *in
if in.Policy != nil {
in, out := &in.Policy, &out.Policy
*out = new(Policy)
(*in).DeepCopyInto(*out)
}
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NamespacedReference.
func (in *NamespacedReference) DeepCopy() *NamespacedReference {
if in == nil {
return nil
}
out := new(NamespacedReference)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *NamespacedSelector) DeepCopyInto(out *NamespacedSelector) {
*out = *in
if in.MatchLabels != nil {
in, out := &in.MatchLabels, &out.MatchLabels
*out = make(map[string]string, len(*in))
for key, val := range *in {
(*out)[key] = val
}
}
if in.MatchControllerRef != nil {
in, out := &in.MatchControllerRef, &out.MatchControllerRef
*out = new(bool)
**out = **in
}
if in.Policy != nil {
in, out := &in.Policy, &out.Policy
*out = new(Policy)
(*in).DeepCopyInto(*out)
}
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NamespacedSelector.
func (in *NamespacedSelector) DeepCopy() *NamespacedSelector {
if in == nil {
return nil
}
out := new(NamespacedSelector)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ObservedStatus) DeepCopyInto(out *ObservedStatus) {
*out = *in
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ObservedStatus.
func (in *ObservedStatus) DeepCopy() *ObservedStatus {
if in == nil {
return nil
}
out := new(ObservedStatus)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *Policy) DeepCopyInto(out *Policy) {
*out = *in
if in.Resolve != nil {
in, out := &in.Resolve, &out.Resolve
*out = new(ResolvePolicy)
**out = **in
}
if in.Resolution != nil {
in, out := &in.Resolution, &out.Resolution
*out = new(ResolutionPolicy)
**out = **in
}
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Policy.
func (in *Policy) DeepCopy() *Policy {
if in == nil {
return nil
}
out := new(Policy)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ProviderConfigReference) DeepCopyInto(out *ProviderConfigReference) {
*out = *in
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProviderConfigReference.
func (in *ProviderConfigReference) DeepCopy() *ProviderConfigReference {
if in == nil {
return nil
}
out := new(ProviderConfigReference)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ProviderConfigStatus) DeepCopyInto(out *ProviderConfigStatus) {
*out = *in
in.ConditionedStatus.DeepCopyInto(&out.ConditionedStatus)
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProviderConfigStatus.
func (in *ProviderConfigStatus) DeepCopy() *ProviderConfigStatus {
if in == nil {
return nil
}
out := new(ProviderConfigStatus)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ProviderConfigUsage) DeepCopyInto(out *ProviderConfigUsage) {
*out = *in
in.ProviderConfigReference.DeepCopyInto(&out.ProviderConfigReference)
out.ResourceReference = in.ResourceReference
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProviderConfigUsage.
func (in *ProviderConfigUsage) DeepCopy() *ProviderConfigUsage {
if in == nil {
return nil
}
out := new(ProviderConfigUsage)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *Reference) DeepCopyInto(out *Reference) {
*out = *in
if in.Policy != nil {
in, out := &in.Policy, &out.Policy
*out = new(Policy)
(*in).DeepCopyInto(*out)
}
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Reference.
func (in *Reference) DeepCopy() *Reference {
if in == nil {
return nil
}
out := new(Reference)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ResourceStatus) DeepCopyInto(out *ResourceStatus) {
	*out = *in
	in.ConditionedStatus.DeepCopyInto(&out.ConditionedStatus)
	// ObservedStatus is copied by plain assignment; its own generated
	// DeepCopyInto in this file is a bare value copy, so this is equivalent.
	out.ObservedStatus = in.ObservedStatus
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceStatus.
// A nil receiver yields nil.
func (in *ResourceStatus) DeepCopy() *ResourceStatus {
	if in == nil {
		return nil
	}
	out := new(ResourceStatus)
	in.DeepCopyInto(out)
	return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *SecretKeySelector) DeepCopyInto(out *SecretKeySelector) {
	*out = *in
	// Plain assignment suffices: SecretReference is copied by value below.
	out.SecretReference = in.SecretReference
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SecretKeySelector.
func (in *SecretKeySelector) DeepCopy() *SecretKeySelector {
	if in == nil {
		return nil
	}
	out := new(SecretKeySelector)
	in.DeepCopyInto(out)
	return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *SecretReference) DeepCopyInto(out *SecretReference) {
	// A single shallow copy is the whole deep copy here, so the type
	// presumably holds only value fields.
	*out = *in
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SecretReference.
func (in *SecretReference) DeepCopy() *SecretReference {
	if in == nil {
		return nil
	}
	out := new(SecretReference)
	in.DeepCopyInto(out)
	return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *Selector) DeepCopyInto(out *Selector) {
	*out = *in
	// MatchLabels: rebuild the map key-by-key so the copy does not share
	// the original's map storage.
	if in.MatchLabels != nil {
		in, out := &in.MatchLabels, &out.MatchLabels
		*out = make(map[string]string, len(*in))
		for key, val := range *in {
			(*out)[key] = val
		}
	}
	// MatchControllerRef: optional *bool, re-allocated when present.
	if in.MatchControllerRef != nil {
		in, out := &in.MatchControllerRef, &out.MatchControllerRef
		*out = new(bool)
		**out = **in
	}
	// Policy: optional pointer, deep-copied when present.
	if in.Policy != nil {
		in, out := &in.Policy, &out.Policy
		*out = new(Policy)
		(*in).DeepCopyInto(*out)
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Selector.
func (in *Selector) DeepCopy() *Selector {
	if in == nil {
		return nil
	}
	out := new(Selector)
	in.DeepCopyInto(out)
	return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *TypedProviderConfigUsage) DeepCopyInto(out *TypedProviderConfigUsage) {
	*out = *in
	// Both references are assigned by value — unlike ProviderConfigUsage,
	// no nested DeepCopyInto is invoked here.
	out.ProviderConfigReference = in.ProviderConfigReference
	out.ResourceReference = in.ResourceReference
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TypedProviderConfigUsage.
func (in *TypedProviderConfigUsage) DeepCopy() *TypedProviderConfigUsage {
	if in == nil {
		return nil
	}
	out := new(TypedProviderConfigUsage)
	in.DeepCopyInto(out)
	return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *TypedReference) DeepCopyInto(out *TypedReference) {
	// Shallow copy is sufficient; the type presumably has only value fields.
	*out = *in
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TypedReference.
func (in *TypedReference) DeepCopy() *TypedReference {
	if in == nil {
		return nil
	}
	out := new(TypedReference)
	in.DeepCopyInto(out)
	return out
}

View File

@ -1,607 +0,0 @@
//
//Copyright 2023 The Crossplane Authors.
//Licensed under the Apache License, Version 2.0 (the "License");
//you may not use this file except in compliance with the License.
//You may obtain a copy of the License at
//http://www.apache.org/licenses/LICENSE-2.0
//Unless required by applicable law or agreed to in writing, software
//distributed under the License is distributed on an "AS IS" BASIS,
//WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
//See the License for the specific language governing permissions and
//limitations under the License.
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
// protoc-gen-go v1.36.5
// protoc (unknown)
// source: proto/v1alpha1/ess.proto
// buf:lint:ignore PACKAGE_DIRECTORY_MATCH
package v1alpha1
import (
protoreflect "google.golang.org/protobuf/reflect/protoreflect"
protoimpl "google.golang.org/protobuf/runtime/protoimpl"
reflect "reflect"
sync "sync"
unsafe "unsafe"
)
// Compile-time guards emitted by protoc-gen-go: both EnforceVersion
// expressions fail to compile if the protoimpl runtime is outside the
// [MinVersion, MaxVersion] window this file was generated against.
const (
	// Verify that this generated code is sufficiently up-to-date.
	_ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
	// Verify that runtime/protoimpl is sufficiently up-to-date.
	_ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
)
// ConfigReference is used to refer a StoreConfig object.
// NOTE(review): protoc-gen-go output ("DO NOT EDIT" header) — change
// proto/v1alpha1/ess.proto and regenerate instead of editing here.
type ConfigReference struct {
	state protoimpl.MessageState `protogen:"open.v1"`
	ApiVersion string `protobuf:"bytes,1,opt,name=api_version,json=apiVersion,proto3" json:"api_version,omitempty"`
	Kind string `protobuf:"bytes,2,opt,name=kind,proto3" json:"kind,omitempty"`
	Name string `protobuf:"bytes,3,opt,name=name,proto3" json:"name,omitempty"`
	unknownFields protoimpl.UnknownFields
	sizeCache protoimpl.SizeCache
}

func (x *ConfigReference) Reset() {
	*x = ConfigReference{}
	mi := &file_proto_v1alpha1_ess_proto_msgTypes[0]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

func (x *ConfigReference) String() string {
	return protoimpl.X.MessageStringOf(x)
}

func (*ConfigReference) ProtoMessage() {}

func (x *ConfigReference) ProtoReflect() protoreflect.Message {
	mi := &file_proto_v1alpha1_ess_proto_msgTypes[0]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use ConfigReference.ProtoReflect.Descriptor instead.
func (*ConfigReference) Descriptor() ([]byte, []int) {
	return file_proto_v1alpha1_ess_proto_rawDescGZIP(), []int{0}
}

// Getters are nil-safe: a nil receiver yields the zero value.
func (x *ConfigReference) GetApiVersion() string {
	if x != nil {
		return x.ApiVersion
	}
	return ""
}

func (x *ConfigReference) GetKind() string {
	if x != nil {
		return x.Kind
	}
	return ""
}

func (x *ConfigReference) GetName() string {
	if x != nil {
		return x.Name
	}
	return ""
}
// Secret defines the structure of a secret.
// NOTE(review): generated by protoc-gen-go from ess.proto — do not hand-edit.
type Secret struct {
	state protoimpl.MessageState `protogen:"open.v1"`
	ScopedName string `protobuf:"bytes,1,opt,name=scoped_name,json=scopedName,proto3" json:"scoped_name,omitempty"`
	Metadata map[string]string `protobuf:"bytes,2,rep,name=metadata,proto3" json:"metadata,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"`
	Data map[string][]byte `protobuf:"bytes,3,rep,name=data,proto3" json:"data,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"`
	unknownFields protoimpl.UnknownFields
	sizeCache protoimpl.SizeCache
}

func (x *Secret) Reset() {
	*x = Secret{}
	mi := &file_proto_v1alpha1_ess_proto_msgTypes[1]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

func (x *Secret) String() string {
	return protoimpl.X.MessageStringOf(x)
}

func (*Secret) ProtoMessage() {}

func (x *Secret) ProtoReflect() protoreflect.Message {
	mi := &file_proto_v1alpha1_ess_proto_msgTypes[1]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use Secret.ProtoReflect.Descriptor instead.
func (*Secret) Descriptor() ([]byte, []int) {
	return file_proto_v1alpha1_ess_proto_rawDescGZIP(), []int{1}
}

// Getters are nil-safe; map getters return nil (not an empty map) when unset.
func (x *Secret) GetScopedName() string {
	if x != nil {
		return x.ScopedName
	}
	return ""
}

func (x *Secret) GetMetadata() map[string]string {
	if x != nil {
		return x.Metadata
	}
	return nil
}

func (x *Secret) GetData() map[string][]byte {
	if x != nil {
		return x.Data
	}
	return nil
}
// GetSecretRequest requests secret from the secret store.
// NOTE(review): generated by protoc-gen-go from ess.proto — do not hand-edit.
type GetSecretRequest struct {
	state protoimpl.MessageState `protogen:"open.v1"`
	Config *ConfigReference `protobuf:"bytes,1,opt,name=config,proto3" json:"config,omitempty"`
	Secret *Secret `protobuf:"bytes,2,opt,name=secret,proto3" json:"secret,omitempty"`
	unknownFields protoimpl.UnknownFields
	sizeCache protoimpl.SizeCache
}

func (x *GetSecretRequest) Reset() {
	*x = GetSecretRequest{}
	mi := &file_proto_v1alpha1_ess_proto_msgTypes[2]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

func (x *GetSecretRequest) String() string {
	return protoimpl.X.MessageStringOf(x)
}

func (*GetSecretRequest) ProtoMessage() {}

func (x *GetSecretRequest) ProtoReflect() protoreflect.Message {
	mi := &file_proto_v1alpha1_ess_proto_msgTypes[2]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use GetSecretRequest.ProtoReflect.Descriptor instead.
func (*GetSecretRequest) Descriptor() ([]byte, []int) {
	return file_proto_v1alpha1_ess_proto_rawDescGZIP(), []int{2}
}

// Getters are nil-safe: a nil receiver yields nil sub-messages.
func (x *GetSecretRequest) GetConfig() *ConfigReference {
	if x != nil {
		return x.Config
	}
	return nil
}

func (x *GetSecretRequest) GetSecret() *Secret {
	if x != nil {
		return x.Secret
	}
	return nil
}
// GetSecretResponse returns the secret from the secret store.
// NOTE(review): generated by protoc-gen-go from ess.proto — do not hand-edit.
type GetSecretResponse struct {
	state protoimpl.MessageState `protogen:"open.v1"`
	Secret *Secret `protobuf:"bytes,1,opt,name=secret,proto3" json:"secret,omitempty"`
	unknownFields protoimpl.UnknownFields
	sizeCache protoimpl.SizeCache
}

func (x *GetSecretResponse) Reset() {
	*x = GetSecretResponse{}
	mi := &file_proto_v1alpha1_ess_proto_msgTypes[3]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

func (x *GetSecretResponse) String() string {
	return protoimpl.X.MessageStringOf(x)
}

func (*GetSecretResponse) ProtoMessage() {}

func (x *GetSecretResponse) ProtoReflect() protoreflect.Message {
	mi := &file_proto_v1alpha1_ess_proto_msgTypes[3]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use GetSecretResponse.ProtoReflect.Descriptor instead.
func (*GetSecretResponse) Descriptor() ([]byte, []int) {
	return file_proto_v1alpha1_ess_proto_rawDescGZIP(), []int{3}
}

// GetSecret is nil-safe: a nil receiver yields nil.
func (x *GetSecretResponse) GetSecret() *Secret {
	if x != nil {
		return x.Secret
	}
	return nil
}
// ApplySecretRequest applies the secret data update to the secret store.
// NOTE(review): generated by protoc-gen-go from ess.proto — do not hand-edit.
type ApplySecretRequest struct {
	state protoimpl.MessageState `protogen:"open.v1"`
	Config *ConfigReference `protobuf:"bytes,1,opt,name=config,proto3" json:"config,omitempty"`
	Secret *Secret `protobuf:"bytes,2,opt,name=secret,proto3" json:"secret,omitempty"`
	unknownFields protoimpl.UnknownFields
	sizeCache protoimpl.SizeCache
}

func (x *ApplySecretRequest) Reset() {
	*x = ApplySecretRequest{}
	mi := &file_proto_v1alpha1_ess_proto_msgTypes[4]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

func (x *ApplySecretRequest) String() string {
	return protoimpl.X.MessageStringOf(x)
}

func (*ApplySecretRequest) ProtoMessage() {}

func (x *ApplySecretRequest) ProtoReflect() protoreflect.Message {
	mi := &file_proto_v1alpha1_ess_proto_msgTypes[4]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use ApplySecretRequest.ProtoReflect.Descriptor instead.
func (*ApplySecretRequest) Descriptor() ([]byte, []int) {
	return file_proto_v1alpha1_ess_proto_rawDescGZIP(), []int{4}
}

// Getters are nil-safe: a nil receiver yields nil sub-messages.
func (x *ApplySecretRequest) GetConfig() *ConfigReference {
	if x != nil {
		return x.Config
	}
	return nil
}

func (x *ApplySecretRequest) GetSecret() *Secret {
	if x != nil {
		return x.Secret
	}
	return nil
}
// ApplySecretResponse returns if the secret is changed or not.
// NOTE(review): generated by protoc-gen-go from ess.proto — do not hand-edit.
type ApplySecretResponse struct {
	state protoimpl.MessageState `protogen:"open.v1"`
	Changed bool `protobuf:"varint,1,opt,name=changed,proto3" json:"changed,omitempty"`
	unknownFields protoimpl.UnknownFields
	sizeCache protoimpl.SizeCache
}

func (x *ApplySecretResponse) Reset() {
	*x = ApplySecretResponse{}
	mi := &file_proto_v1alpha1_ess_proto_msgTypes[5]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

func (x *ApplySecretResponse) String() string {
	return protoimpl.X.MessageStringOf(x)
}

func (*ApplySecretResponse) ProtoMessage() {}

func (x *ApplySecretResponse) ProtoReflect() protoreflect.Message {
	mi := &file_proto_v1alpha1_ess_proto_msgTypes[5]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use ApplySecretResponse.ProtoReflect.Descriptor instead.
func (*ApplySecretResponse) Descriptor() ([]byte, []int) {
	return file_proto_v1alpha1_ess_proto_rawDescGZIP(), []int{5}
}

// GetChanged is nil-safe: a nil receiver yields false.
func (x *ApplySecretResponse) GetChanged() bool {
	if x != nil {
		return x.Changed
	}
	return false
}
// DeleteKeysRequest deletes the secret from the secret store.
// NOTE(review): generated by protoc-gen-go from ess.proto — do not hand-edit.
type DeleteKeysRequest struct {
	state protoimpl.MessageState `protogen:"open.v1"`
	Config *ConfigReference `protobuf:"bytes,1,opt,name=config,proto3" json:"config,omitempty"`
	Secret *Secret `protobuf:"bytes,2,opt,name=secret,proto3" json:"secret,omitempty"`
	unknownFields protoimpl.UnknownFields
	sizeCache protoimpl.SizeCache
}

func (x *DeleteKeysRequest) Reset() {
	*x = DeleteKeysRequest{}
	mi := &file_proto_v1alpha1_ess_proto_msgTypes[6]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

func (x *DeleteKeysRequest) String() string {
	return protoimpl.X.MessageStringOf(x)
}

func (*DeleteKeysRequest) ProtoMessage() {}

func (x *DeleteKeysRequest) ProtoReflect() protoreflect.Message {
	mi := &file_proto_v1alpha1_ess_proto_msgTypes[6]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use DeleteKeysRequest.ProtoReflect.Descriptor instead.
func (*DeleteKeysRequest) Descriptor() ([]byte, []int) {
	return file_proto_v1alpha1_ess_proto_rawDescGZIP(), []int{6}
}

// Getters are nil-safe: a nil receiver yields nil sub-messages.
func (x *DeleteKeysRequest) GetConfig() *ConfigReference {
	if x != nil {
		return x.Config
	}
	return nil
}

func (x *DeleteKeysRequest) GetSecret() *Secret {
	if x != nil {
		return x.Secret
	}
	return nil
}
// DeleteKeysResponse is returned if the secret is deleted.
// The message carries no fields; success is signalled by the RPC
// completing without error.
type DeleteKeysResponse struct {
	state protoimpl.MessageState `protogen:"open.v1"`
	unknownFields protoimpl.UnknownFields
	sizeCache protoimpl.SizeCache
}

func (x *DeleteKeysResponse) Reset() {
	*x = DeleteKeysResponse{}
	mi := &file_proto_v1alpha1_ess_proto_msgTypes[7]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

func (x *DeleteKeysResponse) String() string {
	return protoimpl.X.MessageStringOf(x)
}

func (*DeleteKeysResponse) ProtoMessage() {}

func (x *DeleteKeysResponse) ProtoReflect() protoreflect.Message {
	mi := &file_proto_v1alpha1_ess_proto_msgTypes[7]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use DeleteKeysResponse.ProtoReflect.Descriptor instead.
func (*DeleteKeysResponse) Descriptor() ([]byte, []int) {
	return file_proto_v1alpha1_ess_proto_rawDescGZIP(), []int{7}
}
// File_proto_v1alpha1_ess_proto is the runtime descriptor for ess.proto,
// populated by file_proto_v1alpha1_ess_proto_init.
var File_proto_v1alpha1_ess_proto protoreflect.FileDescriptor

// file_proto_v1alpha1_ess_proto_rawDesc is the wire-encoded
// FileDescriptorProto for ess.proto, emitted by protoc-gen-go.
// NOTE(review): machine-generated data — never edit these bytes by hand.
var file_proto_v1alpha1_ess_proto_rawDesc = string([]byte{
	0x0a, 0x18, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31,
	0x2f, 0x65, 0x73, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x12, 0x65, 0x73, 0x73, 0x2e,
	0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x22, 0x5a,
	0x0a, 0x0f, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63,
	0x65, 0x12, 0x1f, 0x0a, 0x0b, 0x61, 0x70, 0x69, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e,
	0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x61, 0x70, 0x69, 0x56, 0x65, 0x72, 0x73, 0x69,
	0x6f, 0x6e, 0x12, 0x12, 0x0a, 0x04, 0x6b, 0x69, 0x6e, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09,
	0x52, 0x04, 0x6b, 0x69, 0x6e, 0x64, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x03,
	0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x22, 0x9f, 0x02, 0x0a, 0x06, 0x53,
	0x65, 0x63, 0x72, 0x65, 0x74, 0x12, 0x1f, 0x0a, 0x0b, 0x73, 0x63, 0x6f, 0x70, 0x65, 0x64, 0x5f,
	0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x73, 0x63, 0x6f, 0x70,
	0x65, 0x64, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x44, 0x0a, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61,
	0x74, 0x61, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x28, 0x2e, 0x65, 0x73, 0x73, 0x2e, 0x70,
	0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x53, 0x65,
	0x63, 0x72, 0x65, 0x74, 0x2e, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x45, 0x6e, 0x74,
	0x72, 0x79, 0x52, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x12, 0x38, 0x0a, 0x04,
	0x64, 0x61, 0x74, 0x61, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x65, 0x73, 0x73,
	0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e,
	0x53, 0x65, 0x63, 0x72, 0x65, 0x74, 0x2e, 0x44, 0x61, 0x74, 0x61, 0x45, 0x6e, 0x74, 0x72, 0x79,
	0x52, 0x04, 0x64, 0x61, 0x74, 0x61, 0x1a, 0x3b, 0x0a, 0x0d, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61,
	0x74, 0x61, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01,
	0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c,
	0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a,
	0x02, 0x38, 0x01, 0x1a, 0x37, 0x0a, 0x09, 0x44, 0x61, 0x74, 0x61, 0x45, 0x6e, 0x74, 0x72, 0x79,
	0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b,
	0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28,
	0x0c, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x83, 0x01, 0x0a,
	0x10, 0x47, 0x65, 0x74, 0x53, 0x65, 0x63, 0x72, 0x65, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73,
	0x74, 0x12, 0x3b, 0x0a, 0x06, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x01, 0x20, 0x01, 0x28,
	0x0b, 0x32, 0x23, 0x2e, 0x65, 0x73, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x31,
	0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x65, 0x66,
	0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x52, 0x06, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x32,
	0x0a, 0x06, 0x73, 0x65, 0x63, 0x72, 0x65, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a,
	0x2e, 0x65, 0x73, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70,
	0x68, 0x61, 0x31, 0x2e, 0x53, 0x65, 0x63, 0x72, 0x65, 0x74, 0x52, 0x06, 0x73, 0x65, 0x63, 0x72,
	0x65, 0x74, 0x22, 0x47, 0x0a, 0x11, 0x47, 0x65, 0x74, 0x53, 0x65, 0x63, 0x72, 0x65, 0x74, 0x52,
	0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x32, 0x0a, 0x06, 0x73, 0x65, 0x63, 0x72, 0x65,
	0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x65, 0x73, 0x73, 0x2e, 0x70, 0x72,
	0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x53, 0x65, 0x63,
	0x72, 0x65, 0x74, 0x52, 0x06, 0x73, 0x65, 0x63, 0x72, 0x65, 0x74, 0x22, 0x85, 0x01, 0x0a, 0x12,
	0x41, 0x70, 0x70, 0x6c, 0x79, 0x53, 0x65, 0x63, 0x72, 0x65, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65,
	0x73, 0x74, 0x12, 0x3b, 0x0a, 0x06, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x01, 0x20, 0x01,
	0x28, 0x0b, 0x32, 0x23, 0x2e, 0x65, 0x73, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76,
	0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x65,
	0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x52, 0x06, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12,
	0x32, 0x0a, 0x06, 0x73, 0x65, 0x63, 0x72, 0x65, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32,
	0x1a, 0x2e, 0x65, 0x73, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x31, 0x61, 0x6c,
	0x70, 0x68, 0x61, 0x31, 0x2e, 0x53, 0x65, 0x63, 0x72, 0x65, 0x74, 0x52, 0x06, 0x73, 0x65, 0x63,
	0x72, 0x65, 0x74, 0x22, 0x2f, 0x0a, 0x13, 0x41, 0x70, 0x70, 0x6c, 0x79, 0x53, 0x65, 0x63, 0x72,
	0x65, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x63, 0x68,
	0x61, 0x6e, 0x67, 0x65, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x07, 0x63, 0x68, 0x61,
	0x6e, 0x67, 0x65, 0x64, 0x22, 0x84, 0x01, 0x0a, 0x11, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x4b,
	0x65, 0x79, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x3b, 0x0a, 0x06, 0x63, 0x6f,
	0x6e, 0x66, 0x69, 0x67, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x23, 0x2e, 0x65, 0x73, 0x73,
	0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e,
	0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x52,
	0x06, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x32, 0x0a, 0x06, 0x73, 0x65, 0x63, 0x72, 0x65,
	0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x65, 0x73, 0x73, 0x2e, 0x70, 0x72,
	0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x53, 0x65, 0x63,
	0x72, 0x65, 0x74, 0x52, 0x06, 0x73, 0x65, 0x63, 0x72, 0x65, 0x74, 0x22, 0x14, 0x0a, 0x12, 0x44,
	0x65, 0x6c, 0x65, 0x74, 0x65, 0x4b, 0x65, 0x79, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73,
	0x65, 0x32, 0xbf, 0x02, 0x0a, 0x20, 0x45, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x53, 0x65,
	0x63, 0x72, 0x65, 0x74, 0x53, 0x74, 0x6f, 0x72, 0x65, 0x50, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x53,
	0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x5a, 0x0a, 0x09, 0x47, 0x65, 0x74, 0x53, 0x65, 0x63,
	0x72, 0x65, 0x74, 0x12, 0x24, 0x2e, 0x65, 0x73, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e,
	0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x47, 0x65, 0x74, 0x53, 0x65, 0x63, 0x72,
	0x65, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x25, 0x2e, 0x65, 0x73, 0x73, 0x2e,
	0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x47,
	0x65, 0x74, 0x53, 0x65, 0x63, 0x72, 0x65, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65,
	0x22, 0x00, 0x12, 0x60, 0x0a, 0x0b, 0x41, 0x70, 0x70, 0x6c, 0x79, 0x53, 0x65, 0x63, 0x72, 0x65,
	0x74, 0x12, 0x26, 0x2e, 0x65, 0x73, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x31,
	0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x41, 0x70, 0x70, 0x6c, 0x79, 0x53, 0x65, 0x63, 0x72,
	0x65, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x27, 0x2e, 0x65, 0x73, 0x73, 0x2e,
	0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x41,
	0x70, 0x70, 0x6c, 0x79, 0x53, 0x65, 0x63, 0x72, 0x65, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e,
	0x73, 0x65, 0x22, 0x00, 0x12, 0x5d, 0x0a, 0x0a, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x4b, 0x65,
	0x79, 0x73, 0x12, 0x25, 0x2e, 0x65, 0x73, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76,
	0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x4b, 0x65,
	0x79, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x26, 0x2e, 0x65, 0x73, 0x73, 0x2e,
	0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x44,
	0x65, 0x6c, 0x65, 0x74, 0x65, 0x4b, 0x65, 0x79, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73,
	0x65, 0x22, 0x00, 0x42, 0x3e, 0x5a, 0x3c, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f,
	0x6d, 0x2f, 0x63, 0x72, 0x6f, 0x73, 0x73, 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x2f, 0x63, 0x72, 0x6f,
	0x73, 0x73, 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x2d, 0x72, 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x2f,
	0x61, 0x70, 0x69, 0x73, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x76, 0x31, 0x61, 0x6c, 0x70,
	0x68, 0x61, 0x31, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
})
var (
	file_proto_v1alpha1_ess_proto_rawDescOnce sync.Once
	file_proto_v1alpha1_ess_proto_rawDescData []byte
)

// file_proto_v1alpha1_ess_proto_rawDescGZIP lazily gzip-compresses the raw
// descriptor exactly once (guarded by sync.Once) and caches the result for
// the Descriptor() methods above.
func file_proto_v1alpha1_ess_proto_rawDescGZIP() []byte {
	file_proto_v1alpha1_ess_proto_rawDescOnce.Do(func() {
		// unsafe.Slice over the string data avoids copying the descriptor
		// before compression.
		file_proto_v1alpha1_ess_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_proto_v1alpha1_ess_proto_rawDesc), len(file_proto_v1alpha1_ess_proto_rawDesc)))
	})
	return file_proto_v1alpha1_ess_proto_rawDescData
}
// Runtime type tables generated by protoc-gen-go: 8 named messages plus the
// two synthetic map-entry messages (indexes 8 and 9) for Secret's maps.
var file_proto_v1alpha1_ess_proto_msgTypes = make([]protoimpl.MessageInfo, 10)
var file_proto_v1alpha1_ess_proto_goTypes = []any{
	(*ConfigReference)(nil), // 0: ess.proto.v1alpha1.ConfigReference
	(*Secret)(nil), // 1: ess.proto.v1alpha1.Secret
	(*GetSecretRequest)(nil), // 2: ess.proto.v1alpha1.GetSecretRequest
	(*GetSecretResponse)(nil), // 3: ess.proto.v1alpha1.GetSecretResponse
	(*ApplySecretRequest)(nil), // 4: ess.proto.v1alpha1.ApplySecretRequest
	(*ApplySecretResponse)(nil), // 5: ess.proto.v1alpha1.ApplySecretResponse
	(*DeleteKeysRequest)(nil), // 6: ess.proto.v1alpha1.DeleteKeysRequest
	(*DeleteKeysResponse)(nil), // 7: ess.proto.v1alpha1.DeleteKeysResponse
	nil, // 8: ess.proto.v1alpha1.Secret.MetadataEntry
	nil, // 9: ess.proto.v1alpha1.Secret.DataEntry
}

// depIdxs maps each field/method type reference to an index in goTypes; the
// trailing sub-list bounds partition it into output types, input types,
// extensions, and field type_names.
var file_proto_v1alpha1_ess_proto_depIdxs = []int32{
	8, // 0: ess.proto.v1alpha1.Secret.metadata:type_name -> ess.proto.v1alpha1.Secret.MetadataEntry
	9, // 1: ess.proto.v1alpha1.Secret.data:type_name -> ess.proto.v1alpha1.Secret.DataEntry
	0, // 2: ess.proto.v1alpha1.GetSecretRequest.config:type_name -> ess.proto.v1alpha1.ConfigReference
	1, // 3: ess.proto.v1alpha1.GetSecretRequest.secret:type_name -> ess.proto.v1alpha1.Secret
	1, // 4: ess.proto.v1alpha1.GetSecretResponse.secret:type_name -> ess.proto.v1alpha1.Secret
	0, // 5: ess.proto.v1alpha1.ApplySecretRequest.config:type_name -> ess.proto.v1alpha1.ConfigReference
	1, // 6: ess.proto.v1alpha1.ApplySecretRequest.secret:type_name -> ess.proto.v1alpha1.Secret
	0, // 7: ess.proto.v1alpha1.DeleteKeysRequest.config:type_name -> ess.proto.v1alpha1.ConfigReference
	1, // 8: ess.proto.v1alpha1.DeleteKeysRequest.secret:type_name -> ess.proto.v1alpha1.Secret
	2, // 9: ess.proto.v1alpha1.ExternalSecretStorePluginService.GetSecret:input_type -> ess.proto.v1alpha1.GetSecretRequest
	4, // 10: ess.proto.v1alpha1.ExternalSecretStorePluginService.ApplySecret:input_type -> ess.proto.v1alpha1.ApplySecretRequest
	6, // 11: ess.proto.v1alpha1.ExternalSecretStorePluginService.DeleteKeys:input_type -> ess.proto.v1alpha1.DeleteKeysRequest
	3, // 12: ess.proto.v1alpha1.ExternalSecretStorePluginService.GetSecret:output_type -> ess.proto.v1alpha1.GetSecretResponse
	5, // 13: ess.proto.v1alpha1.ExternalSecretStorePluginService.ApplySecret:output_type -> ess.proto.v1alpha1.ApplySecretResponse
	7, // 14: ess.proto.v1alpha1.ExternalSecretStorePluginService.DeleteKeys:output_type -> ess.proto.v1alpha1.DeleteKeysResponse
	12, // [12:15] is the sub-list for method output_type
	9, // [9:12] is the sub-list for method input_type
	9, // [9:9] is the sub-list for extension type_name
	9, // [9:9] is the sub-list for extension extendee
	0, // [0:9] is the sub-list for field type_name
}
func init() { file_proto_v1alpha1_ess_proto_init() }

// file_proto_v1alpha1_ess_proto_init builds the file descriptor from the raw
// bytes and type tables at package load time. It is idempotent: a non-nil
// File_proto_v1alpha1_ess_proto means initialization already ran.
func file_proto_v1alpha1_ess_proto_init() {
	if File_proto_v1alpha1_ess_proto != nil {
		return
	}
	type x struct{}
	out := protoimpl.TypeBuilder{
		File: protoimpl.DescBuilder{
			// The empty struct x pins this package's import path for the builder.
			GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
			RawDescriptor: unsafe.Slice(unsafe.StringData(file_proto_v1alpha1_ess_proto_rawDesc), len(file_proto_v1alpha1_ess_proto_rawDesc)),
			NumEnums: 0,
			NumMessages: 10,
			NumExtensions: 0,
			NumServices: 1,
		},
		GoTypes: file_proto_v1alpha1_ess_proto_goTypes,
		DependencyIndexes: file_proto_v1alpha1_ess_proto_depIdxs,
		MessageInfos: file_proto_v1alpha1_ess_proto_msgTypes,
	}.Build()
	File_proto_v1alpha1_ess_proto = out.File
	// Release the registration tables; the runtime keeps what it needs.
	file_proto_v1alpha1_ess_proto_goTypes = nil
	file_proto_v1alpha1_ess_proto_depIdxs = nil
}

View File

@ -1,71 +0,0 @@
/*
Copyright 2023 The Crossplane Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
syntax = "proto3";

// buf:lint:ignore PACKAGE_DIRECTORY_MATCH
package ess.proto.v1alpha1;

option go_package = "github.com/crossplane/crossplane-runtime/apis/proto/v1alpha1";

// ExternalSecretStorePluginService defines the APIs for an External Secret Store plugin.
service ExternalSecretStorePluginService {
    rpc GetSecret(GetSecretRequest) returns (GetSecretResponse) {}
    rpc ApplySecret(ApplySecretRequest) returns (ApplySecretResponse) {}
    rpc DeleteKeys(DeleteKeysRequest) returns (DeleteKeysResponse) {}
}

// ConfigReference is used to refer a StoreConfig object.
message ConfigReference {
    string api_version = 1;
    string kind = 2;
    string name = 3;
}

// Secret defines the structure of a secret.
message Secret {
    // scoped_name presumably identifies the secret within the store's
    // scope — confirm semantics against the plugin implementations.
    string scoped_name = 1;
    map<string, string> metadata = 2;
    map<string, bytes> data = 3;
}

// GetSecretRequest requests secret from the secret store.
message GetSecretRequest {
    ConfigReference config = 1;
    Secret secret = 2;
}

// GetSecretResponse returns the secret from the secret store.
message GetSecretResponse {
    Secret secret = 1;
}

// ApplySecretRequest applies the secret data update to the secret store.
message ApplySecretRequest {
    ConfigReference config = 1;
    Secret secret = 2;
}

// ApplySecretResponse returns if the secret is changed or not.
message ApplySecretResponse {
    bool changed = 1;
}

// DeleteKeysRequest deletes the secret from the secret store.
message DeleteKeysRequest {
    ConfigReference config = 1;
    Secret secret = 2;
}

// DeleteKeysResponse is returned if the secret is deleted.
message DeleteKeysResponse {}

View File

@ -1,198 +0,0 @@
//
//Copyright 2023 The Crossplane Authors.
//Licensed under the Apache License, Version 2.0 (the "License");
//you may not use this file except in compliance with the License.
//You may obtain a copy of the License at
//http://www.apache.org/licenses/LICENSE-2.0
//Unless required by applicable law or agreed to in writing, software
//distributed under the License is distributed on an "AS IS" BASIS,
//WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
//See the License for the specific language governing permissions and
//limitations under the License.
// Code generated by protoc-gen-go-grpc. DO NOT EDIT.
// versions:
// - protoc-gen-go-grpc v1.3.0
// - protoc (unknown)
// source: proto/v1alpha1/ess.proto
// buf:lint:ignore PACKAGE_DIRECTORY_MATCH
package v1alpha1
import (
context "context"
grpc "google.golang.org/grpc"
codes "google.golang.org/grpc/codes"
status "google.golang.org/grpc/status"
)
// This is a compile-time assertion to ensure that this generated file
// is compatible with the grpc package it is being compiled against.
// Requires gRPC-Go v1.32.0 or later.
const _ = grpc.SupportPackageIsVersion7

// Fully-qualified method names used when invoking RPCs on the connection.
const (
	ExternalSecretStorePluginService_GetSecret_FullMethodName = "/ess.proto.v1alpha1.ExternalSecretStorePluginService/GetSecret"
	ExternalSecretStorePluginService_ApplySecret_FullMethodName = "/ess.proto.v1alpha1.ExternalSecretStorePluginService/ApplySecret"
	ExternalSecretStorePluginService_DeleteKeys_FullMethodName = "/ess.proto.v1alpha1.ExternalSecretStorePluginService/DeleteKeys"
)
// ExternalSecretStorePluginServiceClient is the client API for ExternalSecretStorePluginService service.
//
// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream.
// NOTE(review): protoc-gen-go-grpc output ("DO NOT EDIT" header) — regenerate
// from ess.proto rather than hand-editing.
type ExternalSecretStorePluginServiceClient interface {
	GetSecret(ctx context.Context, in *GetSecretRequest, opts ...grpc.CallOption) (*GetSecretResponse, error)
	ApplySecret(ctx context.Context, in *ApplySecretRequest, opts ...grpc.CallOption) (*ApplySecretResponse, error)
	DeleteKeys(ctx context.Context, in *DeleteKeysRequest, opts ...grpc.CallOption) (*DeleteKeysResponse, error)
}

// externalSecretStorePluginServiceClient is the concrete client; it holds
// only the connection interface used to invoke RPCs.
type externalSecretStorePluginServiceClient struct {
	cc grpc.ClientConnInterface
}

func NewExternalSecretStorePluginServiceClient(cc grpc.ClientConnInterface) ExternalSecretStorePluginServiceClient {
	return &externalSecretStorePluginServiceClient{cc}
}
func (c *externalSecretStorePluginServiceClient) GetSecret(ctx context.Context, in *GetSecretRequest, opts ...grpc.CallOption) (*GetSecretResponse, error) {
out := new(GetSecretResponse)
err := c.cc.Invoke(ctx, ExternalSecretStorePluginService_GetSecret_FullMethodName, in, out, opts...)
if err != nil {
return nil, err
}
return out, nil
}
func (c *externalSecretStorePluginServiceClient) ApplySecret(ctx context.Context, in *ApplySecretRequest, opts ...grpc.CallOption) (*ApplySecretResponse, error) {
out := new(ApplySecretResponse)
err := c.cc.Invoke(ctx, ExternalSecretStorePluginService_ApplySecret_FullMethodName, in, out, opts...)
if err != nil {
return nil, err
}
return out, nil
}
func (c *externalSecretStorePluginServiceClient) DeleteKeys(ctx context.Context, in *DeleteKeysRequest, opts ...grpc.CallOption) (*DeleteKeysResponse, error) {
out := new(DeleteKeysResponse)
err := c.cc.Invoke(ctx, ExternalSecretStorePluginService_DeleteKeys_FullMethodName, in, out, opts...)
if err != nil {
return nil, err
}
return out, nil
}
// ExternalSecretStorePluginServiceServer is the server API for ExternalSecretStorePluginService service.
// All implementations must embed UnimplementedExternalSecretStorePluginServiceServer
// for forward compatibility
type ExternalSecretStorePluginServiceServer interface {
	GetSecret(context.Context, *GetSecretRequest) (*GetSecretResponse, error)
	ApplySecret(context.Context, *ApplySecretRequest) (*ApplySecretResponse, error)
	DeleteKeys(context.Context, *DeleteKeysRequest) (*DeleteKeysResponse, error)
	mustEmbedUnimplementedExternalSecretStorePluginServiceServer()
}

// UnimplementedExternalSecretStorePluginServiceServer must be embedded to have forward compatible implementations.
// Every RPC stub below returns codes.Unimplemented until overridden.
type UnimplementedExternalSecretStorePluginServiceServer struct {
}

func (UnimplementedExternalSecretStorePluginServiceServer) GetSecret(context.Context, *GetSecretRequest) (*GetSecretResponse, error) {
	return nil, status.Errorf(codes.Unimplemented, "method GetSecret not implemented")
}
func (UnimplementedExternalSecretStorePluginServiceServer) ApplySecret(context.Context, *ApplySecretRequest) (*ApplySecretResponse, error) {
	return nil, status.Errorf(codes.Unimplemented, "method ApplySecret not implemented")
}
func (UnimplementedExternalSecretStorePluginServiceServer) DeleteKeys(context.Context, *DeleteKeysRequest) (*DeleteKeysResponse, error) {
	return nil, status.Errorf(codes.Unimplemented, "method DeleteKeys not implemented")
}
func (UnimplementedExternalSecretStorePluginServiceServer) mustEmbedUnimplementedExternalSecretStorePluginServiceServer() {
}

// UnsafeExternalSecretStorePluginServiceServer may be embedded to opt out of forward compatibility for this service.
// Use of this interface is not recommended, as added methods to ExternalSecretStorePluginServiceServer will
// result in compilation errors.
type UnsafeExternalSecretStorePluginServiceServer interface {
	mustEmbedUnimplementedExternalSecretStorePluginServiceServer()
}

// RegisterExternalSecretStorePluginServiceServer registers srv with the given
// gRPC service registrar under this service's descriptor.
func RegisterExternalSecretStorePluginServiceServer(s grpc.ServiceRegistrar, srv ExternalSecretStorePluginServiceServer) {
	s.RegisterService(&ExternalSecretStorePluginService_ServiceDesc, srv)
}
// _ExternalSecretStorePluginService_GetSecret_Handler decodes an incoming
// GetSecret request, optionally routes it through the configured unary
// interceptor, and dispatches it to the server implementation.
func _ExternalSecretStorePluginService_GetSecret_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
	in := new(GetSecretRequest)
	if err := dec(in); err != nil {
		return nil, err
	}
	if interceptor == nil {
		// No interceptor configured; call the implementation directly.
		return srv.(ExternalSecretStorePluginServiceServer).GetSecret(ctx, in)
	}
	info := &grpc.UnaryServerInfo{
		Server:     srv,
		FullMethod: ExternalSecretStorePluginService_GetSecret_FullMethodName,
	}
	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
		return srv.(ExternalSecretStorePluginServiceServer).GetSecret(ctx, req.(*GetSecretRequest))
	}
	return interceptor(ctx, in, info, handler)
}

// _ExternalSecretStorePluginService_ApplySecret_Handler decodes an incoming
// ApplySecret request, optionally routes it through the configured unary
// interceptor, and dispatches it to the server implementation.
func _ExternalSecretStorePluginService_ApplySecret_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
	in := new(ApplySecretRequest)
	if err := dec(in); err != nil {
		return nil, err
	}
	if interceptor == nil {
		// No interceptor configured; call the implementation directly.
		return srv.(ExternalSecretStorePluginServiceServer).ApplySecret(ctx, in)
	}
	info := &grpc.UnaryServerInfo{
		Server:     srv,
		FullMethod: ExternalSecretStorePluginService_ApplySecret_FullMethodName,
	}
	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
		return srv.(ExternalSecretStorePluginServiceServer).ApplySecret(ctx, req.(*ApplySecretRequest))
	}
	return interceptor(ctx, in, info, handler)
}

// _ExternalSecretStorePluginService_DeleteKeys_Handler decodes an incoming
// DeleteKeys request, optionally routes it through the configured unary
// interceptor, and dispatches it to the server implementation.
func _ExternalSecretStorePluginService_DeleteKeys_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
	in := new(DeleteKeysRequest)
	if err := dec(in); err != nil {
		return nil, err
	}
	if interceptor == nil {
		// No interceptor configured; call the implementation directly.
		return srv.(ExternalSecretStorePluginServiceServer).DeleteKeys(ctx, in)
	}
	info := &grpc.UnaryServerInfo{
		Server:     srv,
		FullMethod: ExternalSecretStorePluginService_DeleteKeys_FullMethodName,
	}
	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
		return srv.(ExternalSecretStorePluginServiceServer).DeleteKeys(ctx, req.(*DeleteKeysRequest))
	}
	return interceptor(ctx, in, info, handler)
}
// ExternalSecretStorePluginService_ServiceDesc is the grpc.ServiceDesc for ExternalSecretStorePluginService service.
// It's only intended for direct use with grpc.RegisterService,
// and not to be introspected or modified (even as a copy)
var ExternalSecretStorePluginService_ServiceDesc = grpc.ServiceDesc{
	ServiceName: "ess.proto.v1alpha1.ExternalSecretStorePluginService",
	HandlerType: (*ExternalSecretStorePluginServiceServer)(nil),
	// All three RPCs are unary; each method name maps to its generated handler.
	Methods: []grpc.MethodDesc{
		{
			MethodName: "GetSecret",
			Handler:    _ExternalSecretStorePluginService_GetSecret_Handler,
		},
		{
			MethodName: "ApplySecret",
			Handler:    _ExternalSecretStorePluginService_ApplySecret_Handler,
		},
		{
			MethodName: "DeleteKeys",
			Handler:    _ExternalSecretStorePluginService_DeleteKeys_Handler,
		},
	},
	// The service defines no streaming RPCs.
	Streams:  []grpc.StreamDesc{},
	Metadata: "proto/v1alpha1/ess.proto",
}

1
build Submodule

@ -0,0 +1 @@
Subproject commit bd63a4167ae20a71788537217b022fced8f2f854

176
go.mod
View File

@ -1,84 +1,124 @@
module github.com/crossplane/crossplane-runtime
go 1.24.0
go 1.17
require (
dario.cat/mergo v1.0.1
github.com/evanphx/json-patch v5.9.11+incompatible
github.com/go-logr/logr v1.4.2
github.com/google/go-cmp v0.7.0
github.com/prometheus/client_golang v1.22.0
github.com/spf13/afero v1.11.0
golang.org/x/time v0.9.0
google.golang.org/grpc v1.68.1
google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.3.0
google.golang.org/protobuf v1.36.5
k8s.io/api v0.33.0
k8s.io/apiextensions-apiserver v0.33.0
k8s.io/apimachinery v0.33.0
k8s.io/client-go v0.33.0
k8s.io/component-base v0.33.0
k8s.io/klog/v2 v2.130.1
k8s.io/utils v0.0.0-20241104100929-3ea5e8cea738
sigs.k8s.io/controller-runtime v0.19.0
sigs.k8s.io/controller-tools v0.18.0
sigs.k8s.io/yaml v1.4.0
github.com/go-logr/logr v1.2.0
github.com/google/go-cmp v0.5.6
github.com/hashicorp/go-getter v1.4.0
github.com/hashicorp/vault/api v1.3.1
github.com/imdario/mergo v0.3.12
github.com/spf13/afero v1.8.0
golang.org/x/time v0.0.0-20210723032227-1f47c861a9ac
k8s.io/api v0.23.0
k8s.io/apiextensions-apiserver v0.23.0
k8s.io/apimachinery v0.23.0
k8s.io/client-go v0.23.0
sigs.k8s.io/controller-runtime v0.11.0
sigs.k8s.io/controller-tools v0.8.0
sigs.k8s.io/yaml v1.3.0
)
require (
cloud.google.com/go v0.81.0 // indirect
cloud.google.com/go/storage v1.14.0 // indirect
github.com/Azure/go-autorest v14.2.0+incompatible // indirect
github.com/Azure/go-autorest/autorest v0.11.18 // indirect
github.com/Azure/go-autorest/autorest/adal v0.9.13 // indirect
github.com/Azure/go-autorest/autorest/date v0.3.0 // indirect
github.com/Azure/go-autorest/logger v0.2.1 // indirect
github.com/Azure/go-autorest/tracing v0.6.0 // indirect
github.com/armon/go-metrics v0.3.9 // indirect
github.com/armon/go-radix v1.0.0 // indirect
github.com/aws/aws-sdk-go v1.15.78 // indirect
github.com/beorn7/perks v1.0.1 // indirect
github.com/blang/semver/v4 v4.0.0 // indirect
github.com/cespare/xxhash/v2 v2.3.0 // indirect
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect
github.com/emicklei/go-restful/v3 v3.11.0 // indirect
github.com/evanphx/json-patch/v5 v5.9.0 // indirect
github.com/fatih/color v1.18.0 // indirect
github.com/fsnotify/fsnotify v1.7.0 // indirect
github.com/fxamacker/cbor/v2 v2.7.0 // indirect
github.com/go-openapi/jsonpointer v0.21.0 // indirect
github.com/go-openapi/jsonreference v0.20.2 // indirect
github.com/go-openapi/swag v0.23.0 // indirect
github.com/gobuffalo/flect v1.0.3 // indirect
github.com/bgentry/go-netrc v0.0.0-20140422174119-9fd32a8b3d3d // indirect
github.com/cenkalti/backoff/v3 v3.0.0 // indirect
github.com/cespare/xxhash/v2 v2.1.1 // indirect
github.com/davecgh/go-spew v1.1.1 // indirect
github.com/evanphx/json-patch v4.12.0+incompatible // indirect
github.com/fatih/color v1.12.0 // indirect
github.com/form3tech-oss/jwt-go v3.2.3+incompatible // indirect
github.com/fsnotify/fsnotify v1.5.1 // indirect
github.com/gobuffalo/flect v0.2.3 // indirect
github.com/gogo/protobuf v1.3.2 // indirect
github.com/google/gnostic-models v0.6.9 // indirect
github.com/google/uuid v1.6.0 // indirect
github.com/inconshreveable/mousetrap v1.1.0 // indirect
github.com/josharian/intern v1.0.0 // indirect
github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect
github.com/golang/protobuf v1.5.2 // indirect
github.com/golang/snappy v0.0.4 // indirect
github.com/google/gofuzz v1.1.0 // indirect
github.com/google/uuid v1.1.2 // indirect
github.com/googleapis/gax-go/v2 v2.0.5 // indirect
github.com/googleapis/gnostic v0.5.5 // indirect
github.com/hashicorp/errwrap v1.1.0 // indirect
github.com/hashicorp/go-cleanhttp v0.5.2 // indirect
github.com/hashicorp/go-hclog v0.16.2 // indirect
github.com/hashicorp/go-immutable-radix v1.3.1 // indirect
github.com/hashicorp/go-multierror v1.1.1 // indirect
github.com/hashicorp/go-plugin v1.4.3 // indirect
github.com/hashicorp/go-retryablehttp v0.6.6 // indirect
github.com/hashicorp/go-rootcerts v1.0.2 // indirect
github.com/hashicorp/go-safetemp v1.0.0 // indirect
github.com/hashicorp/go-secure-stdlib/mlock v0.1.1 // indirect
github.com/hashicorp/go-secure-stdlib/parseutil v0.1.1 // indirect
github.com/hashicorp/go-secure-stdlib/strutil v0.1.1 // indirect
github.com/hashicorp/go-sockaddr v1.0.2 // indirect
github.com/hashicorp/go-uuid v1.0.2 // indirect
github.com/hashicorp/go-version v1.2.0 // indirect
github.com/hashicorp/golang-lru v0.5.4 // indirect
github.com/hashicorp/hcl v1.0.0 // indirect
github.com/hashicorp/vault/sdk v0.3.0 // indirect
github.com/hashicorp/yamux v0.0.0-20180604194846-3520598351bb // indirect
github.com/inconshreveable/mousetrap v1.0.0 // indirect
github.com/jmespath/go-jmespath v0.0.0-20160202185014-0b12d6b521d8 // indirect
github.com/json-iterator/go v1.1.12 // indirect
github.com/mailru/easyjson v0.7.7 // indirect
github.com/mattn/go-colorable v0.1.13 // indirect
github.com/mattn/go-isatty v0.0.20 // indirect
github.com/jstemmer/go-junit-report v0.9.1 // indirect
github.com/mattn/go-colorable v0.1.8 // indirect
github.com/mattn/go-isatty v0.0.12 // indirect
github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369 // indirect
github.com/mitchellh/copystructure v1.0.0 // indirect
github.com/mitchellh/go-homedir v1.1.0 // indirect
github.com/mitchellh/go-testing-interface v1.0.0 // indirect
github.com/mitchellh/mapstructure v1.4.2 // indirect
github.com/mitchellh/reflectwalk v1.0.0 // indirect
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
github.com/modern-go/reflect2 v1.0.2 // indirect
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect
github.com/oklog/run v1.0.0 // indirect
github.com/pierrec/lz4 v2.5.2+incompatible // indirect
github.com/pkg/errors v0.9.1 // indirect
github.com/prometheus/client_model v0.6.1 // indirect
github.com/prometheus/common v0.62.0 // indirect
github.com/prometheus/procfs v0.15.1 // indirect
github.com/spf13/cobra v1.9.1 // indirect
github.com/spf13/pflag v1.0.6 // indirect
github.com/x448/float16 v0.8.4 // indirect
go.opentelemetry.io/otel v1.33.0 // indirect
go.opentelemetry.io/otel/trace v1.33.0 // indirect
golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56 // indirect
golang.org/x/mod v0.24.0 // indirect
golang.org/x/net v0.39.0 // indirect
golang.org/x/oauth2 v0.27.0 // indirect
golang.org/x/sync v0.13.0 // indirect
golang.org/x/sys v0.32.0 // indirect
golang.org/x/term v0.31.0 // indirect
golang.org/x/text v0.24.0 // indirect
golang.org/x/tools v0.32.0 // indirect
gomodules.xyz/jsonpatch/v2 v2.4.0 // indirect
google.golang.org/genproto/googleapis/rpc v0.0.0-20241209162323-e6fa225c2576 // indirect
gopkg.in/evanphx/json-patch.v4 v4.12.0 // indirect
github.com/prometheus/client_golang v1.11.0 // indirect
github.com/prometheus/client_model v0.2.0 // indirect
github.com/prometheus/common v0.28.0 // indirect
github.com/prometheus/procfs v0.6.0 // indirect
github.com/ryanuber/go-glob v1.0.0 // indirect
github.com/spf13/cobra v1.2.1 // indirect
github.com/spf13/pflag v1.0.5 // indirect
github.com/ulikunitz/xz v0.5.5 // indirect
go.opencensus.io v0.23.0 // indirect
go.uber.org/atomic v1.9.0 // indirect
golang.org/x/crypto v0.0.0-20211108221036-ceb1ce70b4fa // indirect
golang.org/x/lint v0.0.0-20210508222113-6edffad5e616 // indirect
golang.org/x/mod v0.4.2 // indirect
golang.org/x/net v0.0.0-20210825183410-e898025ed96a // indirect
golang.org/x/oauth2 v0.0.0-20210819190943-2bc19b11175f // indirect
golang.org/x/sys v0.0.0-20211029165221-6e7872819dc8 // indirect
golang.org/x/term v0.0.0-20210615171337-6886f2dfbf5b // indirect
golang.org/x/text v0.3.7 // indirect
golang.org/x/tools v0.1.6-0.20210820212750-d4cc65f0b2ff // indirect
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 // indirect
gomodules.xyz/jsonpatch/v2 v2.2.0 // indirect
google.golang.org/api v0.44.0 // indirect
google.golang.org/appengine v1.6.7 // indirect
google.golang.org/genproto v0.0.0-20210831024726-fe130286e0e2 // indirect
google.golang.org/grpc v1.41.0 // indirect
google.golang.org/protobuf v1.27.1 // indirect
gopkg.in/inf.v0 v0.9.1 // indirect
gopkg.in/square/go-jose.v2 v2.5.1 // indirect
gopkg.in/yaml.v2 v2.4.0 // indirect
gopkg.in/yaml.v3 v3.0.1 // indirect
k8s.io/code-generator v0.33.0 // indirect
k8s.io/gengo/v2 v2.0.0-20250207200755-1244d31929d7 // indirect
k8s.io/kube-openapi v0.0.0-20250318190949-c8a335a9a2ff // indirect
sigs.k8s.io/json v0.0.0-20241010143419-9aa6b5e7a4b3 // indirect
sigs.k8s.io/randfill v1.0.0 // indirect
sigs.k8s.io/structured-merge-diff/v4 v4.6.0 // indirect
gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b // indirect
k8s.io/component-base v0.23.0 // indirect
k8s.io/klog/v2 v2.30.0 // indirect
k8s.io/kube-openapi v0.0.0-20211115234752-e816edb12b65 // indirect
k8s.io/utils v0.0.0-20210930125809-cb0fa318a74b // indirect
sigs.k8s.io/json v0.0.0-20211020170558-c049b76a60c6 // indirect
sigs.k8s.io/structured-merge-diff/v4 v4.2.0 // indirect
)

1241
go.sum

File diff suppressed because it is too large Load Diff

View File

@ -1,5 +1,5 @@
/*
Copyright 2025 The Crossplane Authors.
Copyright 2019 The Crossplane Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.

View File

@ -1,3 +1,3 @@
`{{violation.rule}}`: {{violation.message}}
Refer to Crossplane's [coding style documentation](https://github.com/crossplane/crossplane/blob/main/CONTRIBUTING.md#coding-style-and-linting) for more information.
Refer to Crossplane's [coding style documentation](https://github.com/crossplane/crossplane/blob/master/CONTRIBUTING.md#coding-style-and-linting) for more information.

View File

@ -1,70 +0,0 @@
/*
Copyright 2023 The Crossplane Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Package certificates loads TLS certificates from a given folder.
package certificates
import (
"crypto/tls"
"crypto/x509"
"os"
"path/filepath"
"github.com/crossplane/crossplane-runtime/pkg/errors"
)
const (
errLoadCert = "cannot load certificate"
errLoadCA = "cannot load CA certificate"
errInvalidCA = "invalid CA certificate"
)
// LoadMTLSConfig builds a *tls.Config for mutual TLS from the certificate
// files at the given paths: caPath is the PEM-encoded CA bundle, certPath and
// keyPath are the leaf certificate and its private key. When isServer is true
// the CA pool is installed as ClientCAs and client certificates are required
// and verified; otherwise the pool becomes the RootCAs used to verify the
// peer. The minimum accepted protocol version is TLS 1.2.
func LoadMTLSConfig(caPath, certPath, keyPath string, isServer bool) (*tls.Config, error) {
	keyPair, err := tls.LoadX509KeyPair(filepath.Clean(certPath), filepath.Clean(keyPath))
	if err != nil {
		return nil, errors.Wrap(err, errLoadCert)
	}

	caPEM, err := os.ReadFile(filepath.Clean(caPath))
	if err != nil {
		return nil, errors.Wrap(err, errLoadCA)
	}

	caPool := x509.NewCertPool()
	if !caPool.AppendCertsFromPEM(caPEM) {
		// The file was readable but contained no usable PEM certificate.
		return nil, errors.New(errInvalidCA)
	}

	cfg := &tls.Config{
		MinVersion:   tls.VersionTLS12,
		Certificates: []tls.Certificate{keyPair},
	}

	if isServer {
		cfg.ClientCAs = caPool
		cfg.ClientAuth = tls.RequireAndVerifyClientCert
		return cfg, nil
	}

	cfg.RootCAs = caPool
	return cfg, nil
}

View File

@ -1,112 +0,0 @@
package certificates
import (
"crypto/tls"
"path/filepath"
"testing"
"github.com/google/go-cmp/cmp"
"github.com/crossplane/crossplane-runtime/pkg/errors"
"github.com/crossplane/crossplane-runtime/pkg/test"
)
var (
errNoSuchFile = errors.New("open invalid/path/tls.crt: no such file or directory")
errNoCAFile = errors.New("open test-data/no-ca/ca.crt: no such file or directory")
)
const (
caCertFileName = "ca.crt"
tlsCertFileName = "tls.crt"
tlsKeyFileName = "tls.key"
)
// TestLoad exercises LoadMTLSConfig against on-disk fixtures under test-data/,
// covering missing certificate files, a missing CA file, an unparsable CA,
// and the happy path with and without client certificate validation.
func TestLoad(t *testing.T) {
	type args struct {
		certsFolderPath string
		requireClientValidation bool
	}
	type want struct {
		err error
		out *tls.Config
	}
	cases := map[string]struct {
		reason string
		args
		want
	}{
		"LoadCertError": {
			reason: "Should return a proper error if certificates do not exist.",
			args: args{
				certsFolderPath: "invalid/path",
			},
			want: want{
				err: errors.Wrap(errNoSuchFile, errLoadCert),
				out: nil,
			},
		},
		"LoadCAError": {
			reason: "Should return a proper error if CA certificate does not exist.",
			args: args{
				certsFolderPath: "test-data/no-ca",
			},
			want: want{
				err: errors.Wrap(errNoCAFile, errLoadCA),
				out: nil,
			},
		},
		"InvalidCAError": {
			reason: "Should return a proper error if CA certificate is not valid.",
			args: args{
				certsFolderPath: "test-data/invalid-certs/",
			},
			want: want{
				err: errors.New(errInvalidCA),
				out: nil,
			},
		},
		"NoError": {
			reason: "Should not return an error after loading certificates.",
			args: args{
				certsFolderPath: "test-data/certs/",
			},
			want: want{
				err: nil,
				out: &tls.Config{},
			},
		},
		"NoErrorWithClientValidation": {
			reason: "Should not return an error after loading certificates.",
			args: args{
				certsFolderPath: "test-data/certs/",
				requireClientValidation: true,
			},
			want: want{
				err: nil,
				out: &tls.Config{
					ClientAuth: tls.RequireAndVerifyClientCert,
				},
			},
		},
	}
	for name, tc := range cases {
		t.Run(name, func(t *testing.T) {
			certsFolderPath := tc.certsFolderPath
			requireClient := tc.requireClientValidation
			cfg, err := LoadMTLSConfig(filepath.Join(certsFolderPath, caCertFileName), filepath.Join(certsFolderPath, tlsCertFileName), filepath.Join(certsFolderPath, tlsKeyFileName), requireClient)
			if diff := cmp.Diff(tc.err, err, test.EquateErrors()); diff != "" {
				t.Errorf("\n%s\nLoad(...): -want error, +got error:\n%s", tc.reason, diff)
			}
			// Only the ClientAuth policy is compared on success; a full
			// tls.Config carries unexported, non-comparable state.
			if requireClient {
				if diff := cmp.Diff(tc.out.ClientAuth, cfg.ClientAuth); diff != "" {
					t.Errorf("\n%s\nLoad(...): -want, +got:\n%s", tc.reason, diff)
				}
			}
		})
	}
}

View File

@ -1,10 +0,0 @@
-----BEGIN CERTIFICATE-----
MIIBejCCASGgAwIBAgIIOGozHYTTZu4wCgYIKoZIzj0EAwIwETEPMA0GA1UEAxMG
Um9vdENBMCAXDTE5MTIyMzA4NTYzN1oYDzIxMTkxMTI5MDkwMTM3WjARMQ8wDQYD
VQQDEwZSb290Q0EwWTATBgcqhkjOPQIBBggqhkjOPQMBBwNCAAQmKXRMMLbjn8ur
DaO/rNa8VXq32FHt7wr8+xXf0OhaCimQHxWmCHXmierP+UWs4TwZ5/NTyHZ8OOCj
sSEGgA1ao2EwXzAOBgNVHQ8BAf8EBAMCAaYwHQYDVR0lBBYwFAYIKwYBBQUHAwEG
CCsGAQUFBwMCMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFNQ5LeIUMgDmha6m
HlW5Yte2trnyMAoGCCqGSM49BAMCA0cAMEQCIACPtB0wO8CGBjdANqnHOnREgEqu
KieHeY3sYL2H+7YfAiAmfLtMe3hPdI3+sDPVZTPDe8HYFher8yWb/DCBZCT1Ww==
-----END CERTIFICATE-----

View File

@ -1,12 +0,0 @@
-----BEGIN CERTIFICATE-----
MIIBxDCCAWmgAwIBAgIUVkhaF0okPcEJaKYKJRyTHU+aQMwwCgYIKoZIzj0EAwIw
ETEPMA0GA1UEAxMGUm9vdENBMCAXDTE5MTIyMzA4NTkwMFoYDzIxMTkxMTI5MDg1
OTAwWjARMQ8wDQYDVQQDEwZjbGllbnQwWTATBgcqhkjOPQIBBggqhkjOPQMBBwNC
AASyDjp+6zyn0W2MWtX07u3iudcahyLtTD51DzTIdplcT/bezWBWxLnP0JzzGORS
f/Uf59PjMCbE66fFSNCQpcdlo4GcMIGZMA4GA1UdDwEB/wQEAwIFoDAdBgNVHSUE
FjAUBggrBgEFBQcDAgYIKwYBBQUHAwEwDAYDVR0TAQH/BAIwADAdBgNVHQ4EFgQU
rRNJVmij3xwiQyNfzKuhcCKnAtAwHwYDVR0jBBgwFoAU1Dkt4hQyAOaFrqYeVbli
17a2ufIwGgYDVR0RBBMwEYIJbG9jYWxob3N0hwR/AAABMAoGCCqGSM49BAMCA0kA
MEYCIQCpZppRb5t2kjyILMnLhJ/cHKsvXpAWcO8FrDx/VBoP1wIhALtw1B73X2bj
EPps3Or2UzJNxNroBNRgqIo7XkaKQRe8
-----END CERTIFICATE-----

View File

@ -1,5 +0,0 @@
-----BEGIN EC PRIVATE KEY-----
MHcCAQEEIDcpnLnAoOvR+q7rEKEY4zEWTicMkPaHJ1iC8lHEy9v8oAoGCCqGSM49
AwEHoUQDQgAEsg46fus8p9FtjFrV9O7t4rnXGoci7Uw+dQ80yHaZXE/23s1gVsS5
z9Cc8xjkUn/1H+fT4zAmxOunxUjQkKXHZQ==
-----END EC PRIVATE KEY-----

View File

@ -1,8 +0,0 @@
MIIBejCCASGgAwIBAgIIOGozHYTTZu4wCgYIKoZIzj0EAwIwETEPMA0GA1UEAxMG
Um9vdENBMCAXDTE5MTIyMzA4NTYzN1oYDzIxMTkxMTI5MDkwMTM3WjARMQ8wDQYD
VQQDEwZSb290Q0EwWTATBgcqhkjOPQIBBggqhkjOPQMBBwNCAAQmKXRMMLbjn8ur
DaO/rNa8VXq32FHt7wr8+xXf0OhaCimQHxWmCHXmierP+UWs4TwZ5/NTyHZ8OOCj
sSEGgA1ao2EwXzAOBgNVHQ8BAf8EBAMCAaYwHQYDVR0lBBYwFAYIKwYBBQUHAwEG
CCsGAQUFBwMCMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFNQ5LeIUMgDmha6m
HlW5Yte2trnyMAoGCCqGSM49BAMCA0cAMEQCIACPtB0wO8CGBjdANqnHOnREgEqu
KieHeY3sYL2H+7YfAiAmfLtMe3hPdI3+sDPVZTPDe8HYFher8yWb/DCBZCT1Ww==

View File

@ -1,12 +0,0 @@
-----BEGIN CERTIFICATE-----
MIIBxDCCAWmgAwIBAgIUVkhaF0okPcEJaKYKJRyTHU+aQMwwCgYIKoZIzj0EAwIw
ETEPMA0GA1UEAxMGUm9vdENBMCAXDTE5MTIyMzA4NTkwMFoYDzIxMTkxMTI5MDg1
OTAwWjARMQ8wDQYDVQQDEwZjbGllbnQwWTATBgcqhkjOPQIBBggqhkjOPQMBBwNC
AASyDjp+6zyn0W2MWtX07u3iudcahyLtTD51DzTIdplcT/bezWBWxLnP0JzzGORS
f/Uf59PjMCbE66fFSNCQpcdlo4GcMIGZMA4GA1UdDwEB/wQEAwIFoDAdBgNVHSUE
FjAUBggrBgEFBQcDAgYIKwYBBQUHAwEwDAYDVR0TAQH/BAIwADAdBgNVHQ4EFgQU
rRNJVmij3xwiQyNfzKuhcCKnAtAwHwYDVR0jBBgwFoAU1Dkt4hQyAOaFrqYeVbli
17a2ufIwGgYDVR0RBBMwEYIJbG9jYWxob3N0hwR/AAABMAoGCCqGSM49BAMCA0kA
MEYCIQCpZppRb5t2kjyILMnLhJ/cHKsvXpAWcO8FrDx/VBoP1wIhALtw1B73X2bj
EPps3Or2UzJNxNroBNRgqIo7XkaKQRe8
-----END CERTIFICATE-----

View File

@ -1,5 +0,0 @@
-----BEGIN EC PRIVATE KEY-----
MHcCAQEEIDcpnLnAoOvR+q7rEKEY4zEWTicMkPaHJ1iC8lHEy9v8oAoGCCqGSM49
AwEHoUQDQgAEsg46fus8p9FtjFrV9O7t4rnXGoci7Uw+dQ80yHaZXE/23s1gVsS5
z9Cc8xjkUn/1H+fT4zAmxOunxUjQkKXHZQ==
-----END EC PRIVATE KEY-----

View File

@ -1,12 +0,0 @@
-----BEGIN CERTIFICATE-----
MIIBxDCCAWmgAwIBAgIUVkhaF0okPcEJaKYKJRyTHU+aQMwwCgYIKoZIzj0EAwIw
ETEPMA0GA1UEAxMGUm9vdENBMCAXDTE5MTIyMzA4NTkwMFoYDzIxMTkxMTI5MDg1
OTAwWjARMQ8wDQYDVQQDEwZjbGllbnQwWTATBgcqhkjOPQIBBggqhkjOPQMBBwNC
AASyDjp+6zyn0W2MWtX07u3iudcahyLtTD51DzTIdplcT/bezWBWxLnP0JzzGORS
f/Uf59PjMCbE66fFSNCQpcdlo4GcMIGZMA4GA1UdDwEB/wQEAwIFoDAdBgNVHSUE
FjAUBggrBgEFBQcDAgYIKwYBBQUHAwEwDAYDVR0TAQH/BAIwADAdBgNVHQ4EFgQU
rRNJVmij3xwiQyNfzKuhcCKnAtAwHwYDVR0jBBgwFoAU1Dkt4hQyAOaFrqYeVbli
17a2ufIwGgYDVR0RBBMwEYIJbG9jYWxob3N0hwR/AAABMAoGCCqGSM49BAMCA0kA
MEYCIQCpZppRb5t2kjyILMnLhJ/cHKsvXpAWcO8FrDx/VBoP1wIhALtw1B73X2bj
EPps3Or2UzJNxNroBNRgqIo7XkaKQRe8
-----END CERTIFICATE-----

View File

@ -1,5 +0,0 @@
-----BEGIN EC PRIVATE KEY-----
MHcCAQEEIDcpnLnAoOvR+q7rEKEY4zEWTicMkPaHJ1iC8lHEy9v8oAoGCCqGSM49
AwEHoUQDQgAEsg46fus8p9FtjFrV9O7t4rnXGoci7Uw+dQ80yHaZXE/23s1gVsS5
z9Cc8xjkUn/1H+fT4zAmxOunxUjQkKXHZQ==
-----END EC PRIVATE KEY-----

View File

@ -1,73 +0,0 @@
/*
Copyright 2025 The Crossplane Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Package conditions enables consistent interactions with an object's status conditions.
package conditions
import (
xpv1 "github.com/crossplane/crossplane-runtime/apis/common/v1"
"github.com/crossplane/crossplane-runtime/pkg/resource"
)
// ObjectWithConditions is satisfied by Kubernetes objects that implement
// resource.Object and also expose status conditions via resource.Conditioned.
type ObjectWithConditions interface {
	resource.Object
	resource.Conditioned
}

// Manager is an interface for a stateless factory-like object that produces ConditionSet objects.
type Manager interface {
	// For returns an implementation of a ConditionSet to operate on a specific ObjectWithConditions.
	For(o ObjectWithConditions) ConditionSet
}

// ConditionSet holds operations for interacting with an object's conditions.
type ConditionSet interface {
	// MarkConditions adds or updates the conditions onto the managed resource object. Unlike a "Set" method, this also
	// can add contextual updates to the condition such as propagating the correct observedGeneration to the conditions
	// being changed.
	MarkConditions(condition ...xpv1.Condition)
}
// ObservedGenerationPropagationManager is the top level factory for producing a ConditionSet
// on behalf of an ObjectWithConditions resource. The ConditionSet it produces is currently
// only concerned with propagating observedGeneration to conditions that are being updated.
// ObservedGenerationPropagationManager implements Manager.
type ObservedGenerationPropagationManager struct{}

// For implements Manager.For.
func (m ObservedGenerationPropagationManager) For(o ObjectWithConditions) ConditionSet {
	return &observedGenerationPropagationConditionSet{o: o}
}
// observedGenerationPropagationConditionSet stamps the wrapped object's
// metadata.generation onto the observedGeneration of every condition passed
// to MarkConditions before persisting them on the object.
type observedGenerationPropagationConditionSet struct {
	o ObjectWithConditions
}

// MarkConditions implements ConditionSet.MarkConditions. It is a no-op when
// the set or its underlying object is nil.
func (c *observedGenerationPropagationConditionSet) MarkConditions(condition ...xpv1.Condition) {
	if c == nil || c.o == nil {
		return
	}
	// Propagate the object's current generation to every condition we were
	// asked to mark, then hand them to the object to merge into its status.
	gen := c.o.GetGeneration()
	for i := range condition {
		condition[i].ObservedGeneration = gen
	}
	c.o.SetConditions(condition...)
}

View File

@ -1,135 +0,0 @@
/*
Copyright 2025 The Crossplane Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package conditions
import (
"reflect"
"testing"
"time"
"github.com/google/go-cmp/cmp"
"github.com/google/go-cmp/cmp/cmpopts"
xpv1 "github.com/crossplane/crossplane-runtime/apis/common/v1"
"github.com/crossplane/crossplane-runtime/pkg/resource/fake"
"github.com/crossplane/crossplane-runtime/pkg/test"
)
// Check that conditionsImpl implements ConditionManager.
var _ Manager = (*ObservedGenerationPropagationManager)(nil)
// Check that conditionSet implements ConditionSet.
var _ ConditionSet = (*observedGenerationPropagationConditionSet)(nil)
// TestOGConditionSetMark verifies that MarkConditions stamps the object's
// generation (42 in these fixtures) onto every marked condition, and that
// marking through a set built from a nil object does not panic.
func TestOGConditionSetMark(t *testing.T) {
	manager := new(ObservedGenerationPropagationManager)
	tests := map[string]struct {
		reason string
		start []xpv1.Condition
		mark []xpv1.Condition
		want []xpv1.Condition
	}{
		"ProvideNoConditions": {
			reason: "If updating a resource without conditions with no new conditions, conditions should remain empty.",
			start: nil,
			mark: nil,
			want: nil,
		},
		"EmptyAppendCondition": {
			reason: "If starting with a resource without conditions, and we mark a condition, it should propagate to conditions with the correct generation.",
			start: nil,
			mark: []xpv1.Condition{xpv1.ReconcileSuccess()},
			want: []xpv1.Condition{xpv1.ReconcileSuccess().WithObservedGeneration(42)},
		},
		"ExistingMarkNothing": {
			reason: "If the resource has a condition and we update nothing, nothing should change.",
			start: []xpv1.Condition{xpv1.Available().WithObservedGeneration(1)},
			mark: nil,
			want: []xpv1.Condition{xpv1.Available().WithObservedGeneration(1)},
		},
		"ExistingUpdated": {
			reason: "If a resource starts with a condition, and we update it, we should see the observedGeneration be updated",
			start: []xpv1.Condition{xpv1.ReconcileSuccess().WithObservedGeneration(1)},
			mark: []xpv1.Condition{xpv1.ReconcileSuccess()},
			want: []xpv1.Condition{xpv1.ReconcileSuccess().WithObservedGeneration(42)},
		},
		"ExistingAppended": {
			reason: "If a resource has an existing condition and we make another condition, the new condition should merge into the conditions list.",
			start: []xpv1.Condition{xpv1.Available().WithObservedGeneration(1)},
			mark: []xpv1.Condition{xpv1.ReconcileSuccess()},
			want: []xpv1.Condition{xpv1.Available().WithObservedGeneration(1), xpv1.ReconcileSuccess().WithObservedGeneration(42)},
		},
	}
	for name, tt := range tests {
		t.Run(name, func(t *testing.T) {
			// The fake managed resource is created at generation 42, so every
			// marked condition is expected to carry observedGeneration 42.
			ut := newManaged(42, tt.start...)
			c := manager.For(ut)
			c.MarkConditions(tt.mark...)
			if diff := cmp.Diff(tt.want, ut.Conditions, test.EquateConditions(), cmpopts.EquateApproxTime(1*time.Second)); diff != "" {
				t.Errorf("\nReason: %s\n-want, +got:\n%s", tt.reason, diff)
			}
		})
	}
	t.Run("ManageNilObject", func(t *testing.T) {
		c := manager.For(nil)
		if c == nil {
			t.Errorf("manager.For(nil) = %v, want non-nil", c)
		}
		// Test that Marking on a Manager that has a nil object does not end up panicking.
		c.MarkConditions(xpv1.ReconcileSuccess())
		// Success!
	})
}
// TestOGManagerFor verifies that ObservedGenerationPropagationManager.For
// wraps the given object (including a nil one) in a non-nil
// observedGenerationPropagationConditionSet.
func TestOGManagerFor(t *testing.T) {
	tests := map[string]struct {
		reason string
		o ObjectWithConditions
		want ConditionSet
	}{
		"NilObject": {
			reason: "Even if an object is nil, the manager should return a non-nil ConditionSet",
			want: &observedGenerationPropagationConditionSet{},
		},
		"Object": {
			reason: "Object propagates into manager.",
			o: &fake.Managed{},
			want: &observedGenerationPropagationConditionSet{
				o: &fake.Managed{},
			},
		},
	}
	for name, tt := range tests {
		t.Run(name, func(t *testing.T) {
			m := &ObservedGenerationPropagationManager{}
			// reflect.DeepEqual suffices here because the condition set only
			// holds the wrapped object reference.
			if got := m.For(tt.o); !reflect.DeepEqual(got, tt.want) {
				t.Errorf("\nReason: %s\nFor() = %v, want %v", tt.reason, got, tt.want)
			}
		})
	}
}
// newManaged returns a fake managed resource at the given generation with the
// supplied conditions already set.
func newManaged(generation int64, conditions ...xpv1.Condition) *fake.Managed {
	m := &fake.Managed{}
	m.Generation = generation
	m.SetConditions(conditions...)
	return m
}

View File

@ -0,0 +1,80 @@
/*
Copyright 2022 The Crossplane Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package fake
import (
"context"
"encoding/json"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
v1 "github.com/crossplane/crossplane-runtime/apis/common/v1"
"github.com/crossplane/crossplane-runtime/pkg/connection/store"
)
// SecretStore is a fake SecretStore
type SecretStore struct {
	// ReadKeyValuesFn is called by ReadKeyValues. It must be set by the test;
	// a nil hook panics when invoked.
	ReadKeyValuesFn func(ctx context.Context, n store.ScopedName, s *store.Secret) error
	// WriteKeyValuesFn is called by WriteKeyValues.
	WriteKeyValuesFn func(ctx context.Context, s *store.Secret, wo ...store.WriteOption) (bool, error)
	// DeleteKeyValuesFn is called by DeleteKeyValues.
	DeleteKeyValuesFn func(ctx context.Context, s *store.Secret, do ...store.DeleteOption) error
}
// ReadKeyValues reads key values.
// It delegates to ReadKeyValuesFn.
func (ss *SecretStore) ReadKeyValues(ctx context.Context, n store.ScopedName, s *store.Secret) error {
	return ss.ReadKeyValuesFn(ctx, n, s)
}
// WriteKeyValues writes key values.
// It delegates to WriteKeyValuesFn.
func (ss *SecretStore) WriteKeyValues(ctx context.Context, s *store.Secret, wo ...store.WriteOption) (bool, error) {
	return ss.WriteKeyValuesFn(ctx, s, wo...)
}
// DeleteKeyValues deletes key values.
// It delegates to DeleteKeyValuesFn.
func (ss *SecretStore) DeleteKeyValues(ctx context.Context, s *store.Secret, do ...store.DeleteOption) error {
	return ss.DeleteKeyValuesFn(ctx, s, do...)
}
// StoreConfig is a mock implementation of the StoreConfig interface.
type StoreConfig struct {
	metav1.ObjectMeta
	// Config is returned verbatim by GetStoreConfig.
	Config v1.SecretStoreConfig
	v1.ConditionedStatus
}
// GetStoreConfig returns SecretStoreConfig
func (s *StoreConfig) GetStoreConfig() v1.SecretStoreConfig {
	return s.Config
}
// GetObjectKind returns schema.ObjectKind.
// The fake carries no kind information, so the empty object kind is returned.
func (s *StoreConfig) GetObjectKind() schema.ObjectKind {
	return schema.EmptyObjectKind
}
// DeepCopyObject returns a copy of the object as runtime.Object
//
// The copy is made by round-tripping through JSON. Any serialization failure
// indicates a programmer error in this test fake, so both the marshal and the
// unmarshal panic on error rather than silently returning a partial copy.
// (The original ignored the Unmarshal error, which was inconsistent with the
// panic on Marshal.)
func (s *StoreConfig) DeepCopyObject() runtime.Object {
	out := &StoreConfig{}
	j, err := json.Marshal(s)
	if err != nil {
		panic(err)
	}
	if err := json.Unmarshal(j, out); err != nil {
		panic(err)
	}
	return out
}

View File

@ -0,0 +1,39 @@
/*
Copyright 2022 The Crossplane Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package connection
import (
"context"
v1 "github.com/crossplane/crossplane-runtime/apis/common/v1"
"github.com/crossplane/crossplane-runtime/pkg/connection/store"
"github.com/crossplane/crossplane-runtime/pkg/resource"
)
// A StoreConfig configures a connection store.
type StoreConfig interface {
	resource.Object
	// GetStoreConfig returns the configuration of this store.
	GetStoreConfig() v1.SecretStoreConfig
}
// A Store stores sensitive key values in Secret.
type Store interface {
	// ReadKeyValues reads the secret with the given scoped name into s.
	ReadKeyValues(ctx context.Context, n store.ScopedName, s *store.Secret) error
	// WriteKeyValues writes s, reporting whether the stored secret changed.
	WriteKeyValues(ctx context.Context, s *store.Secret, wo ...store.WriteOption) (changed bool, err error)
	// DeleteKeyValues deletes the supplied key values from the stored secret.
	DeleteKeyValues(ctx context.Context, s *store.Secret, do ...store.DeleteOption) error
}

215
pkg/connection/manager.go Normal file
View File

@ -0,0 +1,215 @@
/*
Copyright 2022 The Crossplane Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package connection
import (
"context"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/apimachinery/pkg/types"
"sigs.k8s.io/controller-runtime/pkg/client"
v1 "github.com/crossplane/crossplane-runtime/apis/common/v1"
"github.com/crossplane/crossplane-runtime/pkg/connection/store"
"github.com/crossplane/crossplane-runtime/pkg/errors"
"github.com/crossplane/crossplane-runtime/pkg/reconciler/managed"
"github.com/crossplane/crossplane-runtime/pkg/resource"
)
// Error strings.
const (
	errConnectStore    = "cannot connect to secret store"
	errWriteStore      = "cannot write to secret store"
	errReadStore       = "cannot read from secret store"
	errDeleteFromStore = "cannot delete from secret store"
	errGetStoreConfig  = "cannot get store config"
	errSecretConflict  = "cannot establish control of existing connection secret"

	// errFmtNotOwnedBy is formatted with the owner UID the secret was
	// expected to be owned by.
	errFmtNotOwnedBy = "existing secret is not owned by UID %q"
)

// StoreBuilderFn is a function that builds and returns a Store with a given
// store config.
type StoreBuilderFn func(ctx context.Context, local client.Client, cfg v1.SecretStoreConfig) (Store, error)

// A DetailsManagerOption configures a DetailsManager.
type DetailsManagerOption func(*DetailsManager)
// WithStoreBuilder configures the StoreBuilder to use.
func WithStoreBuilder(sb StoreBuilderFn) DetailsManagerOption {
	return func(dm *DetailsManager) { dm.storeBuilder = sb }
}
// DetailsManager is a connection details manager that satisfies the required
// interfaces to work with connection details by managing interaction with
// different store implementations.
type DetailsManager struct {
	// client reads StoreConfig objects from the local API server.
	client client.Client
	// newConfig returns a new, empty StoreConfig of the managed kind.
	newConfig func() StoreConfig
	// storeBuilder builds a Store for a resolved store config.
	storeBuilder StoreBuilderFn
}
// NewDetailsManager returns a new connection DetailsManager that loads store
// configs of the supplied kind via the supplied client. It panics if that
// kind is not registered with the client's scheme.
func NewDetailsManager(c client.Client, of schema.GroupVersionKind, o ...DetailsManagerOption) *DetailsManager {
	nc := func() StoreConfig {
		return resource.MustCreateObject(of, c.Scheme()).(StoreConfig)
	}
	// Panic early if we've been asked to reconcile a resource kind that has not
	// been registered with our controller manager's scheme.
	_ = nc()
	m := &DetailsManager{
		client:       c,
		newConfig:    nc,
		storeBuilder: RuntimeStoreBuilder,
	}
	// Apply any user-supplied options over the defaults above.
	for _, mo := range o {
		mo(m)
	}
	return m
}
// PublishConnection publishes the supplied ConnectionDetails to a secret on
// the configured connection Store. It returns true if the stored secret
// changed as a result of the write.
func (m *DetailsManager) PublishConnection(ctx context.Context, so resource.ConnectionSecretOwner, conn managed.ConnectionDetails) (bool, error) {
	// This resource does not want to expose a connection secret.
	p := so.GetPublishConnectionDetailsTo()
	if p == nil {
		return false, nil
	}
	ss, err := m.connectStore(ctx, p)
	if err != nil {
		return false, errors.Wrap(err, errConnectStore)
	}
	// The write option ensures we do not overwrite a secret owned by some
	// other resource.
	changed, err := ss.WriteKeyValues(ctx, store.NewSecret(so, store.KeyValues(conn)), SecretToWriteMustBeOwnedBy(so))
	return changed, errors.Wrap(err, errWriteStore)
}
// UnpublishConnection deletes connection details secret to the configured
// connection Store. It is a no-op if the owner does not publish a connection
// secret.
func (m *DetailsManager) UnpublishConnection(ctx context.Context, so resource.ConnectionSecretOwner, conn managed.ConnectionDetails) error {
	// This resource didn't expose a connection secret.
	p := so.GetPublishConnectionDetailsTo()
	if p == nil {
		return nil
	}
	ss, err := m.connectStore(ctx, p)
	if err != nil {
		return errors.Wrap(err, errConnectStore)
	}
	// The delete option ensures we only delete a secret this owner owns.
	return errors.Wrap(ss.DeleteKeyValues(ctx, store.NewSecret(so, store.KeyValues(conn)), SecretToDeleteMustBeOwnedBy(so)), errDeleteFromStore)
}
// FetchConnection fetches connection details of a given ConnectionSecretOwner.
// It returns nil details (and no error) when the owner does not publish a
// connection secret.
func (m *DetailsManager) FetchConnection(ctx context.Context, so resource.ConnectionSecretOwner) (managed.ConnectionDetails, error) {
	// This resource does not want to expose a connection secret.
	p := so.GetPublishConnectionDetailsTo()
	if p == nil {
		return nil, nil
	}
	ss, err := m.connectStore(ctx, p)
	if err != nil {
		return nil, errors.Wrap(err, errConnectStore)
	}
	// Read into s before converting its data. The original combined both in
	// one return statement, which relied on the non-call operand s.Data being
	// read only after the ReadKeyValues call populated it - an evaluation
	// order the Go spec does not guarantee. Sequencing the statements makes
	// the order explicit, and returns nil details on error per convention.
	s := &store.Secret{}
	if err := ss.ReadKeyValues(ctx, store.ScopedName{Name: p.Name, Scope: so.GetNamespace()}, s); err != nil {
		return nil, errors.Wrap(err, errReadStore)
	}
	return managed.ConnectionDetails(s.Data), nil
}
// PropagateConnection propagate connection details from one resource to another.
// It reads the source's connection secret, verifies the source actually owns
// it, then writes it to the destination's store. It returns true if the
// destination secret changed.
func (m *DetailsManager) PropagateConnection(ctx context.Context, to resource.LocalConnectionSecretOwner, from resource.ConnectionSecretOwner) (propagated bool, err error) { // nolint:interfacer
	// NOTE(turkenh): Had to add linter exception for "interfacer" suggestion
	// to use "store.SecretOwner" as the type of "to" parameter. We want to
	// keep it as "resource.LocalConnectionSecretOwner" to satisfy the
	// ConnectionPropagater interface for XR Claims.

	// Either from does not expose a connection secret, or to does not want one.
	if from.GetPublishConnectionDetailsTo() == nil || to.GetPublishConnectionDetailsTo() == nil {
		return false, nil
	}
	// Source and destination may be configured with different stores, so each
	// is connected separately.
	ssFrom, err := m.connectStore(ctx, from.GetPublishConnectionDetailsTo())
	if err != nil {
		return false, errors.Wrap(err, errConnectStore)
	}
	sFrom := &store.Secret{}
	if err = ssFrom.ReadKeyValues(ctx, store.ScopedName{
		Name:  from.GetPublishConnectionDetailsTo().Name,
		Scope: from.GetNamespace(),
	}, sFrom); err != nil {
		return false, errors.Wrap(err, errReadStore)
	}
	// Make sure 'from' is the controller of the connection secret it references
	// before we propagate it. This ensures a resource cannot use Crossplane to
	// circumvent RBAC by propagating a secret it does not own.
	if sFrom.GetOwner() != string(from.GetUID()) {
		return false, errors.New(errSecretConflict)
	}
	ssTo, err := m.connectStore(ctx, to.GetPublishConnectionDetailsTo())
	if err != nil {
		return false, errors.Wrap(err, errConnectStore)
	}
	changed, err := ssTo.WriteKeyValues(ctx, store.NewSecret(to, sFrom.Data), SecretToWriteMustBeOwnedBy(to))
	return changed, errors.Wrap(err, errWriteStore)
}
// connectStore fetches the StoreConfig referenced by the supplied publish
// target and builds a Store for it using the configured store builder.
func (m *DetailsManager) connectStore(ctx context.Context, p *v1.PublishConnectionDetailsTo) (Store, error) {
	sc := m.newConfig()
	if err := m.client.Get(ctx, types.NamespacedName{Name: p.SecretStoreConfigRef.Name}, sc); err != nil {
		return nil, errors.Wrap(err, errGetStoreConfig)
	}
	return m.storeBuilder(ctx, m.client, sc.GetStoreConfig())
}
// SecretToWriteMustBeOwnedBy requires that the current object is a
// connection secret that is owned by an object with the supplied UID.
func SecretToWriteMustBeOwnedBy(so metav1.Object) store.WriteOption {
	// Only the current (stored) secret's ownership matters; the desired
	// secret is ignored.
	return func(_ context.Context, current, _ *store.Secret) error {
		return secretMustBeOwnedBy(so, current)
	}
}
// SecretToDeleteMustBeOwnedBy requires that the current secret is owned by
// an object with the supplied UID.
func SecretToDeleteMustBeOwnedBy(so metav1.Object) store.DeleteOption {
	return func(_ context.Context, secret *store.Secret) error {
		return secretMustBeOwnedBy(so, secret)
	}
}
// secretMustBeOwnedBy returns an error unless the supplied secret has
// metadata recording the supplied object's UID as its owner.
func secretMustBeOwnedBy(so metav1.Object, secret *store.Secret) error {
	uid := string(so.GetUID())
	if secret.Metadata != nil && secret.Metadata.GetOwnerUID() == uid {
		return nil
	}
	return errors.Errorf(errFmtNotOwnedBy, uid)
}

File diff suppressed because it is too large Load Diff

View File

@ -0,0 +1,241 @@
/*
Copyright 2022 The Crossplane Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package kubernetes
import (
"context"
"github.com/google/go-cmp/cmp"
"github.com/google/go-cmp/cmp/cmpopts"
corev1 "k8s.io/api/core/v1"
kerrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/types"
"k8s.io/client-go/tools/clientcmd"
"sigs.k8s.io/controller-runtime/pkg/client"
v1 "github.com/crossplane/crossplane-runtime/apis/common/v1"
"github.com/crossplane/crossplane-runtime/pkg/connection/store"
"github.com/crossplane/crossplane-runtime/pkg/errors"
"github.com/crossplane/crossplane-runtime/pkg/resource"
)
// Error strings.
const (
	errGetSecret    = "cannot get secret"
	errDeleteSecret = "cannot delete secret"
	errUpdateSecret = "cannot update secret"
	errApplySecret  = "cannot apply secret"

	errExtractKubernetesAuthCreds = "cannot extract kubernetes auth credentials"
	errBuildRestConfig            = "cannot build rest config kubeconfig"
	errBuildClient                = "cannot build Kubernetes client"
)
// SecretStore is a Kubernetes Secret Store.
type SecretStore struct {
	// client targets the API server holding the connection secrets; it may be
	// the local server or a remote one built from a kubeconfig.
	client resource.ClientApplicator
	// defaultNamespace is used when a secret's scope is empty.
	defaultNamespace string
}
// NewSecretStore returns a new Kubernetes SecretStore.
// The local client is used either directly as the store, or to extract the
// credentials needed to build a client for a remote cluster - see buildClient.
func NewSecretStore(ctx context.Context, local client.Client, cfg v1.SecretStoreConfig) (*SecretStore, error) {
	kube, err := buildClient(ctx, local, cfg)
	if err != nil {
		return nil, errors.Wrap(err, errBuildClient)
	}
	return &SecretStore{
		client: resource.ClientApplicator{
			Client: kube,
			// Retries applies that fail with wrapped API errors.
			Applicator: resource.NewApplicatorWithRetry(resource.NewAPIPatchingApplicator(kube), resource.IsAPIErrorWrapped, nil),
		},
		defaultNamespace: cfg.DefaultScope,
	}, nil
}
// buildClient returns the client to use as the secret store: the local client
// when no Kubernetes store config is set, otherwise a new client built from
// the kubeconfig credentials referenced by the config.
func buildClient(ctx context.Context, local client.Client, cfg v1.SecretStoreConfig) (client.Client, error) {
	if cfg.Kubernetes == nil {
		// No KubernetesSecretStoreConfig provided, local API Server will be
		// used as Secret Store.
		return local, nil
	}
	// Configure client for an external API server with a given Kubeconfig.
	kfg, err := resource.CommonCredentialExtractor(ctx, cfg.Kubernetes.Auth.Source, local, cfg.Kubernetes.Auth.CommonCredentialSelectors)
	if err != nil {
		return nil, errors.Wrap(err, errExtractKubernetesAuthCreds)
	}
	config, err := clientcmd.RESTConfigFromKubeConfig(kfg)
	if err != nil {
		return nil, errors.Wrap(err, errBuildRestConfig)
	}
	return client.New(config, client.Options{})
}
// ReadKeyValues reads and returns key value pairs for a given Kubernetes Secret.
// The secret's data, labels, annotations and type are copied into the
// supplied store.Secret.
func (ss *SecretStore) ReadKeyValues(ctx context.Context, n store.ScopedName, s *store.Secret) error {
	ks := &corev1.Secret{}
	if err := ss.client.Get(ctx, types.NamespacedName{Name: n.Name, Namespace: ss.namespaceForSecret(n)}, ks); err != nil {
		return errors.Wrap(err, errGetSecret)
	}
	s.Data = ks.Data
	s.Metadata = &v1.ConnectionSecretMetadata{
		Labels:      ks.Labels,
		Annotations: ks.Annotations,
		Type:        &ks.Type,
	}
	return nil
}
// WriteKeyValues writes key value pairs to a given Kubernetes Secret.
// It returns true if the secret was created or changed, and false if the
// write turned out to be a no-op.
func (ss *SecretStore) WriteKeyValues(ctx context.Context, s *store.Secret, wo ...store.WriteOption) (bool, error) {
	ks := &corev1.Secret{
		ObjectMeta: metav1.ObjectMeta{
			Name:      s.Name,
			Namespace: ss.namespaceForSecret(s.ScopedName),
		},
		Type: resource.SecretTypeConnection,
		Data: s.Data,
	}
	if s.Metadata != nil {
		ks.Labels = s.Metadata.Labels
		ks.Annotations = s.Metadata.Annotations
		if s.Metadata.Type != nil {
			// An explicitly requested type overrides the connection default.
			ks.Type = *s.Metadata.Type
		}
	}
	// Supplied write options run first; the no-op check is appended last so
	// it sees any mutations the options made to the desired secret.
	ao := applyOptions(wo...)
	ao = append(ao, resource.AllowUpdateIf(func(current, desired runtime.Object) bool {
		// We consider the update to be a no-op and don't allow it if the
		// current and existing secret data are identical.
		return !cmp.Equal(current.(*corev1.Secret).Data, desired.(*corev1.Secret).Data, cmpopts.EquateEmpty())
	}))
	err := ss.client.Apply(ctx, ks, ao...)
	if resource.IsNotAllowed(err) {
		// The update was not allowed because it was a no-op.
		return false, nil
	}
	if err != nil {
		return false, errors.Wrap(err, errApplySecret)
	}
	return true, nil
}
// DeleteKeyValues delete key value pairs from a given Kubernetes Secret.
// If no kv specified, the whole secret instance is deleted.
// If kv specified, those would be deleted and secret instance will be deleted
// only if there is no data left.
func (ss *SecretStore) DeleteKeyValues(ctx context.Context, s *store.Secret, do ...store.DeleteOption) error {
	// NOTE(turkenh): DeleteKeyValues method wouldn't need to do anything if we
	// have used owner references similar to existing implementation. However,
	// this wouldn't work if the K8s API is not the same as where ConnectionSecretOwner
	// object lives, i.e. a remote cluster.
	// Considering there is not much additional value with deletion via garbage
	// collection in this specific case other than one less API call during
	// deletion, I opted for unifying both instead of adding conditional logic
	// like add owner references if not remote and not call delete etc.
	ks := &corev1.Secret{}
	err := ss.client.Get(ctx, types.NamespacedName{Name: s.Name, Namespace: ss.namespaceForSecret(s.ScopedName)}, ks)
	if kerrors.IsNotFound(err) {
		// Secret already deleted, nothing to do.
		return nil
	}
	if err != nil {
		return errors.Wrap(err, errGetSecret)
	}
	// Give the supplied delete options (e.g. ownership checks) a chance to
	// veto the deletion before we mutate anything.
	for _, o := range do {
		if err := o(ctx, s); err != nil {
			return err
		}
	}
	// Delete all supplied keys from secret data
	for k := range s.Data {
		delete(ks.Data, k)
	}
	if len(s.Data) == 0 || len(ks.Data) == 0 {
		// Secret is deleted only if:
		// - No kv to delete specified as input
		// - No data left in the secret
		// errors.Wrap rather than Wrapf: the message has no format verbs.
		return errors.Wrap(ss.client.Delete(ctx, ks), errDeleteSecret)
	}
	// If there are still keys left, update the secret with the remaining.
	return errors.Wrap(ss.client.Update(ctx, ks), errUpdateSecret)
}
// namespaceForSecret resolves the namespace for the supplied scoped name,
// falling back to the store's default namespace when no scope is set.
func (ss *SecretStore) namespaceForSecret(n store.ScopedName) string {
	if scope := n.Scope; scope != "" {
		return scope
	}
	return ss.defaultNamespace
}
// applyOptions adapts store.WriteOptions into resource.ApplyOptions. Each
// adapter converts the current and desired corev1.Secrets to store.Secrets,
// runs the write option against them, then copies any mutations the option
// made back onto the desired corev1.Secret.
func applyOptions(wo ...store.WriteOption) []resource.ApplyOption {
	ao := make([]resource.ApplyOption, len(wo))
	for i := range wo {
		// Capture a per-iteration copy for the closure below.
		o := wo[i]
		ao[i] = func(ctx context.Context, current, desired runtime.Object) error {
			currentSecret := current.(*corev1.Secret)
			desiredSecret := desired.(*corev1.Secret)
			cs := &store.Secret{
				ScopedName: store.ScopedName{
					Name:  currentSecret.Name,
					Scope: currentSecret.Namespace,
				},
				Metadata: &v1.ConnectionSecretMetadata{
					Labels:      currentSecret.Labels,
					Annotations: currentSecret.Annotations,
					Type:        &currentSecret.Type,
				},
				Data: currentSecret.Data,
			}
			ds := &store.Secret{
				ScopedName: store.ScopedName{
					Name:  desiredSecret.Name,
					Scope: desiredSecret.Namespace,
				},
				Metadata: &v1.ConnectionSecretMetadata{
					Labels:      desiredSecret.Labels,
					Annotations: desiredSecret.Annotations,
					Type:        &desiredSecret.Type,
				},
				Data: desiredSecret.Data,
			}
			if err := o(ctx, cs, ds); err != nil {
				return err
			}
			// Propagate any mutations the option made to ds back onto the
			// desired Kubernetes secret.
			desiredSecret.Data = ds.Data
			desiredSecret.Labels = ds.Metadata.Labels
			desiredSecret.Annotations = ds.Metadata.Annotations
			if ds.Metadata.Type != nil {
				desiredSecret.Type = *ds.Metadata.Type
			}
			return nil
		}
	}
	return ao
}

View File

@ -0,0 +1,826 @@
/*
Copyright 2022 The Crossplane Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package kubernetes
import (
"context"
"testing"
"github.com/google/go-cmp/cmp"
corev1 "k8s.io/api/core/v1"
kerrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime/schema"
"sigs.k8s.io/controller-runtime/pkg/client"
v1 "github.com/crossplane/crossplane-runtime/apis/common/v1"
"github.com/crossplane/crossplane-runtime/pkg/connection/store"
"github.com/crossplane/crossplane-runtime/pkg/errors"
"github.com/crossplane/crossplane-runtime/pkg/resource"
"github.com/crossplane/crossplane-runtime/pkg/test"
)
// Shared fixtures for the secret store tests below.
var (
	errBoom             = errors.New("boom")
	fakeSecretName      = "fake"
	fakeSecretNamespace = "fake-namespace"
	storeTypeKubernetes = v1.SecretStoreKubernetes
)
// fakeKV returns a fresh map of fake connection detail key value pairs.
func fakeKV() map[string][]byte {
	kv := make(map[string][]byte, 3)
	for k, v := range map[string]string{
		"key1": "value1",
		"key2": "value2",
		"key3": "value3",
	} {
		kv[k] = []byte(v)
	}
	return kv
}
func fakeLabels() map[string]string {
return map[string]string{
"environment": "unit-test",
"reason": "testing",
}
}
// fakeAnnotations returns a fixed set of annotations used across tests.
func fakeAnnotations() map[string]string {
	a := make(map[string]string, 1)
	a["some-annotation-key"] = "some-annotation-value"
	return a
}
// TestSecretStoreReadKeyValues exercises SecretStore.ReadKeyValues against a
// mocked Kubernetes client.
func TestSecretStoreReadKeyValues(t *testing.T) {
	type args struct {
		client resource.ClientApplicator
		n      store.ScopedName
	}
	type want struct {
		result store.KeyValues
		err    error
	}
	cases := map[string]struct {
		reason string
		args
		want
	}{
		"CannotGetSecret": {
			reason: "Should return a proper error if cannot get the secret",
			args: args{
				client: resource.ClientApplicator{
					Client: &test.MockClient{
						MockGet: test.NewMockGetFn(errBoom),
					},
				},
				n: store.ScopedName{
					Name: fakeSecretName,
				},
			},
			want: want{
				err: errors.Wrap(errBoom, errGetSecret),
			},
		},
		"SuccessfulRead": {
			reason: "Should return all key values after a success read",
			args: args{
				client: resource.ClientApplicator{
					Client: &test.MockClient{
						MockGet: test.NewMockGetFn(nil, func(obj client.Object) error {
							*obj.(*corev1.Secret) = corev1.Secret{
								Data: fakeKV(),
							}
							return nil
						}),
					},
				},
				n: store.ScopedName{
					Name: fakeSecretName,
				},
			},
			want: want{
				result: store.KeyValues(fakeKV()),
			},
		},
	}
	for name, tc := range cases {
		t.Run(name, func(t *testing.T) {
			ss := &SecretStore{
				client: tc.args.client,
			}
			s := &store.Secret{}
			err := ss.ReadKeyValues(context.Background(), tc.args.n, s)
			if diff := cmp.Diff(tc.want.err, err, test.EquateErrors()); diff != "" {
				t.Errorf("\n%s\nss.ReadKeyValues(...): -want error, +got error:\n%s", tc.reason, diff)
			}
			if diff := cmp.Diff(tc.want.result, s.Data); diff != "" {
				t.Errorf("\n%s\nss.ReadKeyValues(...): -want, +got:\n%s", tc.reason, diff)
			}
		})
	}
}
// TestSecretStoreWriteKeyValues exercises SecretStore.WriteKeyValues,
// covering apply failures, write options, no-op detection and metadata
// handling.
func TestSecretStoreWriteKeyValues(t *testing.T) {
	secretTypeOpaque := corev1.SecretTypeOpaque
	type args struct {
		client           resource.ClientApplicator
		defaultNamespace string
		secret           *store.Secret

		wo []store.WriteOption
	}
	type want struct {
		changed bool
		err     error
	}
	cases := map[string]struct {
		reason string
		args
		want
	}{
		"ApplyFailed": {
			reason: "Should return a proper error when cannot apply.",
			args: args{
				client: resource.ClientApplicator{
					Applicator: resource.ApplyFn(func(ctx context.Context, obj client.Object, option ...resource.ApplyOption) error {
						return errBoom
					}),
				},
				secret: &store.Secret{
					ScopedName: store.ScopedName{
						Name:  fakeSecretName,
						Scope: fakeSecretNamespace,
					},
					Data: store.KeyValues(fakeKV()),
				},
			},
			want: want{
				err: errors.Wrap(errBoom, errApplySecret),
			},
		},
		"FailedWriteOption": {
			reason: "Should return a proper error if supplied write option fails",
			args: args{
				client: resource.ClientApplicator{
					Applicator: resource.ApplyFn(func(ctx context.Context, obj client.Object, option ...resource.ApplyOption) error {
						for _, fn := range option {
							if err := fn(ctx, fakeConnectionSecret(withData(fakeKV())), obj); err != nil {
								return err
							}
						}
						return nil
					}),
				},
				secret: &store.Secret{
					ScopedName: store.ScopedName{
						Name:  fakeSecretName,
						Scope: fakeSecretNamespace,
					},
					Data: store.KeyValues(fakeKV()),
				},
				wo: []store.WriteOption{
					func(ctx context.Context, current, desired *store.Secret) error {
						return errBoom
					},
				},
			},
			want: want{
				err: errors.Wrap(errBoom, errApplySecret),
			},
		},
		"SuccessfulWriteOption": {
			// NOTE: the reason string previously duplicated the
			// FailedWriteOption case's text; it now describes this case.
			reason: "Should apply the mutations made by a successful write option and report the secret as changed",
			args: args{
				client: resource.ClientApplicator{
					Applicator: resource.ApplyFn(func(ctx context.Context, obj client.Object, option ...resource.ApplyOption) error {
						for _, fn := range option {
							if err := fn(ctx, fakeConnectionSecret(withData(fakeKV())), obj); err != nil {
								return err
							}
						}
						return nil
					}),
				},
				secret: &store.Secret{
					ScopedName: store.ScopedName{
						Name:  fakeSecretName,
						Scope: fakeSecretNamespace,
					},
					Data: store.KeyValues(fakeKV()),
				},
				wo: []store.WriteOption{
					func(ctx context.Context, current, desired *store.Secret) error {
						desired.Data["customkey"] = []byte("customval")
						desired.Metadata = &v1.ConnectionSecretMetadata{
							Labels: map[string]string{
								"foo": "baz",
							},
						}
						return nil
					},
				},
			},
			want: want{
				changed: true,
			},
		},
		"SecretAlreadyUpToDate": {
			reason: "Should not change secret if already up to date.",
			args: args{
				client: resource.ClientApplicator{
					Applicator: resource.ApplyFn(func(ctx context.Context, obj client.Object, option ...resource.ApplyOption) error {
						for _, fn := range option {
							if err := fn(ctx, fakeConnectionSecret(withData(fakeKV())), obj); err != nil {
								return err
							}
						}
						return nil
					}),
				},
				secret: &store.Secret{
					ScopedName: store.ScopedName{
						Name:  fakeSecretName,
						Scope: fakeSecretNamespace,
					},
					Data: store.KeyValues(fakeKV()),
				},
			},
		},
		"SecretUpdatedWithNewValue": {
			reason: "Should update value for an existing key if changed.",
			args: args{
				client: resource.ClientApplicator{
					Applicator: resource.ApplyFn(func(ctx context.Context, obj client.Object, option ...resource.ApplyOption) error {
						if diff := cmp.Diff(fakeConnectionSecret(withData(map[string][]byte{
							"existing-key": []byte("new-value"),
						})), obj.(*corev1.Secret)); diff != "" {
							t.Errorf("r: -want, +got:\n%s", diff)
						}
						for _, fn := range option {
							if err := fn(ctx, fakeConnectionSecret(withData(map[string][]byte{
								"existing-key": []byte("old-value"),
							})), obj); err != nil {
								return err
							}
						}
						return nil
					}),
				},
				secret: &store.Secret{
					ScopedName: store.ScopedName{
						Name:  fakeSecretName,
						Scope: fakeSecretNamespace,
					},
					Data: store.KeyValues(map[string][]byte{
						"existing-key": []byte("new-value"),
					}),
				},
			},
			want: want{
				changed: true,
			},
		},
		"SecretUpdatedWithNewKey": {
			reason: "Should update existing secret additively if a new key added",
			args: args{
				client: resource.ClientApplicator{
					Applicator: resource.ApplyFn(func(ctx context.Context, obj client.Object, option ...resource.ApplyOption) error {
						if diff := cmp.Diff(fakeConnectionSecret(withData(map[string][]byte{
							"new-key": []byte("new-value"),
						})), obj.(*corev1.Secret)); diff != "" {
							t.Errorf("r: -want, +got:\n%s", diff)
						}
						for _, fn := range option {
							if err := fn(ctx, fakeConnectionSecret(withData(map[string][]byte{
								"existing-key": []byte("existing-value"),
							})), obj); err != nil {
								return err
							}
						}
						return nil
					}),
				},
				secret: &store.Secret{
					ScopedName: store.ScopedName{
						Name:  fakeSecretName,
						Scope: fakeSecretNamespace,
					},
					Data: store.KeyValues(map[string][]byte{
						"new-key": []byte("new-value"),
					}),
				},
			},
			want: want{
				changed: true,
			},
		},
		"SecretCreatedWithData": {
			reason: "Should create a secret with all key values with default type.",
			args: args{
				client: resource.ClientApplicator{
					Applicator: resource.ApplyFn(func(ctx context.Context, obj client.Object, option ...resource.ApplyOption) error {
						if diff := cmp.Diff(fakeConnectionSecret(withData(fakeKV())), obj.(*corev1.Secret)); diff != "" {
							t.Errorf("r: -want, +got:\n%s", diff)
						}
						for _, fn := range option {
							if err := fn(ctx, &corev1.Secret{}, obj); err != nil {
								return err
							}
						}
						return nil
					}),
				},
				secret: &store.Secret{
					ScopedName: store.ScopedName{
						Name:  fakeSecretName,
						Scope: fakeSecretNamespace,
					},
					Data: store.KeyValues(fakeKV()),
				},
			},
			want: want{
				changed: true,
			},
		},
		"SecretCreatedWithDataAndMetadata": {
			reason: "Should create a secret with all key values and provided metadata data.",
			args: args{
				client: resource.ClientApplicator{
					Applicator: resource.ApplyFn(func(ctx context.Context, obj client.Object, option ...resource.ApplyOption) error {
						if diff := cmp.Diff(fakeConnectionSecret(
							withData(fakeKV()),
							withType(corev1.SecretTypeOpaque),
							withLabels(fakeLabels()),
							withAnnotations(fakeAnnotations())), obj.(*corev1.Secret)); diff != "" {
							t.Errorf("r: -want, +got:\n%s", diff)
						}
						for _, fn := range option {
							if err := fn(ctx, &corev1.Secret{}, obj); err != nil {
								return err
							}
						}
						return nil
					}),
				},
				secret: &store.Secret{
					ScopedName: store.ScopedName{
						Name:  fakeSecretName,
						Scope: fakeSecretNamespace,
					},
					Metadata: &v1.ConnectionSecretMetadata{
						Labels: map[string]string{
							"environment": "unit-test",
							"reason":      "testing",
						},
						Annotations: map[string]string{
							"some-annotation-key": "some-annotation-value",
						},
						Type: &secretTypeOpaque,
					},
					Data: store.KeyValues(fakeKV()),
				},
			},
			want: want{
				changed: true,
			},
		},
	}
	for name, tc := range cases {
		t.Run(name, func(t *testing.T) {
			ss := &SecretStore{
				client:           tc.args.client,
				defaultNamespace: tc.args.defaultNamespace,
			}
			changed, err := ss.WriteKeyValues(context.Background(), tc.args.secret, tc.args.wo...)
			if diff := cmp.Diff(tc.want.err, err, test.EquateErrors()); diff != "" {
				t.Errorf("\n%s\nss.WriteKeyValues(...): -want error, +got error:\n%s", tc.reason, diff)
			}
			if diff := cmp.Diff(tc.want.changed, changed); diff != "" {
				t.Errorf("\n%s\nss.WriteKeyValues(...): -want changed, +got changed:\n%s", tc.reason, diff)
			}
		})
	}
}
// TestSecretStoreDeleteKeyValues exercises SecretStore.DeleteKeyValues,
// covering partial key deletion, whole-secret deletion, delete options and
// the already-deleted case.
func TestSecretStoreDeleteKeyValues(t *testing.T) {
	type args struct {
		client           resource.ClientApplicator
		defaultNamespace string
		secret           *store.Secret

		do []store.DeleteOption
	}
	type want struct {
		err error
	}
	cases := map[string]struct {
		reason string
		args
		want
	}{
		"CannotGetSecret": {
			reason: "Should return a proper error when it fails to get secret.",
			args: args{
				client: resource.ClientApplicator{
					Client: &test.MockClient{
						MockGet: test.NewMockGetFn(errBoom),
					},
				},
				secret: &store.Secret{
					ScopedName: store.ScopedName{
						Name:  fakeSecretName,
						Scope: fakeSecretNamespace,
					},
				},
			},
			want: want{
				err: errors.Wrap(errBoom, errGetSecret),
			},
		},
		"SecretUpdatedWithRemainingKeys": {
			reason: "Should remove supplied keys from secret and update with remaining.",
			args: args{
				client: resource.ClientApplicator{
					Client: &test.MockClient{
						MockGet: test.NewMockGetFn(nil, func(obj client.Object) error {
							*obj.(*corev1.Secret) = *fakeConnectionSecret(withData(fakeKV()))
							return nil
						}),
						MockUpdate: func(ctx context.Context, obj client.Object, opts ...client.UpdateOption) error {
							// Only key3 should remain after key1/key2 are deleted.
							if diff := cmp.Diff(fakeConnectionSecret(withData(map[string][]byte{"key3": []byte("value3")})), obj.(*corev1.Secret)); diff != "" {
								t.Errorf("r: -want, +got:\n%s", diff)
							}
							return nil
						},
					},
				},
				secret: &store.Secret{
					ScopedName: store.ScopedName{
						Name:  fakeSecretName,
						Scope: fakeSecretNamespace,
					},
					Data: store.KeyValues(map[string][]byte{
						"key1": []byte("value1"),
						"key2": []byte("value2"),
					}),
				},
			},
			want: want{
				err: nil,
			},
		},
		"CannotDeleteSecret": {
			reason: "Should return a proper error when it fails to delete secret.",
			args: args{
				client: resource.ClientApplicator{
					Client: &test.MockClient{
						MockGet: test.NewMockGetFn(nil, func(obj client.Object) error {
							*obj.(*corev1.Secret) = *fakeConnectionSecret()
							return nil
						}),
						MockDelete: test.NewMockDeleteFn(errBoom),
					},
				},
				secret: &store.Secret{
					ScopedName: store.ScopedName{
						Name:  fakeSecretName,
						Scope: fakeSecretNamespace,
					},
				},
			},
			want: want{
				err: errors.Wrap(errBoom, errDeleteSecret),
			},
		},
		"SecretAlreadyDeleted": {
			reason: "Should not return error if secret already deleted.",
			args: args{
				client: resource.ClientApplicator{
					Client: &test.MockClient{
						MockGet: test.NewMockGetFn(nil, func(obj client.Object) error {
							return kerrors.NewNotFound(schema.GroupResource{}, "")
						}),
					},
				},
				secret: &store.Secret{
					ScopedName: store.ScopedName{
						Name:  fakeSecretName,
						Scope: fakeSecretNamespace,
					},
				},
			},
			want: want{
				err: nil,
			},
		},
		"FailedDeleteOption": {
			reason: "Should return a proper error if provided delete option fails.",
			args: args{
				client: resource.ClientApplicator{
					Client: &test.MockClient{
						MockGet: test.NewMockGetFn(nil, func(obj client.Object) error {
							*obj.(*corev1.Secret) = *fakeConnectionSecret(withData(fakeKV()))
							return nil
						}),
						MockDelete: func(ctx context.Context, obj client.Object, opts ...client.DeleteOption) error {
							return nil
						},
					},
				},
				secret: &store.Secret{
					ScopedName: store.ScopedName{
						Name:  fakeSecretName,
						Scope: fakeSecretNamespace,
					},
				},
				do: []store.DeleteOption{
					func(ctx context.Context, secret *store.Secret) error {
						return errBoom
					},
				},
			},
			want: want{
				// Delete option errors propagate unwrapped.
				err: errBoom,
			},
		},
		"SecretDeletedNoKVSupplied": {
			reason: "Should delete the whole secret if no kv supplied as parameter.",
			args: args{
				client: resource.ClientApplicator{
					Client: &test.MockClient{
						MockGet: test.NewMockGetFn(nil, func(obj client.Object) error {
							*obj.(*corev1.Secret) = *fakeConnectionSecret(withData(fakeKV()))
							return nil
						}),
						MockDelete: func(ctx context.Context, obj client.Object, opts ...client.DeleteOption) error {
							return nil
						},
					},
				},
				secret: &store.Secret{
					ScopedName: store.ScopedName{
						Name:  fakeSecretName,
						Scope: fakeSecretNamespace,
					},
				},
				do: []store.DeleteOption{
					func(ctx context.Context, secret *store.Secret) error {
						return nil
					},
				},
			},
			want: want{
				err: nil,
			},
		},
	}
	for name, tc := range cases {
		t.Run(name, func(t *testing.T) {
			ss := &SecretStore{
				client:           tc.args.client,
				defaultNamespace: tc.args.defaultNamespace,
			}
			err := ss.DeleteKeyValues(context.Background(), tc.args.secret, tc.args.do...)
			if diff := cmp.Diff(tc.want.err, err, test.EquateErrors()); diff != "" {
				t.Errorf("\n%s\nss.DeleteKeyValues(...): -want error, +got error:\n%s", tc.reason, diff)
			}
		})
	}
}
// TestNewSecretStore exercises NewSecretStore construction for both the
// local-cluster case and the remote case where a kubeconfig is loaded from a
// Kubernetes secret. Error cases pin the exact error-wrapping chain so the
// messages callers see stay stable.
func TestNewSecretStore(t *testing.T) {
	type args struct {
		client resource.ClientApplicator
		cfg    v1.SecretStoreConfig
	}
	type want struct {
		err error
	}
	cases := map[string]struct {
		reason string
		args
		want
	}{
		"SuccessfulLocal": {
			reason: "Should return no error after successfully building local Kubernetes secret store",
			args: args{
				client: resource.ClientApplicator{},
				cfg: v1.SecretStoreConfig{
					Type:         &storeTypeKubernetes,
					DefaultScope: "test-ns",
				},
			},
			want: want{
				err: nil,
			},
		},
		"NoSecretWithRemoteKubeconfig": {
			reason: "Should fail properly if configured kubeconfig secret does not exist",
			args: args{
				client: resource.ClientApplicator{
					Client: &test.MockClient{
						MockGet: test.NewMockGetFn(nil, func(obj client.Object) error {
							return kerrors.NewNotFound(schema.GroupResource{}, "kube-conn")
						}),
					},
				},
				cfg: v1.SecretStoreConfig{
					Type:         &storeTypeKubernetes,
					DefaultScope: "test-ns",
					Kubernetes: &v1.KubernetesSecretStoreConfig{
						Auth: v1.KubernetesAuthConfig{
							Source: v1.CredentialsSourceSecret,
							CommonCredentialSelectors: v1.CommonCredentialSelectors{
								SecretRef: &v1.SecretKeySelector{
									SecretReference: v1.SecretReference{
										Name:      "kube-conn",
										Namespace: "test-ns",
									},
									Key: "kubeconfig",
								},
							},
						},
					},
				},
			},
			want: want{
				err: errors.Wrap(errors.Wrap(errors.Wrap(kerrors.NewNotFound(schema.GroupResource{}, "kube-conn"), "cannot get credentials secret"), errExtractKubernetesAuthCreds), errBuildClient),
			},
		},
		"InvalidRestConfigForRemote": {
			reason: "Should fetch the configured kubeconfig and fail if it is not valid",
			args: args{
				client: resource.ClientApplicator{
					Client: &test.MockClient{
						MockGet: test.NewMockGetFn(nil, func(obj client.Object) error {
							*obj.(*corev1.Secret) = corev1.Secret{
								ObjectMeta: metav1.ObjectMeta{
									Name:      "kube-conn",
									Namespace: "test-ns",
								},
								Data: map[string][]byte{
									// Deliberately malformed YAML; the expected
									// error below pins the parser message.
									"kubeconfig": []byte(`
apiVersion: v1
kind: Config
malformed
`),
								},
							}
							return nil
						}),
					},
				},
				cfg: v1.SecretStoreConfig{
					Type:         &storeTypeKubernetes,
					DefaultScope: "test-ns",
					Kubernetes: &v1.KubernetesSecretStoreConfig{
						Auth: v1.KubernetesAuthConfig{
							Source: v1.CredentialsSourceSecret,
							CommonCredentialSelectors: v1.CommonCredentialSelectors{
								SecretRef: &v1.SecretKeySelector{
									SecretReference: v1.SecretReference{
										Name:      "kube-conn",
										Namespace: "test-ns",
									},
									Key: "kubeconfig",
								},
							},
						},
					},
				},
			},
			want: want{
				err: errors.Wrap(errors.Wrap(errors.New("yaml: line 5: could not find expected ':'"), errBuildRestConfig), errBuildClient),
			},
		},
		"InvalidKubeconfigForRemote": {
			reason: "Should fetch the configured kubeconfig and fail if it is not valid",
			args: args{
				client: resource.ClientApplicator{
					Client: &test.MockClient{
						MockGet: test.NewMockGetFn(nil, func(obj client.Object) error {
							*obj.(*corev1.Secret) = corev1.Secret{
								ObjectMeta: metav1.ObjectMeta{
									Name:      "kube-conn",
									Namespace: "test-ns",
								},
								Data: map[string][]byte{
									// Parses as a kubeconfig but the CA data is
									// not valid PEM, so client building fails.
									"kubeconfig": []byte(`
apiVersion: v1
clusters:
- cluster:
    certificate-authority-data: TEST
    server: https://127.0.0.1:64695
  name: kind-kind
contexts:
- context:
    cluster: kind-kind
    namespace: crossplane-system
    user: kind-kind
  name: kind-kind
current-context: kind-kind
kind: Config
users:
- name: kind-kind
  user: {}
`),
								},
							}
							return nil
						}),
					},
				},
				cfg: v1.SecretStoreConfig{
					Type:         &storeTypeKubernetes,
					DefaultScope: "test-ns",
					Kubernetes: &v1.KubernetesSecretStoreConfig{
						Auth: v1.KubernetesAuthConfig{
							Source: v1.CredentialsSourceSecret,
							CommonCredentialSelectors: v1.CommonCredentialSelectors{
								SecretRef: &v1.SecretKeySelector{
									SecretReference: v1.SecretReference{
										Name:      "kube-conn",
										Namespace: "test-ns",
									},
									Key: "kubeconfig",
								},
							},
						},
					},
				},
			},
			want: want{
				err: errors.Wrap(errors.New("unable to load root certificates: unable to parse bytes as PEM block"), errBuildClient),
			},
		},
	}
	for name, tc := range cases {
		t.Run(name, func(t *testing.T) {
			_, err := NewSecretStore(context.Background(), tc.args.client, tc.args.cfg)
			if diff := cmp.Diff(tc.want.err, err, test.EquateErrors()); diff != "" {
				t.Errorf("\n%s\nNewSecretStore(...): -want error, +got error:\n%s", tc.reason, diff)
			}
		})
	}
}
type secretOption func(*corev1.Secret)
func withType(t corev1.SecretType) secretOption {
return func(s *corev1.Secret) {
s.Type = t
}
}
// withData returns an option that sets the secret's data payload.
func withData(payload map[string][]byte) secretOption {
	return func(sec *corev1.Secret) {
		sec.Data = payload
	}
}
// withLabels returns an option that sets the secret's labels.
func withLabels(labels map[string]string) secretOption {
	return func(sec *corev1.Secret) {
		sec.Labels = labels
	}
}
// withAnnotations returns an option that sets the secret's annotations.
func withAnnotations(annotations map[string]string) secretOption {
	return func(sec *corev1.Secret) {
		sec.Annotations = annotations
	}
}
// fakeConnectionSecret builds a connection-typed secret in the fake
// name/namespace used across these tests, applying any supplied options.
func fakeConnectionSecret(opts ...secretOption) *corev1.Secret {
	sec := &corev1.Secret{
		ObjectMeta: metav1.ObjectMeta{
			Name:      fakeSecretName,
			Namespace: fakeSecretNamespace,
		},
		Type: resource.SecretTypeConnection,
	}
	for _, apply := range opts {
		apply(sec)
	}
	return sec
}

View File

@ -0,0 +1,91 @@
/*
Copyright 2022 The Crossplane Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package store
import (
"context"
v1 "github.com/crossplane/crossplane-runtime/apis/common/v1"
"github.com/crossplane/crossplane-runtime/pkg/resource"
)
// SecretOwner owns a Secret. It is both a Kubernetes object and something
// that may publish its connection details to a secret store.
type SecretOwner interface {
	resource.Object
	resource.ConnectionDetailsPublisherTo
}

// KeyValues is a map with sensitive values.
type KeyValues map[string][]byte

// ScopedName is scoped name of a secret.
type ScopedName struct {
	// Name of the secret.
	Name string
	// Scope of the secret, e.g. the namespace for Kubernetes-backed stores.
	Scope string
}

// A Secret is an entity representing a set of sensitive Key Values.
type Secret struct {
	// ScopedName identifies the secret within its store.
	ScopedName
	// Metadata holds secret metadata such as labels and the owner UID.
	Metadata *v1.ConnectionSecretMetadata
	// Data holds the sensitive key values.
	Data KeyValues
}
// NewSecret returns a new Secret owned by supplied SecretOwner and with
// supplied data. It returns nil if the owner does not publish its connection
// details to a secret.
//
// Note that the owner's PublishConnectionDetailsTo metadata is mutated in
// place to record the owner's UID.
func NewSecret(so SecretOwner, data KeyValues) *Secret {
	// Call the getter once and reuse the result rather than calling it twice.
	p := so.GetPublishConnectionDetailsTo()
	if p == nil {
		return nil
	}
	if p.Metadata == nil {
		p.Metadata = &v1.ConnectionSecretMetadata{}
	}
	p.Metadata.SetOwnerUID(so.GetUID())
	return &Secret{
		ScopedName: ScopedName{
			Name:  p.Name,
			Scope: so.GetNamespace(),
		},
		Metadata: p.Metadata,
		Data:     data,
	}
}
// GetOwner returns the UID of the owner of secret, or the empty string when
// no metadata is set.
func (s *Secret) GetOwner() string {
	if m := s.Metadata; m != nil {
		return m.GetOwnerUID()
	}
	return ""
}
// GetLabels returns the labels of the secret, or nil when no metadata is set.
func (s *Secret) GetLabels() map[string]string {
	if m := s.Metadata; m != nil {
		return m.Labels
	}
	return nil
}
// A WriteOption is called before writing the desired secret over the
// current object.
type WriteOption func(ctx context.Context, current, desired *Secret) error

// A DeleteOption is called before deleting the secret.
type DeleteOption func(ctx context.Context, secret *Secret) error

View File

@ -0,0 +1,43 @@
/*
Copyright 2022 The Crossplane Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package fake
import (
"github.com/crossplane/crossplane-runtime/pkg/connection/store/vault/kv"
)
// KVClient is a fake KVClient whose behavior is configured via its
// function fields.
type KVClient struct {
	GetFn    func(path string, secret *kv.Secret) error
	ApplyFn  func(path string, secret *kv.Secret, ao ...kv.ApplyOption) error
	DeleteFn func(path string) error
}

// Get fetches a secret at a given path by delegating to GetFn.
func (k *KVClient) Get(path string, secret *kv.Secret) error {
	return k.GetFn(path, secret)
}

// Apply creates or updates a secret at a given path by delegating to ApplyFn.
func (k *KVClient) Apply(path string, secret *kv.Secret, ao ...kv.ApplyOption) error {
	return k.ApplyFn(path, secret, ao...)
}

// Delete deletes a secret at a given path by delegating to DeleteFn.
func (k *KVClient) Delete(path string) error {
	return k.DeleteFn(path)
}

View File

@ -0,0 +1,43 @@
/*
Copyright 2022 The Crossplane Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package fake
import (
"github.com/hashicorp/vault/api"
)
// LogicalClient is a fake LogicalClient whose behavior is configured via its
// function fields.
type LogicalClient struct {
	ReadFn   func(path string) (*api.Secret, error)
	WriteFn  func(path string, data map[string]interface{}) (*api.Secret, error)
	DeleteFn func(path string) (*api.Secret, error)
}

// Read reads secret at the given path by delegating to ReadFn.
func (l *LogicalClient) Read(path string) (*api.Secret, error) {
	return l.ReadFn(path)
}

// Write writes data to the given path by delegating to WriteFn.
func (l *LogicalClient) Write(path string, data map[string]interface{}) (*api.Secret, error) {
	return l.WriteFn(path, data)
}

// Delete deletes secret at the given path by delegating to DeleteFn.
func (l *LogicalClient) Delete(path string) (*api.Secret, error) {
	return l.DeleteFn(path)
}

View File

@ -0,0 +1,99 @@
/*
Copyright 2022 The Crossplane Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package kv
import (
"encoding/json"
"github.com/hashicorp/vault/api"
"github.com/crossplane/crossplane-runtime/pkg/resource"
)
// Error strings used by the KV clients.
const (
	errGet       = "cannot get secret"
	errDelete    = "cannot delete secret"
	errRead      = "cannot read secret"
	errWriteData = "cannot write secret Data"

	errUpdateNotAllowed = "update not allowed"

	// ErrNotFound is the error returned when secret does not exist.
	ErrNotFound = "secret not found"
)

// LogicalClient is a client to perform logical backend operations on Vault.
type LogicalClient interface {
	// Read reads the secret at the given path. Per the tests below, Vault's
	// logical client returns nil, nil when the secret does not exist.
	Read(path string) (*api.Secret, error)
	// Write writes the given data to the given path.
	Write(path string, data map[string]interface{}) (*api.Secret, error)
	// Delete deletes the secret at the given path.
	Delete(path string) (*api.Secret, error)
}
// Secret is a Vault KV secret.
type Secret struct {
	// CustomMeta holds custom metadata associated with the secret.
	CustomMeta map[string]string
	// Data holds the secret's key values.
	Data map[string]string
	// version is used by the KV v2 client for check-and-set writes; it is
	// unexported and left empty for v1 secrets.
	version json.Number
}

// NewSecret returns a new Secret with the supplied data and custom metadata.
func NewSecret(data map[string]string, meta map[string]string) *Secret {
	return &Secret{
		Data:       data,
		CustomMeta: meta,
	}
}
// AddData records val under key in the secret's data, allocating the map on
// first use.
func (kv *Secret) AddData(key string, val string) {
	if kv.Data == nil {
		kv.Data = make(map[string]string)
	}
	kv.Data[key] = val
}
// AddMetadata records val under key in the secret's custom metadata,
// allocating the map on first use.
func (kv *Secret) AddMetadata(key string, val string) {
	if kv.CustomMeta == nil {
		kv.CustomMeta = make(map[string]string)
	}
	kv.CustomMeta[key] = val
}
// An ApplyOption is called before patching the current secret to match the
// desired secret. ApplyOptions are not called if no current object exists.
type ApplyOption func(current, desired *Secret) error

// AllowUpdateIf will only update the current object if the supplied fn returns
// true. An error that satisfies IsNotAllowed will be returned if the supplied
// function returns false. Creation of a desired object that does not currently
// exist is always allowed.
func AllowUpdateIf(fn func(current, desired *Secret) bool) ApplyOption {
	return func(current, desired *Secret) error {
		if !fn(current, desired) {
			return resource.NewNotAllowed(errUpdateNotAllowed)
		}
		return nil
	}
}
// IsNotFound reports whether the supplied error indicates that a secret does
// not exist. A nil error is never "not found".
func IsNotFound(err error) bool {
	return err != nil && err.Error() == ErrNotFound
}

View File

@ -0,0 +1,133 @@
/*
Copyright 2022 The Crossplane Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package kv
import (
"path/filepath"
"strings"
"github.com/hashicorp/vault/api"
"github.com/crossplane/crossplane-runtime/pkg/errors"
"github.com/crossplane/crossplane-runtime/pkg/resource"
)
// We use this prefix to store metadata of v1 secrets as there is no dedicated
// metadata. Considering a connection key cannot contain ":" (since it is not
// in the set of allowed chars for a k8s secret key), it is safe to assume
// there is no actual connection data starting with this prefix.
const metadataPrefix = "metadata:"

// V1Client is a Vault KV V1 Secrets Engine client.
// https://www.vaultproject.io/api-docs/secret/kv/kv-v1
type V1Client struct {
	// client performs the raw logical read/write/delete operations.
	client LogicalClient
	// mountPath is where the KV engine is mounted; it prefixes every path.
	mountPath string
}
// NewV1Client returns a new V1Client backed by the supplied logical client
// and rooted at the supplied mount path.
func NewV1Client(logical LogicalClient, mountPath string) *V1Client {
	return &V1Client{
		client:    logical,
		mountPath: mountPath,
	}
}
// Get returns a Secret at a given path.
func (c *V1Client) Get(path string, secret *Secret) error {
	raw, err := c.client.Read(filepath.Join(c.mountPath, path))
	switch {
	case err != nil:
		return errors.Wrap(err, errRead)
	case raw == nil:
		// The logical client yields a nil secret and nil error when nothing
		// exists at the path.
		return errors.New(ErrNotFound)
	}
	return c.parseAsSecret(raw, secret)
}
// Apply applies given Secret at path by patching its Data and setting
// provided custom metadata.
func (c *V1Client) Apply(path string, secret *Secret, ao ...ApplyOption) error {
	existing := &Secret{}
	err := c.Get(path, existing)
	// A NotFound error just means we will create the secret; anything else
	// is fatal.
	if resource.Ignore(IsNotFound, err) != nil {
		return errors.Wrap(err, errGet)
	}
	// ApplyOptions are only consulted when a current secret exists.
	if !IsNotFound(err) {
		for _, o := range ao {
			if err = o(existing, secret); err != nil {
				return err
			}
		}
	}
	dp, changed := payloadV1(existing, secret)
	// Skip the write entirely when the secret is already up to date.
	if !changed {
		return nil
	}
	_, err = c.client.Write(filepath.Join(c.mountPath, path), dp)
	return errors.Wrap(err, errWriteData)
}
// Delete deletes Secret at the given path.
func (c *V1Client) Delete(path string) error {
	if _, err := c.client.Delete(filepath.Join(c.mountPath, path)); err != nil {
		return errors.Wrap(err, errDelete)
	}
	return nil
}
// parseAsSecret converts a raw Vault API secret into a kv.Secret, routing
// "metadata:"-prefixed keys into custom metadata and everything else into
// data. Non-string values are skipped.
func (c *V1Client) parseAsSecret(s *api.Secret, kv *Secret) error {
	for key, val := range s.Data {
		str, ok := val.(string)
		if !ok {
			continue
		}
		if trimmed := strings.TrimPrefix(key, metadataPrefix); trimmed != key {
			kv.AddMetadata(trimmed, str)
			continue
		}
		kv.AddData(key, str)
	}
	return nil
}
// payloadV1 computes the KV v1 write payload required to move from the
// existing secret to the new one, and reports whether a write is needed at
// all. KV v1 writes replace the whole secret, so unchanged existing data keys
// are carried over; existing "metadata:" keys are not, because metadata
// updates are authoritative rather than additive.
func payloadV1(existing, new *Secret) (map[string]interface{}, bool) {
	payload := make(map[string]interface{}, len(existing.Data)+len(new.Data))
	for k, v := range existing.Data {
		// Only transfer existing data, metadata updates are not additive.
		if !strings.HasPrefix(k, metadataPrefix) {
			payload[k] = v
		}
	}
	changed := false
	for k, v := range new.Data {
		if ev, ok := existing.Data[k]; !ok || ev != v {
			changed = true
			payload[k] = v
		}
	}
	for k, v := range new.CustomMeta {
		// kv secret engine v1 does not have metadata. So, we store them as
		// data by prefixing with "metadata:". A write is needed when the key
		// is new OR its value differs from the existing one. (The previous
		// `!ok && val != v` condition missed value-only changes, mirroring
		// neither the data loop above nor v2's metadataPayload.)
		if val, ok := existing.CustomMeta[k]; !ok || val != v {
			changed = true
		}
		payload[metadataPrefix+k] = v
	}
	return payload, changed
}

View File

@ -0,0 +1,491 @@
/*
Copyright 2022 The Crossplane Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package kv
import (
"path/filepath"
"testing"
"github.com/google/go-cmp/cmp"
"github.com/google/go-cmp/cmp/cmpopts"
"github.com/hashicorp/vault/api"
"github.com/crossplane/crossplane-runtime/pkg/connection/store/vault/kv/fake"
"github.com/crossplane/crossplane-runtime/pkg/errors"
"github.com/crossplane/crossplane-runtime/pkg/resource"
"github.com/crossplane/crossplane-runtime/pkg/test"
)
// TestV1ClientGet verifies reading from the KV v1 engine: error propagation,
// the nil-nil not-found convention, and parsing of "metadata:"-prefixed keys
// into custom metadata.
func TestV1ClientGet(t *testing.T) {
	type args struct {
		client LogicalClient
		path   string
	}
	type want struct {
		err error
		out *Secret
	}
	cases := map[string]struct {
		reason string
		args
		want
	}{
		"ErrorWhileGettingSecret": {
			reason: "Should return a proper error if getting secret failed.",
			args: args{
				client: &fake.LogicalClient{
					ReadFn: func(path string) (*api.Secret, error) {
						return nil, errBoom
					},
				},
				path: secretName,
			},
			want: want{
				err: errors.Wrap(errBoom, errRead),
				out: NewSecret(nil, nil),
			},
		},
		"SecretNotFound": {
			reason: "Should return a notFound error if secret does not exist.",
			args: args{
				client: &fake.LogicalClient{
					ReadFn: func(path string) (*api.Secret, error) {
						// Vault logical client returns both error and secret as
						// nil if secret does not exist.
						return nil, nil
					},
				},
				path: secretName,
			},
			want: want{
				err: errors.New(ErrNotFound),
				out: NewSecret(nil, nil),
			},
		},
		"SuccessfulGet": {
			reason: "Should successfully return secret from v1 KV engine.",
			args: args{
				client: &fake.LogicalClient{
					ReadFn: func(path string) (*api.Secret, error) {
						if diff := cmp.Diff(filepath.Join(mountPath, secretName), path); diff != "" {
							t.Errorf("r: -want, +got:\n%s", diff)
						}
						return &api.Secret{
							Data: map[string]interface{}{
								"foo":                               "bar",
								metadataPrefix + "owner":            "jdoe",
								metadataPrefix + "mission_critical": "false",
							},
						}, nil
					},
				},
				path: secretName,
			},
			want: want{
				out: NewSecret(map[string]string{
					"foo": "bar",
				}, map[string]string{
					"owner":            "jdoe",
					"mission_critical": "false",
				}),
			},
		},
	}
	for name, tc := range cases {
		t.Run(name, func(t *testing.T) {
			k := NewV1Client(tc.args.client, mountPath)
			s := Secret{}
			err := k.Get(tc.args.path, &s)
			if diff := cmp.Diff(tc.want.err, err, test.EquateErrors()); diff != "" {
				t.Errorf("\n%s\nv1Client.Get(...): -want error, +got error:\n%s", tc.reason, diff)
			}
			if diff := cmp.Diff(tc.want.out, &s, cmpopts.IgnoreUnexported(Secret{})); diff != "" {
				t.Errorf("\n%s\nv1Client.Get(...): -want, +got:\n%s", tc.reason, diff)
			}
		})
	}
}
// TestV1ClientApply verifies create/update behavior against the KV v1
// engine: error propagation, the no-write fast path when already up to date,
// ApplyOption gating of updates, and merge semantics for data and
// "metadata:"-prefixed keys.
func TestV1ClientApply(t *testing.T) {
	type args struct {
		client LogicalClient
		in     *Secret
		path   string
		ao     []ApplyOption
	}
	type want struct {
		err error
	}
	cases := map[string]struct {
		reason string
		args
		want
	}{
		"ErrorWhileReadingSecret": {
			reason: "Should return a proper error if reading secret failed.",
			args: args{
				client: &fake.LogicalClient{
					ReadFn: func(path string) (*api.Secret, error) {
						return nil, errBoom
					},
				},
				path: secretName,
			},
			want: want{
				err: errors.Wrap(errors.Wrap(errBoom, errRead), errGet),
			},
		},
		"ErrorWhileWritingData": {
			reason: "Should return a proper error if writing secret failed.",
			args: args{
				client: &fake.LogicalClient{
					ReadFn: func(path string) (*api.Secret, error) {
						return &api.Secret{
							Data: map[string]interface{}{
								"key1": "val1",
								"key2": "val2",
							},
						}, nil
					},
					WriteFn: func(path string, data map[string]interface{}) (*api.Secret, error) {
						return nil, errBoom
					},
				},
				path: secretName,
				in: NewSecret(map[string]string{
					"key1": "val1updated",
					"key3": "val3",
				}, nil),
			},
			want: want{
				err: errors.Wrap(errBoom, errWriteData),
			},
		},
		"AlreadyUpToDate": {
			reason: "Should not perform a write if a v1 secret is already up to date.",
			args: args{
				client: &fake.LogicalClient{
					ReadFn: func(path string) (*api.Secret, error) {
						return &api.Secret{
							Data: map[string]interface{}{
								"foo":                               "bar",
								metadataPrefix + "owner":            "jdoe",
								metadataPrefix + "mission_critical": "false",
							},
						}, nil
					},
					// Any write here fails the test: the payload is unchanged.
					WriteFn: func(path string, data map[string]interface{}) (*api.Secret, error) {
						return nil, errors.New("no write operation expected")
					},
				},
				path: secretName,
				in: NewSecret(map[string]string{
					"foo": "bar",
				}, map[string]string{
					"owner":            "jdoe",
					"mission_critical": "false",
				}),
			},
			want: want{
				err: nil,
			},
		},
		"SuccessfulCreate": {
			reason: "Should successfully create with new data if secret does not exists.",
			args: args{
				client: &fake.LogicalClient{
					ReadFn: func(path string) (*api.Secret, error) {
						// Vault logical client returns both error and secret as
						// nil if secret does not exist.
						return nil, nil
					},
					WriteFn: func(path string, data map[string]interface{}) (*api.Secret, error) {
						if diff := cmp.Diff(filepath.Join(mountPath, secretName), path); diff != "" {
							t.Errorf("r: -want, +got:\n%s", diff)
						}
						if diff := cmp.Diff(map[string]interface{}{
							"key1": "val1",
							"key2": "val2",
						}, data); diff != "" {
							t.Errorf("r: -want, +got:\n%s", diff)
						}
						return nil, nil
					},
				},
				path: secretName,
				in: NewSecret(map[string]string{
					"key1": "val1",
					"key2": "val2",
				}, nil),
			},
			want: want{
				err: nil,
			},
		},
		"UpdateNotAllowed": {
			reason: "Should return not allowed error if update is not allowed.",
			args: args{
				client: &fake.LogicalClient{
					ReadFn: func(path string) (*api.Secret, error) {
						return &api.Secret{
							Data: map[string]interface{}{
								"key1": "val1",
								"key2": "val2",
							},
						}, nil
					},
					WriteFn: func(path string, data map[string]interface{}) (*api.Secret, error) {
						if diff := cmp.Diff(filepath.Join(mountPath, secretName), path); diff != "" {
							t.Errorf("r: -want, +got:\n%s", diff)
						}
						if diff := cmp.Diff(map[string]interface{}{
							"key1": "val1updated",
							"key2": "val2",
							"key3": "val3",
						}, data); diff != "" {
							t.Errorf("r: -want, +got:\n%s", diff)
						}
						return nil, nil
					},
				},
				path: secretName,
				in: NewSecret(map[string]string{
					"key1": "val1updated",
					"key3": "val3",
				}, nil),
				ao: []ApplyOption{
					AllowUpdateIf(func(current, desired *Secret) bool {
						return false
					}),
				},
			},
			want: want{
				err: resource.NewNotAllowed(errUpdateNotAllowed),
			},
		},
		"SuccessfulUpdate": {
			reason: "Should successfully update by appending new data to existing ones.",
			args: args{
				client: &fake.LogicalClient{
					ReadFn: func(path string) (*api.Secret, error) {
						return &api.Secret{
							Data: map[string]interface{}{
								"key1": "val1",
								"key2": "val2",
							},
						}, nil
					},
					WriteFn: func(path string, data map[string]interface{}) (*api.Secret, error) {
						if diff := cmp.Diff(filepath.Join(mountPath, secretName), path); diff != "" {
							t.Errorf("r: -want, +got:\n%s", diff)
						}
						if diff := cmp.Diff(map[string]interface{}{
							"key1": "val1updated",
							"key2": "val2",
							"key3": "val3",
						}, data); diff != "" {
							t.Errorf("r: -want, +got:\n%s", diff)
						}
						return nil, nil
					},
				},
				path: secretName,
				in: NewSecret(map[string]string{
					"key1": "val1updated",
					"key3": "val3",
				}, nil),
				ao: []ApplyOption{
					AllowUpdateIf(func(current, desired *Secret) bool {
						return true
					}),
				},
			},
			want: want{
				err: nil,
			},
		},
		"SuccessfulAddMetadata": {
			reason: "Should successfully add new metadata.",
			args: args{
				client: &fake.LogicalClient{
					ReadFn: func(path string) (*api.Secret, error) {
						return &api.Secret{
							Data: map[string]interface{}{
								"key1": "val1",
								"key2": "val2",
							},
						}, nil
					},
					WriteFn: func(path string, data map[string]interface{}) (*api.Secret, error) {
						if diff := cmp.Diff(filepath.Join(mountPath, secretName), path); diff != "" {
							t.Errorf("r: -want, +got:\n%s", diff)
						}
						if diff := cmp.Diff(map[string]interface{}{
							"key1":                 "val1",
							"key2":                 "val2",
							metadataPrefix + "foo": "bar",
							metadataPrefix + "baz": "qux",
						}, data); diff != "" {
							t.Errorf("r: -want, +got:\n%s", diff)
						}
						return nil, nil
					},
				},
				path: secretName,
				in: NewSecret(map[string]string{
					"key1": "val1",
					"key2": "val2",
				}, map[string]string{
					"foo": "bar",
					"baz": "qux",
				}),
			},
			want: want{
				err: nil,
			},
		},
		"SuccessfulUpdateMetadata": {
			reason: "Should successfully update metadata by overriding the existing ones.",
			args: args{
				client: &fake.LogicalClient{
					ReadFn: func(path string) (*api.Secret, error) {
						return &api.Secret{
							Data: map[string]interface{}{
								"key1":                 "val1",
								"key2":                 "val2",
								metadataPrefix + "old": "meta",
							},
						}, nil
					},
					WriteFn: func(path string, data map[string]interface{}) (*api.Secret, error) {
						if diff := cmp.Diff(filepath.Join(mountPath, secretName), path); diff != "" {
							t.Errorf("r: -want, +got:\n%s", diff)
						}
						if diff := cmp.Diff(map[string]interface{}{
							"key1":                 "val1",
							"key2":                 "val2",
							metadataPrefix + "old": "meta",
							metadataPrefix + "foo": "bar",
						}, data); diff != "" {
							t.Errorf("r: -want, +got:\n%s", diff)
						}
						return nil, nil
					},
				},
				path: secretName,
				in: NewSecret(map[string]string{
					"key1": "val1",
					"key2": "val2",
				}, map[string]string{
					"old": "meta",
					"foo": "bar",
				}),
			},
			want: want{
				err: nil,
			},
		},
	}
	for name, tc := range cases {
		t.Run(name, func(t *testing.T) {
			k := NewV1Client(tc.args.client, mountPath)
			err := k.Apply(tc.args.path, tc.args.in, tc.args.ao...)
			if diff := cmp.Diff(tc.want.err, err, test.EquateErrors()); diff != "" {
				t.Errorf("\n%s\nv1Client.Apply(...): -want error, +got error:\n%s", tc.reason, diff)
			}
		})
	}
}
// TestV1ClientDelete verifies deletion against the KV v1 engine: error
// propagation, idempotent delete of an already-missing secret, and the
// path handed to the logical client.
func TestV1ClientDelete(t *testing.T) {
	type args struct {
		client LogicalClient
		path   string
	}
	type want struct {
		err error
	}
	cases := map[string]struct {
		reason string
		args
		want
	}{
		"ErrorWhileDeletingSecret": {
			reason: "Should return a proper error if deleting secret failed.",
			args: args{
				client: &fake.LogicalClient{
					DeleteFn: func(path string) (*api.Secret, error) {
						return nil, errBoom
					},
				},
				path: secretName,
			},
			want: want{
				err: errors.Wrap(errBoom, errDelete),
			},
		},
		"SecretAlreadyDeleted": {
			reason: "Should return success if secret already deleted.",
			args: args{
				client: &fake.LogicalClient{
					DeleteFn: func(path string) (*api.Secret, error) {
						// Vault logical client returns both error and secret as
						// nil if secret does not exist.
						return nil, nil
					},
				},
				path: secretName,
			},
			want: want{
				err: nil,
			},
		},
		"SuccessfulDelete": {
			reason: "Should return no error after successful deletion of a v1 secret.",
			args: args{
				client: &fake.LogicalClient{
					DeleteFn: func(path string) (*api.Secret, error) {
						if diff := cmp.Diff(filepath.Join(mountPath, secretName), path); diff != "" {
							t.Errorf("r: -want, +got:\n%s", diff)
						}
						return &api.Secret{
							Data: map[string]interface{}{
								"foo": "bar",
							},
						}, nil
					},
				},
				path: secretName,
			},
			want: want{},
		},
	}
	for name, tc := range cases {
		t.Run(name, func(t *testing.T) {
			k := NewV1Client(tc.args.client, mountPath)
			err := k.Delete(tc.args.path)
			if diff := cmp.Diff(tc.want.err, err, test.EquateErrors()); diff != "" {
				// Label fixed: this test exercises Delete, not Get.
				t.Errorf("\n%s\nv1Client.Delete(...): -want error, +got error:\n%s", tc.reason, diff)
			}
		})
	}
}

View File

@ -0,0 +1,219 @@
/*
Copyright 2022 The Crossplane Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package kv
import (
"encoding/json"
"path/filepath"
"github.com/crossplane/crossplane-runtime/pkg/fieldpath"
"github.com/hashicorp/vault/api"
"github.com/crossplane/crossplane-runtime/pkg/errors"
"github.com/crossplane/crossplane-runtime/pkg/resource"
)
const (
	// errWriteMetadata is returned when writing custom metadata fails.
	errWriteMetadata = "cannot write secret metadata Data"
)

// V2Client is a Vault KV V2 Secrets Engine client.
// https://www.vaultproject.io/api/secret/kv/kv-v2
type V2Client struct {
	// client performs the raw logical read/write/delete operations.
	client LogicalClient
	// mountPath is where the KV engine is mounted; it prefixes every path.
	mountPath string
}
// NewV2Client returns a new V2Client backed by the supplied logical client
// and rooted at the supplied mount path.
func NewV2Client(logical LogicalClient, mountPath string) *V2Client {
	return &V2Client{
		client:    logical,
		mountPath: mountPath,
	}
}
// Get returns a Secret at a given path.
func (c *V2Client) Get(path string, secret *Secret) error {
	raw, err := c.client.Read(c.dataPath(path))
	switch {
	case err != nil:
		return errors.Wrap(err, errRead)
	case raw == nil:
		// The logical client yields a nil secret and nil error when nothing
		// exists at the path.
		return errors.New(ErrNotFound)
	}
	return c.parseAsKVSecret(raw, secret)
}
// Apply applies given Secret at path by patching its Data and setting
// provided custom metadata.
func (c *V2Client) Apply(path string, secret *Secret, ao ...ApplyOption) error {
	existing := &Secret{}
	err := c.Get(path, existing)
	// A NotFound error just means we will create the secret; anything else
	// is fatal.
	if resource.Ignore(IsNotFound, err) != nil {
		return errors.Wrap(err, errGet)
	}
	// ApplyOptions are only consulted when a current secret exists.
	if !IsNotFound(err) {
		for _, o := range ao {
			if err = o(existing, secret); err != nil {
				return err
			}
		}
	}
	// We write metadata first to ensure we set ownership (with the label) of
	// the secret before writing any data. This is to prevent situations where
	// secret create with some data but owner not set.
	mp, changed := metadataPayload(existing.CustomMeta, secret.CustomMeta)
	if changed {
		if _, err := c.client.Write(c.metadataPath(path), mp); err != nil {
			return errors.Wrap(err, errWriteMetadata)
		}
	}
	dp, changed := dataPayload(existing, secret)
	if changed {
		if _, err := c.client.Write(c.dataPath(path), dp); err != nil {
			return errors.Wrap(err, errWriteData)
		}
	}
	return nil
}
// Delete deletes Secret at the given path.
func (c *V2Client) Delete(path string) error {
	// Note(turkenh): With V2Client, we need to delete metadata and all versions:
	// https://www.vaultproject.io/api-docs/secret/kv/kv-v2#delete-metadata-and-all-versions
	if _, err := c.client.Delete(c.metadataPath(path)); err != nil {
		return errors.Wrap(err, errDelete)
	}
	return nil
}
// dataPayload merges new data over existing data and returns the KV v2 write
// payload (including the check-and-set version option) plus whether anything
// actually changed.
func dataPayload(existing, new *Secret) (map[string]interface{}, bool) {
	merged := make(map[string]string, len(existing.Data)+len(new.Data))
	for k, v := range existing.Data {
		merged[k] = v
	}
	changed := false
	for k, v := range new.Data {
		prev, ok := existing.Data[k]
		if ok && prev == v {
			continue
		}
		changed = true
		merged[k] = v
	}
	// cas=0 asks Vault to only create; an existing version guards the update.
	version := existing.version
	if version == "" {
		version = json.Number("0")
	}
	return map[string]interface{}{
		"options": map[string]interface{}{
			"cas": version,
		},
		"data": merged,
	}, changed
}
// metadataPayload builds the custom_metadata write payload and reports
// whether the new metadata differs from the existing metadata (i.e. whether
// a write is needed).
func metadataPayload(existing, new map[string]string) (map[string]interface{}, bool) {
	payload := map[string]interface{}{"custom_metadata": new}

	same := len(existing) == len(new)
	for k, v := range new {
		if !same {
			break
		}
		got, ok := existing[k]
		same = ok && got == v
	}
	return payload, !same
}
// parseAsKVSecret converts a raw KV v2 API response into a kv.Secret.
func (c *V2Client) parseAsKVSecret(s *api.Secret, kv *Secret) error {
	// Note(turkenh): kv v2 secrets contains another "data" and "metadata"
	// blocks inside the top level generic "Data" field.
	// https://www.vaultproject.io/api/secret/kv/kv-v2#sample-response-1
	paved := fieldpath.Pave(s.Data)
	if err := parseSecretData(paved, kv); err != nil {
		return err
	}
	return parseSecretMeta(paved, kv)
}
// parseSecretData extracts the nested "data" block into kv.Data. A missing
// block is not an error; non-string values are skipped.
func parseSecretData(payload *fieldpath.Paved, kv *Secret) error {
	raw := map[string]interface{}{}
	switch err := payload.GetValueInto("data", &raw); {
	case fieldpath.IsNotFound(err):
		return nil
	case err != nil:
		return err
	}
	kv.Data = make(map[string]string, len(raw))
	for k, v := range raw {
		s, ok := v.(string)
		if !ok {
			continue
		}
		kv.Data[k] = s
	}
	return nil
}
// parseSecretMeta extracts the nested "metadata" block: the secret version
// (used later for check-and-set writes) and any custom_metadata entries.
// Missing blocks are not errors; non-string metadata values are skipped.
func parseSecretMeta(payload *fieldpath.Paved, kv *Secret) error {
	meta := map[string]interface{}{}
	switch err := payload.GetValueInto("metadata", &meta); {
	case fieldpath.IsNotFound(err):
		return nil
	case err != nil:
		return err
	}
	pavedMeta := fieldpath.Pave(meta)
	if err := pavedMeta.GetValueInto("version", &kv.version); resource.Ignore(fieldpath.IsNotFound, err) != nil {
		return err
	}
	custom := map[string]interface{}{}
	switch err := pavedMeta.GetValueInto("custom_metadata", &custom); {
	case fieldpath.IsNotFound(err):
		return nil
	case err != nil:
		return err
	}
	kv.CustomMeta = make(map[string]string, len(custom))
	for k, v := range custom {
		if s, ok := v.(string); ok {
			kv.CustomMeta[k] = s
		}
	}
	return nil
}
// dataPath returns the full API path of the KV v2 data endpoint for the
// given secret path, rooted at this client's mount path.
func (c *V2Client) dataPath(secretPath string) string {
	elems := []string{c.mountPath, "data", secretPath}
	return filepath.Join(elems...)
}
// metadataPath returns the full API path of the KV v2 metadata endpoint for
// the given secret path, rooted at this client's mount path.
func (c *V2Client) metadataPath(secretPath string) string {
	elems := []string{c.mountPath, "metadata", secretPath}
	return filepath.Join(elems...)
}

View File

@ -0,0 +1,682 @@
/*
Copyright 2022 The Crossplane Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package kv
import (
"encoding/json"
"path/filepath"
"testing"
"github.com/google/go-cmp/cmp"
"github.com/google/go-cmp/cmp/cmpopts"
"github.com/hashicorp/vault/api"
"github.com/crossplane/crossplane-runtime/pkg/connection/store/vault/kv/fake"
"github.com/crossplane/crossplane-runtime/pkg/errors"
"github.com/crossplane/crossplane-runtime/pkg/resource"
"github.com/crossplane/crossplane-runtime/pkg/test"
)
// Shared fixtures for the V2Client tests.
const (
	mountPath  = "test-secrets/"  // KV engine mount path used by every case.
	secretName = "conn-unittests" // Secret name (path relative to the mount) under test.
)

var (
	errBoom = errors.New("boom") // Generic error returned by fakes to simulate failures.
)
// TestV2ClientGet exercises V2Client.Get against a fake logical client,
// covering read errors, missing secrets, and responses that contain only
// metadata, only data, or both.
func TestV2ClientGet(t *testing.T) {
	type args struct {
		client LogicalClient
		path   string
	}
	type want struct {
		err error
		out *Secret
	}
	cases := map[string]struct {
		reason string
		args
		want
	}{
		"ErrorWhileGettingSecret": {
			reason: "Should return a proper error if getting secret failed.",
			args: args{
				client: &fake.LogicalClient{
					ReadFn: func(path string) (*api.Secret, error) {
						return nil, errBoom
					},
				},
				path: secretName,
			},
			want: want{
				err: errors.Wrap(errBoom, errRead),
				out: NewSecret(nil, nil),
			},
		},
		"SecretNotFound": {
			reason: "Should return a notFound error if secret does not exist.",
			args: args{
				client: &fake.LogicalClient{
					ReadFn: func(path string) (*api.Secret, error) {
						// Vault logical client returns both error and secret as
						// nil if secret does not exist.
						return nil, nil
					},
				},
				path: secretName,
			},
			want: want{
				err: errors.New(ErrNotFound),
				out: NewSecret(nil, nil),
			},
		},
		"SuccessfulGetNoData": {
			reason: "Should successfully return secret from v2 KV engine even it only contains metadata.",
			args: args{
				client: &fake.LogicalClient{
					ReadFn: func(path string) (*api.Secret, error) {
						// Reads must target the KV v2 "data" endpoint.
						if diff := cmp.Diff(filepath.Join(mountPath, "data", secretName), path); diff != "" {
							t.Errorf("r: -want, +got:\n%s", diff)
						}
						return &api.Secret{
							// Using sample response here:
							// https://www.vaultproject.io/api/secret/kv/kv-v2#sample-response-1
							Data: map[string]interface{}{
								"metadata": map[string]interface{}{
									"created_time": "2018-03-22T02:24:06.945319214Z",
									"custom_metadata": map[string]interface{}{
										"owner":            "jdoe",
										"mission_critical": "false",
									},
									"deletion_time": "",
									"destroyed":     false,
								},
							},
						}, nil
					},
				},
				path: secretName,
			},
			want: want{
				out: NewSecret(nil, map[string]string{
					"owner":            "jdoe",
					"mission_critical": "false",
				}),
			},
		},
		"SuccessfulGetNoMetadata": {
			reason: "Should successfully return secret from v2 KV engine even it only contains data.",
			args: args{
				client: &fake.LogicalClient{
					ReadFn: func(path string) (*api.Secret, error) {
						if diff := cmp.Diff(filepath.Join(mountPath, "data", secretName), path); diff != "" {
							t.Errorf("r: -want, +got:\n%s", diff)
						}
						return &api.Secret{
							// Using sample response here:
							// https://www.vaultproject.io/api/secret/kv/kv-v2#sample-response-1
							Data: map[string]interface{}{
								"data": map[string]interface{}{
									"foo": "bar",
								},
							},
						}, nil
					},
				},
				path: secretName,
			},
			want: want{
				out: NewSecret(map[string]string{
					"foo": "bar",
				}, nil),
			},
		},
		"SuccessfulGet": {
			reason: "Should successfully return secret from v2 KV engine.",
			args: args{
				client: &fake.LogicalClient{
					ReadFn: func(path string) (*api.Secret, error) {
						if diff := cmp.Diff(filepath.Join(mountPath, "data", secretName), path); diff != "" {
							t.Errorf("r: -want, +got:\n%s", diff)
						}
						return &api.Secret{
							// Using sample response here:
							// https://www.vaultproject.io/api/secret/kv/kv-v2#sample-response-1
							Data: map[string]interface{}{
								"data": map[string]interface{}{
									"foo": "bar",
								},
								"metadata": map[string]interface{}{
									"created_time": "2018-03-22T02:24:06.945319214Z",
									"custom_metadata": map[string]interface{}{
										"owner":            "jdoe",
										"mission_critical": "false",
									},
									"deletion_time": "",
									"destroyed":     false,
									"version":       2,
								},
							},
						}, nil
					},
				},
				path: secretName,
			},
			want: want{
				out: NewSecret(map[string]string{
					"foo": "bar",
				}, map[string]string{
					"owner":            "jdoe",
					"mission_critical": "false",
				}),
			},
		},
	}
	for name, tc := range cases {
		t.Run(name, func(t *testing.T) {
			k := NewV2Client(tc.args.client, mountPath)
			s := Secret{}
			err := k.Get(tc.args.path, &s)
			if diff := cmp.Diff(tc.want.err, err, test.EquateErrors()); diff != "" {
				t.Errorf("\n%s\nv2Client.Get(...): -want error, +got error:\n%s", tc.reason, diff)
			}
			// The unexported version field is an implementation detail and is
			// deliberately excluded from the comparison.
			if diff := cmp.Diff(tc.want.out, &s, cmpopts.IgnoreUnexported(Secret{})); diff != "" {
				t.Errorf("\n%s\nv2Client.Get(...): -want, +got:\n%s", tc.reason, diff)
			}
		})
	}
}
// TestV2ClientApply exercises V2Client.Apply against a fake logical client:
// read/write errors, the no-op short-circuit when the secret is up to date,
// create with compare-and-swap (cas) 0, cas-guarded data updates, apply
// options vetoing an update, and metadata-only writes. Note that the fake's
// single WriteFn serves both the "data" and "metadata" endpoint writes.
func TestV2ClientApply(t *testing.T) {
	type args struct {
		client LogicalClient
		in     *Secret
		path   string
		ao     []ApplyOption
	}
	type want struct {
		err error
	}
	cases := map[string]struct {
		reason string
		args
		want
	}{
		"ErrorWhileReadingSecret": {
			reason: "Should return a proper error if reading secret failed.",
			args: args{
				client: &fake.LogicalClient{
					ReadFn: func(path string) (*api.Secret, error) {
						return nil, errBoom
					},
				},
				path: secretName,
			},
			want: want{
				err: errors.Wrap(errors.Wrap(errBoom, errRead), errGet),
			},
		},
		"ErrorWhileWritingData": {
			reason: "Should return a proper error if writing secret failed.",
			args: args{
				client: &fake.LogicalClient{
					ReadFn: func(path string) (*api.Secret, error) {
						return &api.Secret{
							Data: map[string]interface{}{
								"data": map[string]string{
									"key1": "val1",
									"key2": "val2",
								},
								"metadata": map[string]interface{}{
									"custom_metadata": map[string]interface{}{
										"foo": "bar",
										"baz": "qux",
									},
									"version": json.Number("2"),
								},
							},
						}, nil
					},
					WriteFn: func(path string, data map[string]interface{}) (*api.Secret, error) {
						return nil, errBoom
					},
				},
				path: secretName,
				in: NewSecret(map[string]string{
					"key1": "val1updated",
					"key3": "val3",
				}, map[string]string{
					"foo": "bar",
					"baz": "qux",
				}),
			},
			want: want{
				err: errors.Wrap(errBoom, errWriteData),
			},
		},
		"ErrorWhileWritingMetadata": {
			reason: "Should return a proper error if writing secret metadata failed.",
			args: args{
				client: &fake.LogicalClient{
					ReadFn: func(path string) (*api.Secret, error) {
						return &api.Secret{
							Data: map[string]interface{}{
								"data": map[string]string{
									"key1": "val1",
									"key2": "val2",
								},
								"metadata": map[string]interface{}{
									"custom_metadata": map[string]interface{}{
										"foo": "bar",
									},
									"version": json.Number("2"),
								},
							},
						}, nil
					},
					WriteFn: func(path string, data map[string]interface{}) (*api.Secret, error) {
						return nil, errBoom
					},
				},
				path: secretName,
				// Data is unchanged, only metadata differs, so the (failing)
				// write targets the metadata endpoint.
				in: NewSecret(map[string]string{
					"key1": "val1",
					"key2": "val2",
				}, map[string]string{
					"foo": "baz",
				}),
			},
			want: want{
				err: errors.Wrap(errBoom, errWriteMetadata),
			},
		},
		"AlreadyUpToDate": {
			reason: "Should not perform a write if a v2 secret is already up to date.",
			args: args{
				client: &fake.LogicalClient{
					ReadFn: func(path string) (*api.Secret, error) {
						return &api.Secret{
							// Using sample response here:
							// https://www.vaultproject.io/api/secret/kv/kv-v2#sample-response-1
							Data: map[string]interface{}{
								"data": map[string]interface{}{
									"foo": "bar",
								},
								"metadata": map[string]interface{}{
									"created_time": "2018-03-22T02:24:06.945319214Z",
									"custom_metadata": map[string]interface{}{
										"owner":            "jdoe",
										"mission_critical": "false",
									},
									"deletion_time": "",
									"destroyed":     false,
									"version":       2,
								},
							},
						}, nil
					},
					WriteFn: func(path string, data map[string]interface{}) (*api.Secret, error) {
						// Any write here means the no-op detection failed.
						return nil, errors.New("no write operation expected")
					},
				},
				path: secretName,
				in: NewSecret(map[string]string{
					"foo": "bar",
				}, map[string]string{
					"owner":            "jdoe",
					"mission_critical": "false",
				}),
			},
			want: want{
				err: nil,
			},
		},
		"SuccessfulCreate": {
			reason: "Should successfully create with new data if secret does not exists.",
			args: args{
				client: &fake.LogicalClient{
					ReadFn: func(path string) (*api.Secret, error) {
						// Vault logical client returns both error and secret as
						// nil if secret does not exist.
						return nil, nil
					},
					WriteFn: func(path string, data map[string]interface{}) (*api.Secret, error) {
						if diff := cmp.Diff(filepath.Join(mountPath, "data", secretName), path); diff != "" {
							t.Errorf("r: -want, +got:\n%s", diff)
						}
						// cas 0 means "create only if the secret does not exist".
						if diff := cmp.Diff(map[string]interface{}{
							"data": map[string]string{
								"key1": "val1",
								"key2": "val2",
							},
							"options": map[string]interface{}{
								"cas": json.Number("0"),
							},
						}, data); diff != "" {
							t.Errorf("r: -want, +got:\n%s", diff)
						}
						return nil, nil
					},
				},
				path: secretName,
				in: NewSecret(map[string]string{
					"key1": "val1",
					"key2": "val2",
				}, nil),
			},
			want: want{
				err: nil,
			},
		},
		"UpdateNotAllowed": {
			reason: "Should return not allowed error if update is not allowed.",
			args: args{
				client: &fake.LogicalClient{
					ReadFn: func(path string) (*api.Secret, error) {
						return &api.Secret{
							Data: map[string]interface{}{
								"data": map[string]interface{}{
									"key1": "val1",
									"key2": "val2",
								},
								"metadata": map[string]interface{}{
									"custom_metadata": map[string]interface{}{
										"foo": "bar",
										"baz": "qux",
									},
									"version": json.Number("2"),
								},
							},
						}, nil
					},
					WriteFn: func(path string, data map[string]interface{}) (*api.Secret, error) {
						if diff := cmp.Diff(filepath.Join(mountPath, "data", secretName), path); diff != "" {
							t.Errorf("r: -want, +got:\n%s", diff)
						}
						if diff := cmp.Diff(map[string]interface{}{
							"data": map[string]string{
								"key1": "val1updated",
								"key2": "val2",
								"key3": "val3",
							},
							"options": map[string]interface{}{
								"cas": json.Number("2"),
							},
						}, data); diff != "" {
							t.Errorf("r: -want, +got:\n%s", diff)
						}
						return nil, nil
					},
				},
				path: secretName,
				in: NewSecret(map[string]string{
					"key1": "val1updated",
					"key3": "val3",
				}, map[string]string{
					"foo": "bar",
					"baz": "qux",
				}),
				ao: []ApplyOption{
					AllowUpdateIf(func(current, desired *Secret) bool {
						return false
					}),
				},
			},
			want: want{
				err: resource.NewNotAllowed(errUpdateNotAllowed),
			},
		},
		"SuccessfulUpdateData": {
			reason: "Should successfully update by appending new data to existing ones.",
			args: args{
				client: &fake.LogicalClient{
					ReadFn: func(path string) (*api.Secret, error) {
						return &api.Secret{
							Data: map[string]interface{}{
								"data": map[string]interface{}{
									"key1": "val1",
									"key2": "val2",
								},
								"metadata": map[string]interface{}{
									"custom_metadata": map[string]interface{}{
										"foo": "bar",
										"baz": "qux",
									},
									"version": json.Number("2"),
								},
							},
						}, nil
					},
					WriteFn: func(path string, data map[string]interface{}) (*api.Secret, error) {
						if diff := cmp.Diff(filepath.Join(mountPath, "data", secretName), path); diff != "" {
							t.Errorf("r: -want, +got:\n%s", diff)
						}
						// The cas option must carry the version read above so
						// the write fails if the secret changed in between.
						if diff := cmp.Diff(map[string]interface{}{
							"data": map[string]string{
								"key1": "val1updated",
								"key2": "val2",
								"key3": "val3",
							},
							"options": map[string]interface{}{
								"cas": json.Number("2"),
							},
						}, data); diff != "" {
							t.Errorf("r: -want, +got:\n%s", diff)
						}
						return nil, nil
					},
				},
				path: secretName,
				in: NewSecret(map[string]string{
					"key1": "val1updated",
					"key3": "val3",
				}, map[string]string{
					"foo": "bar",
					"baz": "qux",
				}),
				ao: []ApplyOption{
					AllowUpdateIf(func(current, desired *Secret) bool {
						return true
					}),
				},
			},
			want: want{
				err: nil,
			},
		},
		"SuccessfulAddMetadata": {
			reason: "Should successfully add new metadata.",
			args: args{
				client: &fake.LogicalClient{
					ReadFn: func(path string) (*api.Secret, error) {
						return &api.Secret{
							Data: map[string]interface{}{
								"data": map[string]interface{}{
									"key1": "val1",
									"key2": "val2",
								},
								"metadata": map[string]interface{}{
									"version": json.Number("2"),
								},
							},
						}, nil
					},
					WriteFn: func(path string, data map[string]interface{}) (*api.Secret, error) {
						// Metadata-only change: the write targets the
						// "metadata" endpoint, not "data".
						if diff := cmp.Diff(filepath.Join(mountPath, "metadata", secretName), path); diff != "" {
							t.Errorf("r: -want, +got:\n%s", diff)
						}
						if diff := cmp.Diff(map[string]interface{}{
							"custom_metadata": map[string]string{
								"foo": "bar",
								"baz": "qux",
							},
						}, data); diff != "" {
							t.Errorf("r: -want, +got:\n%s", diff)
						}
						return nil, nil
					},
				},
				path: secretName,
				in: NewSecret(map[string]string{
					"key1": "val1",
					"key2": "val2",
				}, map[string]string{
					"foo": "bar",
					"baz": "qux",
				}),
			},
			want: want{
				err: nil,
			},
		},
		"SuccessfulUpdateMetadata": {
			reason: "Should successfully update metadata by overriding the existing ones.",
			args: args{
				client: &fake.LogicalClient{
					ReadFn: func(path string) (*api.Secret, error) {
						return &api.Secret{
							Data: map[string]interface{}{
								"data": map[string]interface{}{
									"key1": "val1",
									"key2": "val2",
								},
								"metadata": map[string]interface{}{
									"custom_metadata": map[string]interface{}{
										"old": "meta",
									},
									"version": json.Number("2"),
								},
							},
						}, nil
					},
					WriteFn: func(path string, data map[string]interface{}) (*api.Secret, error) {
						if diff := cmp.Diff(filepath.Join(mountPath, "metadata", secretName), path); diff != "" {
							t.Errorf("r: -want, +got:\n%s", diff)
						}
						if diff := cmp.Diff(map[string]interface{}{
							"custom_metadata": map[string]string{
								"foo": "bar",
								"baz": "qux",
							},
						}, data); diff != "" {
							t.Errorf("r: -want, +got:\n%s", diff)
						}
						return nil, nil
					},
				},
				path: secretName,
				in: NewSecret(map[string]string{
					"key1": "val1",
					"key2": "val2",
				}, map[string]string{
					"foo": "bar",
					"baz": "qux",
				}),
			},
			want: want{
				err: nil,
			},
		},
	}
	for name, tc := range cases {
		t.Run(name, func(t *testing.T) {
			k := NewV2Client(tc.args.client, mountPath)
			err := k.Apply(tc.args.path, tc.args.in, tc.args.ao...)
			if diff := cmp.Diff(tc.want.err, err, test.EquateErrors()); diff != "" {
				t.Errorf("\n%s\nv2Client.Apply(...): -want error, +got error:\n%s", tc.reason, diff)
			}
		})
	}
}
// TestV2ClientDelete exercises V2Client.Delete against a fake logical
// client: delete errors, already-deleted secrets (a no-op), and the happy
// path, which must target the KV v2 "metadata" endpoint so that all
// versions of the secret are removed.
func TestV2ClientDelete(t *testing.T) {
	type args struct {
		client LogicalClient
		path   string
	}
	type want struct {
		err error
	}
	cases := map[string]struct {
		reason string
		args
		want
	}{
		"ErrorWhileDeletingSecret": {
			reason: "Should return a proper error if deleting secret failed.",
			args: args{
				client: &fake.LogicalClient{
					DeleteFn: func(path string) (*api.Secret, error) {
						return nil, errBoom
					},
				},
				path: secretName,
			},
			want: want{
				err: errors.Wrap(errBoom, errDelete),
			},
		},
		"SecretAlreadyDeleted": {
			reason: "Should return success if secret already deleted.",
			args: args{
				client: &fake.LogicalClient{
					DeleteFn: func(path string) (*api.Secret, error) {
						// Vault logical client returns both error and secret as
						// nil if secret does not exist.
						return nil, nil
					},
				},
				path: secretName,
			},
			want: want{
				err: nil,
			},
		},
		"SuccessfulDelete": {
			reason: "Should return no error after successful deletion of a v2 secret.",
			args: args{
				client: &fake.LogicalClient{
					DeleteFn: func(path string) (*api.Secret, error) {
						if diff := cmp.Diff(filepath.Join(mountPath, "metadata", secretName), path); diff != "" {
							t.Errorf("r: -want, +got:\n%s", diff)
						}
						return nil, nil
					},
				},
				path: secretName,
			},
			want: want{},
		},
	}
	for name, tc := range cases {
		t.Run(name, func(t *testing.T) {
			k := NewV2Client(tc.args.client, mountPath)
			err := k.Delete(tc.args.path)
			// The failure message previously said "v2Client.Get(...)" - a
			// copy-paste from the Get test; fixed to name Delete.
			if diff := cmp.Diff(tc.want.err, err, test.EquateErrors()); diff != "" {
				t.Errorf("\n%s\nv2Client.Delete(...): -want error, +got error:\n%s", tc.reason, diff)
			}
		})
	}
}

View File

@ -0,0 +1,247 @@
/*
Copyright 2022 The Crossplane Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package vault
import (
"context"
"crypto/x509"
"net/http"
"path/filepath"
"github.com/google/go-cmp/cmp"
"github.com/google/go-cmp/cmp/cmpopts"
"github.com/hashicorp/vault/api"
"sigs.k8s.io/controller-runtime/pkg/client"
v1 "github.com/crossplane/crossplane-runtime/apis/common/v1"
"github.com/crossplane/crossplane-runtime/pkg/connection/store"
"github.com/crossplane/crossplane-runtime/pkg/connection/store/vault/kv"
"github.com/crossplane/crossplane-runtime/pkg/errors"
"github.com/crossplane/crossplane-runtime/pkg/resource"
)
// Error strings.
const (
	// Configuration and client-construction errors.
	errNoConfig        = "no Vault config provided"
	errNewClient       = "cannot create new client"
	errExtractCABundle = "cannot extract ca bundle"
	errAppendCABundle  = "cannot append ca bundle"
	errExtractToken    = "cannot extract token"
	errNoTokenProvided = "token auth configured but no token provided"

	// Store operation errors, used to wrap failures from the KV client.
	errGet    = "cannot get secret"
	errApply  = "cannot apply secret"
	errDelete = "cannot delete secret"
)
// KVClient is a Vault AdditiveKVClient Secrets engine client that supports both v1 and v2.
type KVClient interface {
	// Get reads the secret at the given path into secret.
	Get(path string, secret *kv.Secret) error
	// Apply creates or updates the secret at the given path, subject to the
	// supplied apply options.
	Apply(path string, secret *kv.Secret, ao ...kv.ApplyOption) error
	// Delete removes the secret at the given path.
	Delete(path string) error
}
// SecretStore is a Vault Secret Store.
type SecretStore struct {
	// client talks to the configured Vault KV secrets engine (v1 or v2).
	client KVClient

	// defaultParentPath is the parent path used for secrets whose scoped
	// name carries no explicit scope.
	defaultParentPath string
}
// NewSecretStore returns a new Vault SecretStore configured per the supplied
// SecretStoreConfig: it builds a Vault API client (optionally with a custom
// CA bundle), authenticates it, and selects a KV v1 or v2 client.
func NewSecretStore(ctx context.Context, kube client.Client, cfg v1.SecretStoreConfig) (*SecretStore, error) { // nolint: gocyclo
	// NOTE(turkenh): Adding linter exception for gocyclo since this function
	// went a little over the limit due to the switch statements not because of
	// some complex logic.
	if cfg.Vault == nil {
		return nil, errors.New(errNoConfig)
	}
	vCfg := api.DefaultConfig()
	vCfg.Address = cfg.Vault.Server

	if cfg.Vault.CABundle != nil {
		ca, err := resource.CommonCredentialExtractor(ctx, cfg.Vault.CABundle.Source, kube, cfg.Vault.CABundle.CommonCredentialSelectors)
		if err != nil {
			return nil, errors.Wrap(err, errExtractCABundle)
		}
		pool := x509.NewCertPool()
		if ok := pool.AppendCertsFromPEM(ca); !ok {
			// AppendCertsFromPEM signals failure with a bool, not an error:
			// err is nil here, so wrapping it (as the previous code did)
			// produced a nil error and silently accepted a bad CA bundle.
			return nil, errors.New(errAppendCABundle)
		}
		vCfg.HttpClient.Transport.(*http.Transport).TLSClientConfig.RootCAs = pool
	}

	c, err := api.NewClient(vCfg)
	if err != nil {
		return nil, errors.Wrap(err, errNewClient)
	}

	switch cfg.Vault.Auth.Method {
	case v1.VaultAuthToken:
		if cfg.Vault.Auth.Token == nil {
			return nil, errors.New(errNoTokenProvided)
		}
		t, err := resource.CommonCredentialExtractor(ctx, cfg.Vault.Auth.Token.Source, kube, cfg.Vault.Auth.Token.CommonCredentialSelectors)
		if err != nil {
			return nil, errors.Wrap(err, errExtractToken)
		}
		c.SetToken(string(t))
	default:
		return nil, errors.Errorf("%q is not supported as an auth method", cfg.Vault.Auth.Method)
	}

	var kvClient KVClient
	switch *cfg.Vault.Version {
	case v1.VaultKVVersionV1:
		kvClient = kv.NewV1Client(c.Logical(), cfg.Vault.MountPath)
	case v1.VaultKVVersionV2:
		kvClient = kv.NewV2Client(c.Logical(), cfg.Vault.MountPath)
	default:
		// Fail fast on an unknown KV version instead of returning a store
		// with a nil client that would panic on first use.
		return nil, errors.Errorf("%q is not supported as a KV version", *cfg.Vault.Version)
	}

	return &SecretStore{
		client:            kvClient,
		defaultParentPath: cfg.DefaultScope,
	}, nil
}
// ReadKeyValues reads and returns key value pairs for a given Vault Secret.
// A missing secret is not an error: the result is then populated with the
// scoped name and no data. Custom metadata, when present, is surfaced as
// connection secret labels.
func (ss *SecretStore) ReadKeyValues(_ context.Context, n store.ScopedName, s *store.Secret) error {
	secret := &kv.Secret{}
	err := ss.client.Get(ss.path(n), secret)
	if err != nil && !kv.IsNotFound(err) {
		return errors.Wrap(err, errGet)
	}

	s.ScopedName = n
	s.Data = keyValuesFromData(secret.Data)
	if len(secret.CustomMeta) != 0 {
		s.Metadata = &v1.ConnectionSecretMetadata{Labels: secret.CustomMeta}
	}
	return nil
}
// WriteKeyValues writes key value pairs to a given Vault Secret. It returns
// true only when a write actually happened; an apply that would be a no-op
// is skipped and reported as unchanged.
func (ss *SecretStore) WriteKeyValues(_ context.Context, s *store.Secret, wo ...store.WriteOption) (changed bool, err error) {
	// Veto the update when the current and desired secrets are already
	// equal, so no-op writes never reach Vault.
	ao := append(applyOptions(wo...), kv.AllowUpdateIf(func(current, desired *kv.Secret) bool {
		return !cmp.Equal(current, desired, cmpopts.EquateEmpty(), cmpopts.IgnoreUnexported(kv.Secret{}))
	}))

	desired := kv.NewSecret(dataFromKeyValues(s.Data), s.GetLabels())
	err = ss.client.Apply(ss.path(s.ScopedName), desired, ao...)
	switch {
	case resource.IsNotAllowed(err):
		// The update was not allowed because it was a no-op.
		return false, nil
	case err != nil:
		return false, errors.Wrap(err, errApply)
	}
	return true, nil
}
// DeleteKeyValues delete key value pairs from a given Vault Secret.
// If no kv specified, the whole secret instance is deleted.
// If kv specified, those would be deleted and secret instance will be deleted
// only if there is no Data left.
func (ss *SecretStore) DeleteKeyValues(_ context.Context, s *store.Secret, do ...store.DeleteOption) error {
	// The local was renamed from "Secret" to "current": an exported-looking
	// local name violates Go naming conventions and reads like the kv.Secret
	// type itself.
	current := &kv.Secret{}
	err := ss.client.Get(ss.path(s.ScopedName), current)
	if kv.IsNotFound(err) {
		// Secret already deleted, nothing to do.
		return nil
	}
	if err != nil {
		return errors.Wrap(err, errGet)
	}

	for _, o := range do {
		if err := o(context.Background(), s); err != nil {
			return err
		}
	}

	// Drop the requested keys from the stored secret.
	for k := range s.Data {
		delete(current.Data, k)
	}

	if len(s.Data) == 0 || len(current.Data) == 0 {
		// Secret is deleted only if:
		// - No kv to delete specified as input
		// - No data left in the secret
		return errors.Wrap(ss.client.Delete(ss.path(s.ScopedName)), errDelete)
	}

	// If there are still keys left, update the secret with the remaining.
	return errors.Wrap(ss.client.Apply(ss.path(s.ScopedName), current), errApply)
}
// path computes the Vault path for the secret with the given scoped name,
// falling back to the store's default parent path when no scope is set.
func (ss *SecretStore) path(s store.ScopedName) string {
	parent := ss.defaultParentPath
	if s.Scope != "" {
		parent = s.Scope
	}
	return filepath.Join(parent, s.Name)
}
// applyOptions adapts store.WriteOptions (which operate on store.Secrets)
// into kv.ApplyOptions (which operate on kv.Secrets) by converting the
// current and desired secrets back and forth around each option call.
func applyOptions(wo ...store.WriteOption) []kv.ApplyOption {
	ao := make([]kv.ApplyOption, 0, len(wo))
	for _, o := range wo {
		o := o // capture a fresh copy per iteration for the closure below
		ao = append(ao, func(current, desired *kv.Secret) error {
			cs := &store.Secret{
				Metadata: &v1.ConnectionSecretMetadata{Labels: current.CustomMeta},
				Data:     keyValuesFromData(current.Data),
			}
			ds := &store.Secret{
				Metadata: &v1.ConnectionSecretMetadata{Labels: desired.CustomMeta},
				Data:     keyValuesFromData(desired.Data),
			}
			if err := o(context.Background(), cs, ds); err != nil {
				return err
			}
			// Propagate any mutation the option made back onto the kv secret.
			desired.CustomMeta = ds.GetLabels()
			desired.Data = dataFromKeyValues(ds.Data)
			return nil
		})
	}
	return ao
}
// keyValuesFromData converts Vault's string-valued data into store.KeyValues.
// Empty or nil input yields nil.
func keyValuesFromData(data map[string]string) store.KeyValues {
	if len(data) == 0 {
		return nil
	}
	out := make(store.KeyValues, len(data))
	for key, val := range data {
		out[key] = []byte(val)
	}
	return out
}
// dataFromKeyValues converts store.KeyValues into the string-valued map
// Vault expects. Vault stores values as strings, so each []byte value is
// converted before writing. Empty or nil input yields nil.
func dataFromKeyValues(kv store.KeyValues) map[string]string {
	if len(kv) == 0 {
		return nil
	}
	out := make(map[string]string, len(kv))
	for key, val := range kv {
		out[key] = string(val)
	}
	return out
}

View File

@ -0,0 +1,827 @@
/*
Copyright 2022 The Crossplane Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package vault
import (
"context"
"path/filepath"
"testing"
"github.com/google/go-cmp/cmp"
corev1 "k8s.io/api/core/v1"
kerrors "k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/runtime/schema"
"sigs.k8s.io/controller-runtime/pkg/client"
v1 "github.com/crossplane/crossplane-runtime/apis/common/v1"
"github.com/crossplane/crossplane-runtime/pkg/connection/store"
"github.com/crossplane/crossplane-runtime/pkg/connection/store/vault/fake"
"github.com/crossplane/crossplane-runtime/pkg/connection/store/vault/kv"
"github.com/crossplane/crossplane-runtime/pkg/errors"
"github.com/crossplane/crossplane-runtime/pkg/test"
)
// Shared fixtures for the SecretStore tests.
const (
	parentPathDefault = "crossplane-system" // Default parent path (scope) for secrets.
	secretName        = "conn-unittests"    // Secret name under test.
)

var (
	errBoom = errors.New("boom") // Generic error returned by fakes to simulate failures.
)
// TestSecretStoreReadKeyValues exercises SecretStore.ReadKeyValues with a
// fake KV client: read errors, secrets resolved under the default and a
// custom scope, and propagation of custom metadata into secret labels.
func TestSecretStoreReadKeyValues(t *testing.T) {
	type args struct {
		client            KVClient
		defaultParentPath string
		name              store.ScopedName
	}
	type want struct {
		out *store.Secret
		err error
	}
	cases := map[string]struct {
		reason string
		args
		want
	}{
		"ErrorWhileGetting": {
			reason: "Should return a proper error if secret cannot be obtained",
			args: args{
				client: &fake.KVClient{
					GetFn: func(path string, secret *kv.Secret) error {
						return errBoom
					},
				},
				defaultParentPath: parentPathDefault,
				name: store.ScopedName{
					Name: secretName,
				},
			},
			want: want{
				out: &store.Secret{},
				err: errors.Wrap(errBoom, errGet),
			},
		},
		"SuccessfulGetWithDefaultScope": {
			reason: "Should return key values from a secret with default scope",
			args: args{
				client: &fake.KVClient{
					GetFn: func(path string, secret *kv.Secret) error {
						// Unscoped names resolve under the default parent path.
						if diff := cmp.Diff(filepath.Join(parentPathDefault, secretName), path); diff != "" {
							t.Errorf("r: -want, +got:\n%s", diff)
						}
						secret.Data = map[string]string{
							"key1": "val1",
							"key2": "val2",
						}
						return nil
					},
				},
				defaultParentPath: parentPathDefault,
				name: store.ScopedName{
					Name: secretName,
				},
			},
			want: want{
				out: &store.Secret{
					ScopedName: store.ScopedName{
						Name: secretName,
					},
					Data: store.KeyValues{
						"key1": []byte("val1"),
						"key2": []byte("val2"),
					},
				},
				err: nil,
			},
		},
		"SuccessfulGetWithCustomScope": {
			reason: "Should return key values from a secret with custom scope",
			args: args{
				client: &fake.KVClient{
					GetFn: func(path string, secret *kv.Secret) error {
						// An explicit scope overrides the default parent path.
						if diff := cmp.Diff(filepath.Join("another-scope", secretName), path); diff != "" {
							t.Errorf("r: -want, +got:\n%s", diff)
						}
						secret.Data = map[string]string{
							"key1": "val1",
							"key2": "val2",
						}
						return nil
					},
				},
				defaultParentPath: parentPathDefault,
				name: store.ScopedName{
					Name:  secretName,
					Scope: "another-scope",
				},
			},
			want: want{
				out: &store.Secret{
					ScopedName: store.ScopedName{
						Name:  secretName,
						Scope: "another-scope",
					},
					Data: store.KeyValues{
						"key1": []byte("val1"),
						"key2": []byte("val2"),
					},
				},
				err: nil,
			},
		},
		"SuccessfulGetWithMetadata": {
			reason: "Should return both data and metadata.",
			args: args{
				client: &fake.KVClient{
					GetFn: func(path string, secret *kv.Secret) error {
						if diff := cmp.Diff(filepath.Join(parentPathDefault, secretName), path); diff != "" {
							t.Errorf("r: -want, +got:\n%s", diff)
						}
						secret.Data = map[string]string{
							"key1": "val1",
							"key2": "val2",
						}
						secret.CustomMeta = map[string]string{
							"foo": "bar",
						}
						return nil
					},
				},
				defaultParentPath: parentPathDefault,
				name: store.ScopedName{
					Name: secretName,
				},
			},
			want: want{
				out: &store.Secret{
					ScopedName: store.ScopedName{
						Name: secretName,
					},
					Data: store.KeyValues{
						"key1": []byte("val1"),
						"key2": []byte("val2"),
					},
					Metadata: &v1.ConnectionSecretMetadata{
						Labels: map[string]string{
							"foo": "bar",
						},
					},
				},
				err: nil,
			},
		},
	}
	for name, tc := range cases {
		t.Run(name, func(t *testing.T) {
			ss := &SecretStore{
				client:            tc.args.client,
				defaultParentPath: tc.args.defaultParentPath,
			}
			s := &store.Secret{}
			err := ss.ReadKeyValues(context.Background(), tc.args.name, s)
			if diff := cmp.Diff(tc.want.err, err, test.EquateErrors()); diff != "" {
				t.Errorf("\n%s\nss.ReadKeyValues(...): -want error, +got error:\n%s", tc.reason, diff)
			}
			if diff := cmp.Diff(tc.want.out, s); diff != "" {
				t.Errorf("\n%s\nss.ReadKeyValues(...): -want, +got:\n%s", tc.reason, diff)
			}
		})
	}
}
// TestSecretStoreWriteKeyValues exercises SecretStore.WriteKeyValues with a
// fake KV client: apply errors, failing and succeeding write options, the
// no-op (unchanged) path, and successful writes with and without metadata.
// Two misleading case "reason" strings were fixed: "ErrWhileApplying"
// claimed success while testing the error path, and
// "SuccessfulWriteWithMetadata" duplicated "SuccessfulWrite"'s reason.
func TestSecretStoreWriteKeyValues(t *testing.T) {
	type args struct {
		client            KVClient
		defaultParentPath string
		secret            *store.Secret
		wo                []store.WriteOption
	}
	type want struct {
		changed bool
		err     error
	}
	cases := map[string]struct {
		reason string
		args
		want
	}{
		"ErrWhileApplying": {
			reason: "Should return a proper error if applying the secret fails",
			args: args{
				client: &fake.KVClient{
					ApplyFn: func(path string, secret *kv.Secret, ao ...kv.ApplyOption) error {
						return errBoom
					},
				},
				defaultParentPath: parentPathDefault,
				secret: &store.Secret{
					ScopedName: store.ScopedName{
						Name: secretName,
					},
					Data: store.KeyValues{
						"key1": []byte("val1"),
						"key2": []byte("val2"),
					},
				},
			},
			want: want{
				err: errors.Wrap(errBoom, errApply),
			},
		},
		"FailedWriteOption": {
			reason: "Should return a proper error if supplied write option fails",
			args: args{
				client: &fake.KVClient{
					ApplyFn: func(path string, secret *kv.Secret, ao ...kv.ApplyOption) error {
						// Run the adapted write options like the real client.
						for _, o := range ao {
							if err := o(&kv.Secret{}, secret); err != nil {
								return err
							}
						}
						return nil
					},
				},
				defaultParentPath: parentPathDefault,
				secret: &store.Secret{
					ScopedName: store.ScopedName{
						Name: secretName,
					},
					Data: store.KeyValues{
						"key1": []byte("val1"),
						"key2": []byte("val2"),
					},
				},
				wo: []store.WriteOption{
					func(ctx context.Context, current, desired *store.Secret) error {
						return errBoom
					},
				},
			},
			want: want{
				changed: false,
				err:     errors.Wrap(errBoom, errApply),
			},
		},
		"SuccessfulWriteOption": {
			reason: "Should return a no error if supplied write option succeeds",
			args: args{
				client: &fake.KVClient{
					ApplyFn: func(path string, secret *kv.Secret, ao ...kv.ApplyOption) error {
						for _, o := range ao {
							if err := o(&kv.Secret{
								Data: map[string]string{
									"key1": "val1",
									"key2": "val2",
								},
								CustomMeta: map[string]string{
									"foo": "bar",
								},
							}, secret); err != nil {
								return err
							}
						}
						return nil
					},
				},
				defaultParentPath: parentPathDefault,
				secret: &store.Secret{
					ScopedName: store.ScopedName{
						Name: secretName,
					},
					Data: store.KeyValues{
						"key1": []byte("val1"),
						"key2": []byte("val2"),
					},
				},
				wo: []store.WriteOption{
					func(ctx context.Context, current, desired *store.Secret) error {
						// Mutations made by the option must reach the applied
						// secret and cause a change.
						desired.Data["customkey"] = []byte("customval")
						desired.Metadata = &v1.ConnectionSecretMetadata{
							Labels: map[string]string{
								"foo": "baz",
							},
						}
						return nil
					},
				},
			},
			want: want{
				changed: true,
			},
		},
		"AlreadyUpToDate": {
			reason: "Should return no error and changed as false if secret is already up to date",
			args: args{
				client: &fake.KVClient{
					ApplyFn: func(path string, secret *kv.Secret, ao ...kv.ApplyOption) error {
						for _, o := range ao {
							if err := o(&kv.Secret{
								Data: map[string]string{
									"key1": "val1",
									"key2": "val2",
								},
							}, secret); err != nil {
								return err
							}
						}
						return nil
					},
				},
				defaultParentPath: parentPathDefault,
				secret: &store.Secret{
					ScopedName: store.ScopedName{
						Name: secretName,
					},
					Data: store.KeyValues{
						"key1": []byte("val1"),
						"key2": []byte("val2"),
					},
				},
			},
			want: want{
				changed: false,
				err:     nil,
			},
		},
		"SuccessfulWrite": {
			reason: "Should successfully write key values",
			args: args{
				client: &fake.KVClient{
					ApplyFn: func(path string, secret *kv.Secret, ao ...kv.ApplyOption) error {
						if diff := cmp.Diff(filepath.Join(parentPathDefault, secretName), path); diff != "" {
							t.Errorf("r: -want, +got:\n%s", diff)
						}
						if diff := cmp.Diff(map[string]string{
							"key1": "val1",
							"key2": "val2",
						}, secret.Data); diff != "" {
							t.Errorf("r: -want, +got:\n%s", diff)
						}
						return nil
					},
				},
				defaultParentPath: parentPathDefault,
				secret: &store.Secret{
					ScopedName: store.ScopedName{
						Name: secretName,
					},
					Data: store.KeyValues{
						"key1": []byte("val1"),
						"key2": []byte("val2"),
					},
				},
			},
			want: want{
				changed: true,
			},
		},
		"SuccessfulWriteWithMetadata": {
			reason: "Should successfully write key values together with custom metadata",
			args: args{
				client: &fake.KVClient{
					ApplyFn: func(path string, secret *kv.Secret, ao ...kv.ApplyOption) error {
						if diff := cmp.Diff(filepath.Join(parentPathDefault, secretName), path); diff != "" {
							t.Errorf("r: -want, +got:\n%s", diff)
						}
						if diff := cmp.Diff(map[string]string{
							"key1": "val1",
							"key2": "val2",
						}, secret.Data); diff != "" {
							t.Errorf("r: -want, +got:\n%s", diff)
						}
						// Labels on the store secret must arrive as custom
						// metadata on the kv secret.
						if diff := cmp.Diff(map[string]string{
							"foo": "bar",
						}, secret.CustomMeta); diff != "" {
							t.Errorf("r: -want, +got:\n%s", diff)
						}
						return nil
					},
				},
				defaultParentPath: parentPathDefault,
				secret: &store.Secret{
					ScopedName: store.ScopedName{
						Name: secretName,
					},
					Metadata: &v1.ConnectionSecretMetadata{
						Labels: map[string]string{
							"foo": "bar",
						},
					},
					Data: store.KeyValues{
						"key1": []byte("val1"),
						"key2": []byte("val2"),
					},
				},
			},
			want: want{
				changed: true,
			},
		},
	}
	for name, tc := range cases {
		t.Run(name, func(t *testing.T) {
			ss := &SecretStore{
				client:            tc.args.client,
				defaultParentPath: tc.args.defaultParentPath,
			}
			changed, err := ss.WriteKeyValues(context.Background(), tc.args.secret, tc.args.wo...)
			if diff := cmp.Diff(tc.want.err, err, test.EquateErrors()); diff != "" {
				t.Errorf("\n%s\nss.WriteKeyValues(...): -want error, +got error:\n%s", tc.reason, diff)
			}
			if diff := cmp.Diff(tc.want.changed, changed); diff != "" {
				t.Errorf("\n%s\nss.WriteKeyValues(...): -want changed, +got changed:\n%s", tc.reason, diff)
			}
		})
	}
}
// TestSecretStoreDeleteKeyValues exercises SecretStore.DeleteKeyValues against
// a fake Vault KV client. It covers read failures, already-deleted secrets,
// deleting only the supplied keys, deleting the whole secret when no keys are
// supplied or none remain, and failing delete options.
func TestSecretStoreDeleteKeyValues(t *testing.T) {
	type args struct {
		client            KVClient
		defaultParentPath string
		secret            *store.Secret
		do                []store.DeleteOption
	}
	type want struct {
		err error
	}
	cases := map[string]struct {
		reason string
		args
		want
	}{
		"ErrorGettingSecret": {
			reason: "Should return a proper error if getting secret fails.",
			args: args{
				client: &fake.KVClient{
					GetFn: func(path string, secret *kv.Secret) error {
						return errBoom
					},
				},
				secret: &store.Secret{
					ScopedName: store.ScopedName{
						Name: secretName,
					},
				},
			},
			want: want{
				err: errors.Wrap(errBoom, errGet),
			},
		},
		"AlreadyDeleted": {
			reason: "Should return no error if connection secret already deleted.",
			args: args{
				client: &fake.KVClient{
					GetFn: func(path string, secret *kv.Secret) error {
						return errors.New(kv.ErrNotFound)
					},
				},
				secret: &store.Secret{
					ScopedName: store.ScopedName{
						Name: secretName,
					},
				},
			},
			want: want{
				err: nil,
			},
		},
		"DeletesSecretIfNoKVProvided": {
			reason: "Should delete whole secret if no kv provided as input",
			args: args{
				client: &fake.KVClient{
					GetFn: func(path string, secret *kv.Secret) error {
						secret.Data = map[string]string{
							"key1": "val1",
							"key2": "val2",
							"key3": "val3",
						}
						return nil
					},
					DeleteFn: func(path string) error {
						return nil
					},
				},
				secret: &store.Secret{
					ScopedName: store.ScopedName{
						Name: secretName,
					},
				},
			},
			want: want{
				err: nil,
			},
		},
		"ErrorUpdatingSecretWithRemaining": {
			reason: "Should return a proper error if updating secret with remaining keys fails.",
			args: args{
				client: &fake.KVClient{
					GetFn: func(path string, secret *kv.Secret) error {
						secret.Data = map[string]string{
							"key1": "val1",
							"key2": "val2",
							"key3": "val3",
						}
						return nil
					},
					ApplyFn: func(path string, secret *kv.Secret, ao ...kv.ApplyOption) error {
						return errBoom
					},
					// Keys remain after deletion, so Delete must not be called.
					DeleteFn: func(path string) error {
						return errors.New("unexpected delete call")
					},
				},
				secret: &store.Secret{
					ScopedName: store.ScopedName{
						Name: secretName,
					},
					Data: map[string][]byte{
						"key1": []byte("val1"),
						"key2": []byte("val2"),
					},
				},
			},
			want: want{
				err: errors.Wrap(errBoom, errApply),
			},
		},
		"UpdatesSecretByRemovingProvidedKeys": {
			reason: "Should only delete provided keys and should not delete secret if kv provided as input.",
			args: args{
				client: &fake.KVClient{
					GetFn: func(path string, secret *kv.Secret) error {
						secret.Data = map[string]string{
							"key1": "val1",
							"key2": "val2",
							"key3": "val3",
						}
						return nil
					},
					ApplyFn: func(path string, secret *kv.Secret, ao ...kv.ApplyOption) error {
						// Only the key that was not asked to be deleted
						// should survive the apply.
						if diff := cmp.Diff(map[string]string{
							"key3": "val3",
						}, secret.Data); diff != "" {
							t.Errorf("r: -want, +got:\n%s", diff)
						}
						return nil
					},
					DeleteFn: func(path string) error {
						return errors.New("unexpected delete call")
					},
				},
				secret: &store.Secret{
					ScopedName: store.ScopedName{
						Name: secretName,
					},
					Data: map[string][]byte{
						"key1": []byte("val1"),
						"key2": []byte("val2"),
					},
				},
			},
			want: want{
				err: nil,
			},
		},
		"ErrorDeletingSecret": {
			reason: "Should return a proper error if deleting the secret after no keys left fails.",
			args: args{
				client: &fake.KVClient{
					GetFn: func(path string, secret *kv.Secret) error {
						secret.Data = map[string]string{
							"key1": "val1",
							"key2": "val2",
							"key3": "val3",
						}
						return nil
					},
					DeleteFn: func(path string) error {
						return errBoom
					},
				},
				secret: &store.Secret{
					ScopedName: store.ScopedName{
						Name: secretName,
					},
					Data: map[string][]byte{
						"key1": []byte("val1"),
						"key2": []byte("val2"),
						"key3": []byte("val3"),
					},
				},
			},
			want: want{
				err: errors.Wrap(errBoom, errDelete),
			},
		},
		"FailedDeleteOption": {
			reason: "Should return a proper error if provided delete option fails.",
			args: args{
				client: &fake.KVClient{
					GetFn: func(path string, secret *kv.Secret) error {
						secret.Data = map[string]string{
							"key1": "val1",
						}
						return nil
					},
					DeleteFn: func(path string) error {
						return nil
					},
				},
				secret: &store.Secret{
					ScopedName: store.ScopedName{
						Name: secretName,
					},
				},
				do: []store.DeleteOption{
					func(ctx context.Context, secret *store.Secret) error {
						return errBoom
					},
				},
			},
			want: want{
				err: errBoom,
			},
		},
		"SuccessfulDeleteNoKeysLeft": {
			reason: "Should delete the secret if no keys left.",
			args: args{
				client: &fake.KVClient{
					GetFn: func(path string, secret *kv.Secret) error {
						secret.Data = map[string]string{
							"key1": "val1",
							"key2": "val2",
							"key3": "val3",
						}
						return nil
					},
					DeleteFn: func(path string) error {
						return nil
					},
				},
				secret: &store.Secret{
					ScopedName: store.ScopedName{
						Name: secretName,
					},
					Data: map[string][]byte{
						"key1": []byte("val1"),
						"key2": []byte("val2"),
						"key3": []byte("val3"),
					},
				},
				do: []store.DeleteOption{
					func(ctx context.Context, secret *store.Secret) error {
						return nil
					},
				},
			},
			want: want{
				err: nil,
			},
		},
	}
	for name, tc := range cases {
		t.Run(name, func(t *testing.T) {
			ss := &SecretStore{
				client:            tc.args.client,
				defaultParentPath: tc.args.defaultParentPath,
			}
			err := ss.DeleteKeyValues(context.Background(), tc.args.secret, tc.args.do...)
			if diff := cmp.Diff(tc.want.err, err, test.EquateErrors()); diff != "" {
				// Note: this message previously (and misleadingly) referred
				// to ss.ReadKeyValues; this test exercises DeleteKeyValues.
				t.Errorf("\n%s\nss.DeleteKeyValues(...): -want error, +got error:\n%s", tc.reason, diff)
			}
		})
	}
}
// TestNewSecretStore verifies that NewSecretStore rejects invalid Vault auth
// configuration and reads the configured token secret through the supplied
// Kubernetes client before building the store.
func TestNewSecretStore(t *testing.T) {
	kvv2 := v1.VaultKVVersionV2
	type args struct {
		kube client.Client
		cfg  v1.SecretStoreConfig
	}
	type want struct {
		err error
	}
	cases := map[string]struct {
		reason string
		args
		want
	}{
		"InvalidAuthConfig": {
			reason: "Should return a proper error if vault auth configuration is not valid.",
			args: args{
				cfg: v1.SecretStoreConfig{
					Vault: &v1.VaultSecretStoreConfig{
						Auth: v1.VaultAuthConfig{
							Method: v1.VaultAuthToken,
							// Token auth with no token config is invalid.
							Token: nil,
						},
					},
				},
			},
			want: want{
				err: errors.New(errNoTokenProvided),
			},
		},
		"NoTokenSecret": {
			reason: "Should return a proper error if configured vault token secret does not exist.",
			args: args{
				kube: &test.MockClient{
					// Simulate the referenced token secret being absent.
					MockGet: test.NewMockGetFn(nil, func(obj client.Object) error {
						return kerrors.NewNotFound(schema.GroupResource{}, "vault-token")
					}),
				},
				cfg: v1.SecretStoreConfig{
					Vault: &v1.VaultSecretStoreConfig{
						Auth: v1.VaultAuthConfig{
							Method: v1.VaultAuthToken,
							Token: &v1.VaultAuthTokenConfig{
								Source: v1.CredentialsSourceSecret,
								CommonCredentialSelectors: v1.CommonCredentialSelectors{
									SecretRef: &v1.SecretKeySelector{
										SecretReference: v1.SecretReference{
											Name:      "vault-token",
											Namespace: "crossplane-system",
										},
										Key: "token",
									},
								},
							},
						},
					},
				},
			},
			want: want{
				err: errors.Wrap(errors.Wrap(kerrors.NewNotFound(schema.GroupResource{}, "vault-token"), "cannot get credentials secret"), errExtractToken),
			},
		},
		"SuccessfulStore": {
			reason: "Should return no error after building store successfully.",
			args: args{
				kube: &test.MockClient{
					// Return a secret carrying the token under the
					// configured key.
					MockGet: test.NewMockGetFn(nil, func(obj client.Object) error {
						*obj.(*corev1.Secret) = corev1.Secret{
							Data: map[string][]byte{
								"token": []byte("t0ps3cr3t"),
							},
						}
						return nil
					}),
				},
				cfg: v1.SecretStoreConfig{
					Vault: &v1.VaultSecretStoreConfig{
						Version: &kvv2,
						Auth: v1.VaultAuthConfig{
							Method: v1.VaultAuthToken,
							Token: &v1.VaultAuthTokenConfig{
								Source: v1.CredentialsSourceSecret,
								CommonCredentialSelectors: v1.CommonCredentialSelectors{
									SecretRef: &v1.SecretKeySelector{
										SecretReference: v1.SecretReference{
											Name:      "vault-token",
											Namespace: "crossplane-system",
										},
										Key: "token",
									},
								},
							},
						},
					},
				},
			},
			want: want{
				err: nil,
			},
		},
	}
	for name, tc := range cases {
		t.Run(name, func(t *testing.T) {
			_, err := NewSecretStore(context.Background(), tc.args.kube, tc.args.cfg)
			if diff := cmp.Diff(tc.want.err, err, test.EquateErrors()); diff != "" {
				t.Errorf("\n%s\nNewSecretStore(...): -want error, +got error:\n%s", tc.reason, diff)
			}
		})
	}
}

46
pkg/connection/stores.go Normal file
View File

@ -0,0 +1,46 @@
/*
Copyright 2022 The Crossplane Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package connection
import (
"context"
"sigs.k8s.io/controller-runtime/pkg/client"
v1 "github.com/crossplane/crossplane-runtime/apis/common/v1"
"github.com/crossplane/crossplane-runtime/pkg/connection/store/kubernetes"
"github.com/crossplane/crossplane-runtime/pkg/connection/store/vault"
"github.com/crossplane/crossplane-runtime/pkg/errors"
)
const (
errFmtUnknownSecretStore = "unknown secret store type: %q"
)
// RuntimeStoreBuilder builds and returns a Store for any supported Store type
// in a given config.
//
// All in-tree connection Store implementations need to be registered here.
func RuntimeStoreBuilder(ctx context.Context, local client.Client, cfg v1.SecretStoreConfig) (Store, error) {
	storeType := *cfg.Type
	switch storeType {
	case v1.SecretStoreKubernetes:
		return kubernetes.NewSecretStore(ctx, local, cfg)
	case v1.SecretStoreVault:
		return vault.NewSecretStore(ctx, local, cfg)
	default:
		// No registered builder for this type.
		return nil, errors.Errorf(errFmtUnknownSecretStore, storeType)
	}
}

209
pkg/controller/engine.go Normal file
View File

@ -0,0 +1,209 @@
/*
Copyright 2020 The Crossplane Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Package controller provides utilities for working with controllers.
package controller
import (
"context"
"sync"
"k8s.io/client-go/rest"
"sigs.k8s.io/controller-runtime/pkg/cache"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/controller"
"sigs.k8s.io/controller-runtime/pkg/handler"
"sigs.k8s.io/controller-runtime/pkg/manager"
"sigs.k8s.io/controller-runtime/pkg/predicate"
"sigs.k8s.io/controller-runtime/pkg/source"
"github.com/crossplane/crossplane-runtime/pkg/errors"
)
// Error strings
const (
errCreateCache = "cannot create new cache"
errCreateController = "cannot create new controller"
errCrashCache = "cache error"
errCrashController = "controller error"
errWatch = "cannot setup watch"
)
// A NewCacheFn creates a new controller-runtime cache.
type NewCacheFn func(cfg *rest.Config, o cache.Options) (cache.Cache, error)

// A NewControllerFn creates a new controller-runtime controller.
type NewControllerFn func(name string, m manager.Manager, o controller.Options) (controller.Controller, error)

// The default new cache and new controller functions.
var (
	// DefaultNewCacheFn is controller-runtime's stock cache constructor.
	DefaultNewCacheFn NewCacheFn = cache.New

	// DefaultNewControllerFn creates controllers that are not managed by a
	// controller manager, allowing the Engine to control their lifecycle.
	DefaultNewControllerFn NewControllerFn = controller.NewUnmanaged
)
// An Engine manages the lifecycles of controller-runtime controllers (and their
// caches). The lifecycles of the controllers are not coupled to lifecycle of
// the engine, nor to the lifecycle of the controller manager it uses.
type Engine struct {
	mgr manager.Manager

	// started maps controller name to the CancelFunc that stops it. An
	// entry is present only while that controller is considered running.
	started map[string]context.CancelFunc
	// errors records the first error each named controller encountered.
	errors map[string]error
	// mx guards started and errors.
	mx sync.RWMutex

	newCache NewCacheFn
	newCtrl  NewControllerFn
}
// An EngineOption configures an Engine.
type EngineOption func(*Engine)

// WithNewCacheFn may be used to configure a different cache implementation.
// DefaultNewCacheFn is used by default.
func WithNewCacheFn(fn NewCacheFn) EngineOption {
	return func(e *Engine) {
		e.newCache = fn
	}
}

// WithNewControllerFn may be used to configure a different controller
// implementation. DefaultNewControllerFn is used by default.
func WithNewControllerFn(fn NewControllerFn) EngineOption {
	return func(e *Engine) {
		e.newCtrl = fn
	}
}
// NewEngine produces a new Engine backed by the supplied manager, applying
// any supplied options over the defaults.
func NewEngine(mgr manager.Manager, opts ...EngineOption) *Engine {
	engine := &Engine{
		mgr:      mgr,
		started:  map[string]context.CancelFunc{},
		errors:   map[string]error{},
		newCache: DefaultNewCacheFn,
		newCtrl:  DefaultNewControllerFn,
	}

	for _, apply := range opts {
		apply(engine)
	}

	return engine
}
// IsRunning indicates whether the named controller is running - i.e. whether it
// has been started and does not appear to have crashed.
func (e *Engine) IsRunning(name string) bool {
	e.mx.RLock()
	defer e.mx.RUnlock()

	// A controller is running exactly while its stop func is registered.
	_, running := e.started[name]
	return running
}
// Err returns any error encountered by the named controller. The returned error
// is always nil if the named controller is running.
func (e *Engine) Err(name string) error {
	e.mx.RLock()
	defer e.mx.RUnlock()

	return e.errors[name]
}
// Stop the named controller. Stopping a controller that is not running is a
// no-op (see done).
func (e *Engine) Stop(name string) {
	e.done(name, nil)
}
// done stops the named controller if it is running, and records the supplied
// error (which may be nil, e.g. when called via Stop). The first recorded
// error for a controller is retained across subsequent calls.
func (e *Engine) done(name string, err error) {
	e.mx.Lock()
	defer e.mx.Unlock()

	stop, ok := e.started[name]
	if ok {
		// Cancelling the context stops the controller and its cache.
		stop()
		delete(e.started, name)
	}

	// Don't overwrite the first error if done is called multiple times.
	if e.errors[name] != nil {
		return
	}
	e.errors[name] = err
}
// Watch an object. Bundles the kind to watch with the handler that processes
// its events and any predicates that filter them.
type Watch struct {
	kind       client.Object
	handler    handler.EventHandler
	predicates []predicate.Predicate
}

// For returns a Watch for the supplied kind of object. Events will be handled
// by the supplied EventHandler, and may be filtered by the supplied predicates.
func For(kind client.Object, h handler.EventHandler, p ...predicate.Predicate) Watch {
	return Watch{kind: kind, handler: h, predicates: p}
}
// Start the named controller. Each controller is started with its own cache
// whose lifecycle is coupled to the controller. The controller is started with
// the supplied options, and configured with the supplied watches. Start does
// not block.
func (e *Engine) Start(name string, o controller.Options, w ...Watch) error {
	if e.IsRunning(name) {
		return nil
	}

	// Each controller gets its own cache because there's currently no way to
	// stop an informer. In practice a controller-runtime cache is a map of
	// kinds to informers. If we delete the CRD for a kind we need to stop the
	// relevant informer, or it will spew errors about the kind not existing. We
	// work around this by stopping the entire cache.
	ca, err := e.newCache(e.mgr.GetConfig(), cache.Options{Scheme: e.mgr.GetScheme(), Mapper: e.mgr.GetRESTMapper()})
	if err != nil {
		return errors.Wrap(err, errCreateCache)
	}

	ctrl, err := e.newCtrl(name, e.mgr, o)
	if err != nil {
		return errors.Wrap(err, errCreateController)
	}

	for _, wt := range w {
		if err := ctrl.Watch(source.NewKindWithCache(wt.kind, ca), wt.handler, wt.predicates...); err != nil {
			return errors.Wrap(err, errWatch)
		}
	}

	// Only register the controller as started once all setup has succeeded.
	// Previously it was registered before setup, so any setup error above
	// returned with the cancel func leaked and the controller permanently
	// reported as running by IsRunning.
	ctx, stop := context.WithCancel(context.Background())
	e.mx.Lock()
	e.started[name] = stop
	e.errors[name] = nil
	e.mx.Unlock()

	// Run the cache and the controller until the shared context is cancelled
	// (via Stop) or either returns; done retains the first error recorded.
	go func() {
		<-e.mgr.Elected()
		e.done(name, errors.Wrap(ca.Start(ctx), errCrashCache))
	}()
	go func() {
		<-e.mgr.Elected()
		e.done(name, errors.Wrap(ctrl.Start(ctx), errCrashController))
	}()

	return nil
}

View File

@ -0,0 +1,186 @@
/*
Copyright 2020 The Crossplane Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package controller
import (
"context"
"testing"
"time"
"github.com/google/go-cmp/cmp"
"k8s.io/client-go/rest"
"sigs.k8s.io/controller-runtime/pkg/cache"
"sigs.k8s.io/controller-runtime/pkg/controller"
"sigs.k8s.io/controller-runtime/pkg/handler"
"sigs.k8s.io/controller-runtime/pkg/manager"
"sigs.k8s.io/controller-runtime/pkg/predicate"
"sigs.k8s.io/controller-runtime/pkg/source"
"github.com/crossplane/crossplane-runtime/pkg/errors"
"github.com/crossplane/crossplane-runtime/pkg/resource/fake"
"github.com/crossplane/crossplane-runtime/pkg/test"
)
// MockCache is a cache.Cache whose Start behavior is stubbed for testing.
type MockCache struct {
	cache.Cache

	// MockStart is invoked by Start.
	MockStart func(stop context.Context) error
}

// Start delegates to MockStart.
func (c *MockCache) Start(stop context.Context) error {
	return c.MockStart(stop)
}
// MockController is a controller.Controller whose Start and Watch behaviors
// are stubbed for testing.
type MockController struct {
	controller.Controller

	// MockStart is invoked by Start.
	MockStart func(stop context.Context) error
	// MockWatch is invoked by Watch.
	MockWatch func(s source.Source, h handler.EventHandler, p ...predicate.Predicate) error
}

// Start delegates to MockStart.
func (c *MockController) Start(stop context.Context) error {
	return c.MockStart(stop)
}

// Watch delegates to MockWatch.
func (c *MockController) Watch(s source.Source, h handler.EventHandler, p ...predicate.Predicate) error {
	return c.MockWatch(s, h, p...)
}
// TestEngine exercises Engine.Start: synchronous setup errors (cache,
// controller, watch creation) are returned directly, while errors from the
// running cache or controller surface later via Engine.Err.
func TestEngine(t *testing.T) {
	errBoom := errors.New("boom")

	type args struct {
		name string
		o    controller.Options
		w    []Watch
	}
	type want struct {
		err   error // returned synchronously by Start
		crash error // reported asynchronously via Err after the goroutines run
	}
	cases := map[string]struct {
		reason string
		e      *Engine
		args   args
		want   want
	}{
		"NewCacheError": {
			reason: "Errors creating a new cache should be returned",
			e: NewEngine(&fake.Manager{},
				WithNewCacheFn(func(*rest.Config, cache.Options) (cache.Cache, error) { return nil, errBoom }),
			),
			args: args{
				name: "coolcontroller",
			},
			want: want{
				err: errors.Wrap(errBoom, errCreateCache),
			},
		},
		"NewControllerError": {
			reason: "Errors creating a new controller should be returned",
			e: NewEngine(&fake.Manager{},
				WithNewCacheFn(func(*rest.Config, cache.Options) (cache.Cache, error) { return nil, nil }),
				WithNewControllerFn(func(string, manager.Manager, controller.Options) (controller.Controller, error) { return nil, errBoom }),
			),
			args: args{
				name: "coolcontroller",
			},
			want: want{
				err: errors.Wrap(errBoom, errCreateController),
			},
		},
		"WatchError": {
			reason: "Errors adding a watch should be returned",
			e: NewEngine(&fake.Manager{},
				WithNewCacheFn(func(*rest.Config, cache.Options) (cache.Cache, error) { return nil, nil }),
				WithNewControllerFn(func(string, manager.Manager, controller.Options) (controller.Controller, error) {
					c := &MockController{MockWatch: func(source.Source, handler.EventHandler, ...predicate.Predicate) error { return errBoom }}
					return c, nil
				}),
			),
			args: args{
				name: "coolcontroller",
				w:    []Watch{For(&fake.Managed{}, nil)},
			},
			want: want{
				err: errors.Wrap(errBoom, errWatch),
			},
		},
		"CacheCrashError": {
			reason: "Errors starting or running a cache should be returned",
			e: NewEngine(&fake.Manager{},
				WithNewCacheFn(func(*rest.Config, cache.Options) (cache.Cache, error) {
					c := &MockCache{MockStart: func(stop context.Context) error { return errBoom }}
					return c, nil
				}),
				WithNewControllerFn(func(string, manager.Manager, controller.Options) (controller.Controller, error) {
					c := &MockController{MockStart: func(stop context.Context) error {
						return nil
					}}
					return c, nil
				}),
			),
			args: args{
				name: "coolcontroller",
			},
			want: want{
				crash: errors.Wrap(errBoom, errCrashCache),
			},
		},
		"ControllerCrashError": {
			reason: "Errors starting or running a controller should be returned",
			e: NewEngine(&fake.Manager{},
				WithNewCacheFn(func(*rest.Config, cache.Options) (cache.Cache, error) {
					c := &MockCache{MockStart: func(stop context.Context) error {
						return nil
					}}
					return c, nil
				}),
				WithNewControllerFn(func(string, manager.Manager, controller.Options) (controller.Controller, error) {
					c := &MockController{MockStart: func(stop context.Context) error {
						return errBoom
					}}
					return c, nil
				}),
			),
			args: args{
				name: "coolcontroller",
			},
			want: want{
				crash: errors.Wrap(errBoom, errCrashController),
			},
		},
	}
	for name, tc := range cases {
		t.Run(name, func(t *testing.T) {
			err := tc.e.Start(tc.args.name, tc.args.o, tc.args.w...)
			if diff := cmp.Diff(tc.want.err, err, test.EquateErrors()); diff != "" {
				t.Errorf("\n%s\ne.Start(...): -want error, +got error:\n%s", tc.reason, diff)
			}

			// Give the goroutines a little time to return an error. If this
			// becomes flaky or time consuming we could use a ticker instead.
			time.Sleep(100 * time.Millisecond)

			tc.e.Stop(tc.args.name)
			if diff := cmp.Diff(tc.want.crash, tc.e.Err(tc.args.name), test.EquateErrors()); diff != "" {
				t.Errorf("\n%s\ne.Err(...): -want error, +got error:\n%s", tc.reason, diff)
			}
		})
	}
}

View File

@ -1,15 +0,0 @@
package controller
import (
"k8s.io/apimachinery/pkg/runtime/schema"
)
// A Gate is an interface to allow reconcilers to delay a callback until a set of GVKs are set to true inside the gate.
type Gate interface {
	// Register to call a callback function when all given GVKs are marked true. If the callback is unblocked, the
	// registration is removed.
	Register(callback func(), gvks ...schema.GroupVersionKind)

	// Set marks the associated condition to the given value. If the condition is already set as
	// that value, then this is a no-op. Returns true if there was an update detected.
	Set(gvk schema.GroupVersionKind, ready bool) bool
}

View File

@ -14,20 +14,17 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
// Package controller configures controller options.
package controller
import (
"crypto/tls"
"time"
"k8s.io/client-go/util/workqueue"
"sigs.k8s.io/controller-runtime/pkg/controller"
"github.com/crossplane/crossplane-runtime/pkg/feature"
"github.com/crossplane/crossplane-runtime/pkg/logging"
"github.com/crossplane/crossplane-runtime/pkg/ratelimiter"
"github.com/crossplane/crossplane-runtime/pkg/reconciler/managed"
"github.com/crossplane/crossplane-runtime/pkg/statemetrics"
)
// DefaultOptions returns a functional set of options with conservative
@ -49,7 +46,7 @@ type Options struct {
// The GlobalRateLimiter used by this controller manager. The rate of
// reconciles across all controllers will be subject to this limit.
GlobalRateLimiter ratelimiter.RateLimiter
GlobalRateLimiter workqueue.RateLimiter
// PollInterval at which each controller should speculatively poll to
// determine whether it has work to do.
@ -60,51 +57,12 @@ type Options struct {
// Features that should be enabled.
Features *feature.Flags
// ESSOptions for External Secret Stores.
ESSOptions *ESSOptions
// MetricOptions for recording metrics.
MetricOptions *MetricOptions
// ChangeLogOptions for recording change logs.
ChangeLogOptions *ChangeLogOptions
// Gate implements a gated function callback pattern.
Gate Gate
}
// ForControllerRuntime extracts options for controller-runtime.
func (o Options) ForControllerRuntime() controller.Options {
recoverPanic := true
return controller.Options{
MaxConcurrentReconciles: o.MaxConcurrentReconciles,
RateLimiter: ratelimiter.NewController(),
RecoverPanic: &recoverPanic,
}
}
// ESSOptions for External Secret Stores.
type ESSOptions struct {
TLSConfig *tls.Config
TLSSecretName *string
}
// MetricOptions for recording metrics.
type MetricOptions struct {
// PollStateMetricInterval at which each controller should record state
PollStateMetricInterval time.Duration
// MetricsRecorder to use for recording metrics.
MRMetrics managed.MetricRecorder
// MRStateMetrics to use for recording state metrics.
MRStateMetrics *statemetrics.MRStateMetrics
}
// ChangeLogOptions for recording changes to managed resources into the change
// logs.
type ChangeLogOptions struct {
ChangeLogger managed.ChangeLogger
}

View File

@ -22,8 +22,6 @@ package errors
import (
"errors"
"fmt"
kerrors "k8s.io/apimachinery/pkg/util/errors"
)
// New returns an error that formats as the given text. Each call to New returns
@ -54,7 +52,7 @@ func Is(err, target error) bool { return errors.Is(err, target) }
// by repeatedly calling Unwrap.
//
// An error matches target if the error's concrete value is assignable to the
// value pointed to by target, or if the error has a method As(any) bool
// value pointed to by target, or if the error has a method As(interface{}) bool
// such that As(target) returns true. In the latter case, the As method is
// responsible for setting target.
//
@ -63,7 +61,7 @@ func Is(err, target error) bool { return errors.Is(err, target) }
//
// As panics if target is not a non-nil pointer to either a type that implements
// error, or to any interface type.
func As(err error, target any) bool { return errors.As(err, target) }
func As(err error, target interface{}) bool { return errors.As(err, target) }
// Unwrap returns the result of calling the Unwrap method on err, if err's type
// contains an Unwrap method returning error. Otherwise, Unwrap returns nil.
@ -77,7 +75,7 @@ func Unwrap(err error) error { return errors.Unwrap(err) }
// invalid to include more than one %w verb or to supply it with an operand that
// does not implement the error interface. The %w verb is otherwise a synonym
// for %v.
func Errorf(format string, a ...any) error { return fmt.Errorf(format, a...) }
func Errorf(format string, a ...interface{}) error { return fmt.Errorf(format, a...) }
// WithMessage annotates err with a new message. If err is nil, WithMessage
// returns nil.
@ -85,17 +83,15 @@ func WithMessage(err error, message string) error {
if err == nil {
return nil
}
return fmt.Errorf("%s: %w", message, err)
}
// WithMessagef annotates err with the format specifier. If err is nil,
// WithMessagef returns nil.
func WithMessagef(err error, format string, args ...any) error {
func WithMessagef(err error, format string, args ...interface{}) error {
if err == nil {
return nil
}
return fmt.Errorf("%s: %w", fmt.Sprintf(format, args...), err)
}
@ -104,8 +100,8 @@ func Wrap(err error, message string) error {
return WithMessage(err, message)
}
// Wrapf is an alias for WithMessagef.
func Wrapf(err error, format string, args ...any) error {
// Wrapf is an alias for WithMessagef
func Wrapf(err error, format string, args ...interface{}) error {
return WithMessagef(err, format, args...)
}
@ -118,49 +114,15 @@ func Cause(err error) error {
}
for err != nil {
// We're ignoring errorlint telling us to use errors.As because
// we actually do want to check the outermost error.
//nolint:errorlint
w, ok := err.(wrapped)
if !ok {
return err
}
err = w.Unwrap()
}
return err
}
// MultiError is an error that wraps multiple errors.
type MultiError interface {
error
Unwrap() []error
}
// Join returns an error that wraps the given errors. Any nil error values are
// discarded. Join returns nil if errs contains no non-nil values. The error
// formats as the concatenation of the strings obtained by calling the Error
// method of each element of errs and formatting like:
//
// [first error, second error, third error]
//
// Note: aggregating errors should not be the default. Usually, return only the
// first error, and only aggregate if there is clear value to the user.
func Join(errs ...error) MultiError {
err := kerrors.NewAggregate(errs)
if err == nil {
return nil
}
return multiError{aggregate: err}
}
type multiError struct {
aggregate kerrors.Aggregate
}
func (m multiError) Error() string {
return m.aggregate.Error()
}
func (m multiError) Unwrap() []error {
return m.aggregate.Errors()
}

View File

@ -29,7 +29,6 @@ func TestWrap(t *testing.T) {
err error
message string
}
cases := map[string]struct {
args args
want error
@ -64,9 +63,8 @@ func TestWrapf(t *testing.T) {
type args struct {
err error
message string
args []any
args []interface{}
}
cases := map[string]struct {
args args
want error
@ -82,7 +80,7 @@ func TestWrapf(t *testing.T) {
args: args{
err: New("boom"),
message: "very useful context about %s",
args: []any{"ducks"},
args: []interface{}{"ducks"},
},
want: Errorf("very useful context about %s: %w", "ducks", New("boom")),
},

View File

@ -1,50 +0,0 @@
/*
Copyright 2023 The Crossplane Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package errors
import (
"context"
kerrors "k8s.io/apimachinery/pkg/api/errors"
"sigs.k8s.io/controller-runtime/pkg/reconcile"
)
// SilentlyRequeueOnConflict returns a requeue result and silently drops the
// error if it is a Kubernetes conflict error from the optimistic concurrency
// protocol.
func SilentlyRequeueOnConflict(result reconcile.Result, err error) (reconcile.Result, error) {
if kerrors.IsConflict(Cause(err)) {
return reconcile.Result{Requeue: true}, nil
}
return result, err
}
// WithSilentRequeueOnConflict wraps a Reconciler and silently drops conflict
// errors and requeues instead.
func WithSilentRequeueOnConflict(r reconcile.Reconciler) reconcile.Reconciler {
	return &silentlyRequeueOnConflict{Reconciler: r}
}

// silentlyRequeueOnConflict decorates a Reconciler, translating conflict
// errors from its Reconcile into silent requeues.
type silentlyRequeueOnConflict struct {
	reconcile.Reconciler
}

// Reconcile calls the wrapped Reconciler and filters the outcome through
// SilentlyRequeueOnConflict.
func (r *silentlyRequeueOnConflict) Reconcile(ctx context.Context, req reconcile.Request) (reconcile.Result, error) {
	result, err := r.Reconciler.Reconcile(ctx, req)
	return SilentlyRequeueOnConflict(result, err)
}

View File

@ -1,102 +0,0 @@
/*
Copyright 2023 The Crossplane Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package errors
import (
"testing"
"time"
"github.com/google/go-cmp/cmp"
kerrors "k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/runtime/schema"
"sigs.k8s.io/controller-runtime/pkg/reconcile"
"github.com/crossplane/crossplane-runtime/pkg/test"
)
// TestSilentlyRequeueOnConflict verifies that conflict errors — including
// wrapped ones — are converted into a silent requeue, while other errors and
// results pass through unchanged.
func TestSilentlyRequeueOnConflict(t *testing.T) {
	type args struct {
		result reconcile.Result
		err    error
	}
	type want struct {
		result reconcile.Result
		err    error
	}
	tests := []struct {
		reason string
		args   args
		want   want
	}{
		{
			reason: "nil error",
			args: args{
				result: reconcile.Result{RequeueAfter: time.Second},
			},
			want: want{
				result: reconcile.Result{RequeueAfter: time.Second},
			},
		},
		{
			reason: "other error",
			args: args{
				result: reconcile.Result{RequeueAfter: time.Second},
				err:    New("some other error"),
			},
			want: want{
				result: reconcile.Result{RequeueAfter: time.Second},
				err:    New("some other error"),
			},
		},
		{
			reason: "conflict error",
			args: args{
				result: reconcile.Result{RequeueAfter: time.Second},
				err:    kerrors.NewConflict(schema.GroupResource{Group: "nature", Resource: "stones"}, "foo", New("nested error")),
			},
			want: want{
				result: reconcile.Result{Requeue: true},
			},
		},
		{
			// Conflicts must be detected through wrapping (via Cause).
			reason: "nested conflict error",
			args: args{
				result: reconcile.Result{RequeueAfter: time.Second},
				err: Wrap(
					kerrors.NewConflict(schema.GroupResource{Group: "nature", Resource: "stones"}, "foo", New("nested error")),
					"outer error"),
			},
			want: want{
				result: reconcile.Result{Requeue: true},
			},
		},
	}
	for _, tt := range tests {
		t.Run(tt.reason, func(t *testing.T) {
			got, err := SilentlyRequeueOnConflict(tt.args.result, tt.args.err)
			if diff := cmp.Diff(tt.want.err, err, test.EquateErrors()); diff != "" {
				t.Errorf("\n%s\nIgnoreConflict(...): -want error, +got error:\n%s", tt.reason, diff)
			}
			if diff := cmp.Diff(tt.want.result, got); diff != "" {
				t.Errorf("\n%s\nIgnoreConflict(...): -want result, +got result:\n%s", tt.reason, diff)
			}
		})
	}
}

View File

@ -27,7 +27,7 @@ type Type string
// Event types. See below for valid types.
// https://godoc.org/k8s.io/client-go/tools/record#EventRecorder
const (
var (
TypeNormal Type = "Normal"
TypeWarning Type = "Warning"
)
@ -52,7 +52,6 @@ func Normal(r Reason, message string, keysAndValues ...string) Event {
Annotations: map[string]string{},
}
sliceMap(keysAndValues, e.Annotations)
return e
}
@ -65,7 +64,6 @@ func Warning(r Reason, err error, keysAndValues ...string) Event {
Annotations: map[string]string{},
}
sliceMap(keysAndValues, e.Annotations)
return e
}
@ -79,27 +77,17 @@ type Recorder interface {
type APIRecorder struct {
kube record.EventRecorder
annotations map[string]string
filterFns []FilterFn
}
// FilterFn is a function used to filter events.
// It should return false when events should not be sent.
type FilterFn func(obj runtime.Object, e Event) bool
// NewAPIRecorder returns an APIRecorder that records Kubernetes events to an
// APIServer using the supplied EventRecorder.
func NewAPIRecorder(r record.EventRecorder, fns ...FilterFn) *APIRecorder {
return &APIRecorder{kube: r, annotations: map[string]string{}, filterFns: fns}
func NewAPIRecorder(r record.EventRecorder) *APIRecorder {
return &APIRecorder{kube: r, annotations: map[string]string{}}
}
// Event records the supplied event.
func (r *APIRecorder) Event(obj runtime.Object, e Event) {
for _, filter := range r.filterFns {
if filter(obj, e) {
return
}
}
r.kube.AnnotatedEventf(obj, r.annotations, string(e.Type), string(e.Reason), "%s", e.Message)
r.kube.AnnotatedEventf(obj, r.annotations, string(e.Type), string(e.Reason), e.Message)
}
// WithAnnotations returns a new *APIRecorder that includes the supplied
@ -109,9 +97,7 @@ func (r *APIRecorder) WithAnnotations(keysAndValues ...string) Recorder {
for k, v := range r.annotations {
ar.annotations[k] = v
}
sliceMap(keysAndValues, ar.annotations)
return ar
}

View File

@ -24,11 +24,11 @@ import (
)
func TestSliceMap(t *testing.T) {
type args struct {
from []string
to map[string]string
}
cases := map[string]struct {
reason string
args args
@ -86,4 +86,5 @@ func TestSliceMap(t *testing.T) {
}
})
}
}

View File

@ -33,11 +33,9 @@ type Flags struct {
// Enable a feature flag.
func (fs *Flags) Enable(f Flag) {
fs.m.Lock()
if fs.enabled == nil {
fs.enabled = make(map[Flag]bool)
}
fs.enabled[f] = true
fs.m.Unlock()
}
@ -47,9 +45,7 @@ func (fs *Flags) Enabled(f Flag) bool {
if fs == nil {
return false
}
fs.m.RLock()
defer fs.m.RUnlock()
return fs.enabled[f]
}

View File

@ -1,27 +0,0 @@
/*
Copyright 2023 The Crossplane Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package feature
// EnableBetaManagementPolicies enables beta support for
// Management Policies. See the below design for more details.
// https://github.com/crossplane/crossplane/pull/3531
const EnableBetaManagementPolicies Flag = "EnableBetaManagementPolicies"
// EnableAlphaChangeLogs enables alpha support for capturing change logs during
// reconciliation. See the following design for more details:
// https://github.com/crossplane/crossplane/pull/5822
const EnableAlphaChangeLogs Flag = "EnableAlphaChangeLogs"

View File

@ -82,7 +82,6 @@ func (sg Segments) String() string {
b.WriteString(fmt.Sprintf("[%s]", s.Field))
continue
}
b.WriteString(fmt.Sprintf(".%s", s.Field))
case SegmentIndex:
b.WriteString(fmt.Sprintf("[%d]", s.Index))
@ -121,9 +120,9 @@ func Parse(path string) (Segments, error) {
go l.run()
segments := make(Segments, 0, 1)
for i := range l.items {
switch i.typ { //nolint:exhaustive // We're only worried about names, not separators.
// We're only worried about names, not separators.
switch i.typ { // nolint:exhaustive
case itemField:
segments = append(segments, Field(i.val))
case itemFieldOrIndex:
@ -132,7 +131,6 @@ func Parse(path string) (Segments, error) {
return nil, errors.Errorf("%s at position %d", i.val, i.pos)
}
}
return segments, nil
}
@ -177,7 +175,6 @@ func (l *lexer) run() {
for state := lexField; state != nil; {
state = state(l)
}
close(l.items)
}
@ -186,13 +183,11 @@ func (l *lexer) emit(t itemType) {
if l.pos <= l.start {
return
}
l.items <- item{typ: t, pos: l.start, val: l.input[l.start:l.pos]}
l.start = l.pos
}
func (l *lexer) errorf(pos int, format string, args ...any) stateFn {
func (l *lexer) errorf(pos int, format string, args ...interface{}) stateFn {
l.items <- item{typ: itemError, pos: pos, val: fmt.Sprintf(format, args...)}
return nil
}
@ -208,14 +203,12 @@ func lexField(l *lexer) stateFn {
case leftBracket:
l.pos += i
l.emit(itemField)
return lexLeftBracket
// A period indicates the end of the field name.
case period:
l.pos += i
l.emit(itemField)
return lexPeriod
}
}
@ -224,7 +217,6 @@ func lexField(l *lexer) stateFn {
l.pos = len(l.input)
l.emit(itemField)
l.emit(itemEOL)
return nil
}
@ -243,7 +235,6 @@ func lexPeriod(l *lexer) stateFn {
if r == period {
return l.errorf(l.pos, "unexpected %q", period)
}
if r == leftBracket {
return l.errorf(l.pos, "unexpected %q", leftBracket)
}
@ -259,7 +250,6 @@ func lexLeftBracket(l *lexer) stateFn {
l.pos += utf8.RuneLen(leftBracket)
l.emit(itemLeftBracket)
return lexFieldOrIndex
}
@ -283,13 +273,11 @@ func lexFieldOrIndex(l *lexer) stateFn {
// Periods are not considered field separators when we're inside brackets.
l.pos += rbi
l.emit(itemFieldOrIndex)
return lexRightBracket
}
func lexRightBracket(l *lexer) stateFn {
l.pos += utf8.RuneLen(rightBracket)
l.emit(itemRightBracket)
return lexField
}

View File

@ -72,6 +72,7 @@ func TestSegments(t *testing.T) {
if diff := cmp.Diff(tc.want, tc.s.String()); diff != "" {
t.Errorf("s.String(): -want, +got:\n %s", diff)
}
})
}
}
@ -299,7 +300,6 @@ func TestParse(t *testing.T) {
if diff := cmp.Diff(tc.want.err, err, test.EquateErrors()); diff != "" {
t.Fatalf("\nParse(%s): %s: -want error, +got error:\n%s", tc.path, tc.reason, diff)
}
if diff := cmp.Diff(tc.want.s, got); diff != "" {
t.Errorf("\nParse(%s): %s: -want, +got:\n%s", tc.path, tc.reason, diff)
}

View File

@ -19,7 +19,7 @@ package fieldpath
import (
"reflect"
"dario.cat/mergo"
"github.com/imdario/mergo"
xpv1 "github.com/crossplane/crossplane-runtime/apis/common/v1"
"github.com/crossplane/crossplane-runtime/pkg/errors"
@ -30,8 +30,8 @@ const (
)
// MergeValue of the receiver p at the specified field path with the supplied
// value according to supplied merge options.
func (p *Paved) MergeValue(path string, value any, mo *xpv1.MergeOptions) error {
// value according to supplied merge options
func (p *Paved) MergeValue(path string, value interface{}, mo *xpv1.MergeOptions) error {
dst, err := p.GetValue(path)
if IsNotFound(err) || mo == nil {
dst = nil
@ -52,14 +52,13 @@ func (p *Paved) MergeValue(path string, value any, mo *xpv1.MergeOptions) error
// If a nil merge options is supplied, the default behavior is MergeOptions'
// default behavior. If dst or src is nil, src is returned
// (i.e., dst replaced by src).
func merge(dst, src any, mergeOptions *xpv1.MergeOptions) (any, error) {
func merge(dst, src interface{}, mergeOptions *xpv1.MergeOptions) (interface{}, error) {
// because we are merging values of a field, which can be a slice, and
// because mergo currently supports merging only maps or structs,
// we wrap the argument to be passed to mergo.Merge in a map.
const keyArg = "arg"
argWrap := func(arg any) map[string]any {
return map[string]any{
argWrap := func(arg interface{}) map[string]interface{} {
return map[string]interface{}{
keyArg: arg,
}
}
@ -78,28 +77,24 @@ func merge(dst, src any, mergeOptions *xpv1.MergeOptions) (any, error) {
if err := mergo.Merge(&mDst, argWrap(src), mergeOptions.MergoConfiguration()...); err != nil {
return nil, errors.Wrap(err, errInvalidMerge)
}
return mDst[keyArg], nil
}
func removeSourceDuplicates(dst, src any) any {
func removeSourceDuplicates(dst, src interface{}) interface{} {
sliceDst, sliceSrc := reflect.ValueOf(dst), reflect.ValueOf(src)
if sliceDst.Kind() == reflect.Ptr {
sliceDst = sliceDst.Elem()
}
if sliceSrc.Kind() == reflect.Ptr {
sliceSrc = sliceSrc.Elem()
}
if sliceDst.Kind() != reflect.Slice || sliceSrc.Kind() != reflect.Slice {
return src
}
result := reflect.New(sliceSrc.Type()).Elem() // we will not modify src
for i := range sliceSrc.Len() {
for i := 0; i < sliceSrc.Len(); i++ {
itemSrc := sliceSrc.Index(i)
found := false
for j := 0; j < sliceDst.Len() && !found; j++ {
// if src item is found in the dst array
@ -107,12 +102,10 @@ func removeSourceDuplicates(dst, src any) any {
found = true
}
}
if !found {
// then put src item into result
result = reflect.Append(result, itemSrc)
}
}
return result.Interface()
}

View File

@ -36,7 +36,6 @@ func TestMergeValue(t *testing.T) {
valSrc2 = "e1-from-source-2"
valDst = "e1-from-destination"
)
formatArr := func(arr []string) string {
return fmt.Sprintf(`{"%s": ["%s"]}`, pathTest, strings.Join(arr, `", "`))
}
@ -48,30 +47,27 @@ func TestMergeValue(t *testing.T) {
}
arrSrc := []string{valSrc}
fnMapSrc := func() map[string]any {
return map[string]any{pathTest: valSrc}
fnMapSrc := func() map[string]interface{} {
return map[string]interface{}{pathTest: valSrc}
}
arrDst := []string{valDst}
fnMapDst := func() map[string]any {
return map[string]any{pathTest: map[string]any{pathTest: valDst}}
fnMapDst := func() map[string]interface{} {
return map[string]interface{}{pathTest: map[string]interface{}{pathTest: valDst}}
}
valFalse, valTrue := false, true
type fields struct {
object map[string]any
object map[string]interface{}
}
type args struct {
path string
value any
value interface{}
mo *xpv1.MergeOptions
}
type want struct {
serialized string
err error
}
tests := map[string]struct {
reason string
fields fields
@ -81,7 +77,7 @@ func TestMergeValue(t *testing.T) {
"MergeArrayNoMergeOptions": {
reason: "If no merge options are given, default is to override an array",
fields: fields{
object: map[string]any{
object: map[string]interface{}{
pathTest: valDst,
},
},
@ -96,7 +92,7 @@ func TestMergeValue(t *testing.T) {
"MergeArrayNoAppend": {
reason: "If MergeOptions.AppendSlice is false, an array should be overridden when merging",
fields: fields{
object: map[string]any{
object: map[string]interface{}{
pathTest: arrDst,
},
},
@ -114,7 +110,7 @@ func TestMergeValue(t *testing.T) {
"MergeArrayAppend": {
reason: "If MergeOptions.AppendSlice is true, dst array should be merged with the src array",
fields: fields{
object: map[string]any{
object: map[string]interface{}{
pathTest: arrDst,
},
},
@ -132,7 +128,7 @@ func TestMergeValue(t *testing.T) {
"MergeArrayAppendDuplicate": {
reason: "If MergeOptions.AppendSlice is true, dst array should be merged with the src array not allowing duplicates",
fields: fields{
object: map[string]any{
object: map[string]interface{}{
pathTest: []string{valDst, valSrc},
},
},
@ -195,7 +191,7 @@ func TestMergeValue(t *testing.T) {
}
for name, tc := range tests {
t.Run(name, func(t *testing.T) {
want := make(map[string]any)
want := make(map[string]interface{})
if err := json.Unmarshal([]byte(tc.want.serialized), &want); err != nil {
t.Fatalf("Test case error: Unable to unmarshall JSON doc: %v", err)
}
@ -203,13 +199,11 @@ func TestMergeValue(t *testing.T) {
p := &Paved{
object: tc.fields.object,
}
err := p.MergeValue(tc.args.path, tc.args.value, tc.args.mo)
if diff := cmp.Diff(tc.want.err, err, test.EquateErrors()); diff != "" {
t.Fatalf("\np.MergeValue(%s, %v): %s: -want error, +got error:\n%s",
tc.args.path, tc.args.value, tc.reason, diff)
}
if diff := cmp.Diff(want, p.object); diff != "" {
t.Fatalf("\np.MergeValue(%s, %v): %s: -want, +got:\n%s",
tc.args.path, tc.args.value, tc.reason, diff)

View File

@ -28,11 +28,11 @@ import (
// DefaultMaxFieldPathIndex is the max allowed index in a field path.
const DefaultMaxFieldPathIndex = 1024
type notFoundError struct {
type errNotFound struct {
error
}
func (e notFoundError) IsNotFound() bool {
func (e errNotFound) IsNotFound() bool {
return true
}
@ -44,13 +44,12 @@ func IsNotFound(err error) bool {
_, ok := cause.(interface {
IsNotFound() bool
})
return ok
}
// A Paved JSON object supports getting and setting values by their field path.
type Paved struct {
object map[string]any
object map[string]interface{}
maxFieldPathIndex uint
}
@ -65,7 +64,7 @@ func PaveObject(o runtime.Object, opts ...PavedOption) (*Paved, error) {
}
// Pave a JSON object, making it possible to get and set values by field path.
func Pave(object map[string]any, opts ...PavedOption) *Paved {
func Pave(object map[string]interface{}, opts ...PavedOption) *Paved {
p := &Paved{object: object, maxFieldPathIndex: DefaultMaxFieldPathIndex}
for _, opt := range opts {
@ -76,9 +75,9 @@ func Pave(object map[string]any, opts ...PavedOption) *Paved {
}
// WithMaxFieldPathIndex returns a PavedOption that sets the max allowed index for field paths, 0 means no limit.
func WithMaxFieldPathIndex(maxIndex uint) PavedOption {
func WithMaxFieldPathIndex(max uint) PavedOption {
return func(paved *Paved) {
paved.maxFieldPathIndex = maxIndex
paved.maxFieldPathIndex = max
}
}
@ -97,61 +96,51 @@ func (p *Paved) UnmarshalJSON(data []byte) error {
}
// UnstructuredContent returns the JSON serialisable content of this Paved.
func (p *Paved) UnstructuredContent() map[string]any {
func (p *Paved) UnstructuredContent() map[string]interface{} {
if p.object == nil {
return make(map[string]any)
return make(map[string]interface{})
}
return p.object
}
// SetUnstructuredContent sets the JSON serialisable content of this Paved.
func (p *Paved) SetUnstructuredContent(content map[string]any) {
func (p *Paved) SetUnstructuredContent(content map[string]interface{}) {
p.object = content
}
func (p *Paved) getValue(s Segments) (any, error) {
func (p *Paved) getValue(s Segments) (interface{}, error) {
return getValueFromInterface(p.object, s)
}
func getValueFromInterface(it any, s Segments) (any, error) {
func getValueFromInterface(it interface{}, s Segments) (interface{}, error) {
for i, current := range s {
final := i == len(s)-1
switch current.Type {
case SegmentIndex:
array, ok := it.([]any)
array, ok := it.([]interface{})
if !ok {
return nil, errors.Errorf("%s: not an array", s[:i])
}
if current.Index >= uint(len(array)) {
return nil, notFoundError{errors.Errorf("%s: no such element", s[:i+1])}
if int(current.Index) >= len(array) {
return nil, errNotFound{errors.Errorf("%s: no such element", s[:i+1])}
}
if final {
return array[current.Index], nil
}
it = array[current.Index]
case SegmentField:
switch object := it.(type) {
case map[string]any:
v, ok := object[current.Field]
if !ok {
return nil, notFoundError{errors.Errorf("%s: no such field", s[:i+1])}
}
if final {
return v, nil
}
it = object[current.Field]
case nil:
return nil, notFoundError{errors.Errorf("%s: expected map, got nil", s[:i])}
default:
object, ok := it.(map[string]interface{})
if !ok {
return nil, errors.Errorf("%s: not an object", s[:i])
}
v, ok := object[current.Field]
if !ok {
return nil, errNotFound{errors.Errorf("%s: no such field", s[:i+1])}
}
if final {
return v, nil
}
it = object[current.Field]
}
}
@ -167,89 +156,74 @@ func getValueFromInterface(it any, s Segments) (any, error) {
//
// For a Paved object with the following data: []byte(`{"spec":{"containers":[{"name":"cool", "image": "latest", "args": ["start", "now", "debug"]}]}}`),
// ExpandWildcards("spec.containers[*].args[*]") returns:
// []string{"spec.containers[0].args[0]", "spec.containers[0].args[1]", "spec.containers[0].args[2]"},.
// []string{"spec.containers[0].args[0]", "spec.containers[0].args[1]", "spec.containers[0].args[2]"},
func (p *Paved) ExpandWildcards(path string) ([]string, error) {
segments, err := Parse(path)
if err != nil {
return nil, errors.Wrapf(err, "cannot parse path %q", path)
}
segmentsArray, err := expandWildcards(p.object, segments)
if err != nil {
return nil, errors.Wrapf(err, "cannot expand wildcards for segments: %q", segments)
}
paths := make([]string, len(segmentsArray))
for i, s := range segmentsArray {
paths[i] = s.String()
}
return paths, nil
}
func expandWildcards(data any, segments Segments) ([]Segments, error) { //nolint:gocognit // See note below.
// Even complexity turns out to be high, it is mostly because we have duplicate
// logic for arrays and maps and a couple of error handling.
// Note(turkenh): Explanation for nolint:gocyclo
// Even complexity turns out to be high, it is mostly because we have duplicate
// logic for arrays and maps and a couple of error handling.
func expandWildcards(data interface{}, segments Segments) ([]Segments, error) { //nolint:gocyclo
var res []Segments
it := data
for i, current := range segments {
// wildcards are regular fields with "*" as string
if current.Type == SegmentField && current.Field == wildcard {
switch mapOrArray := it.(type) {
case []any:
case []interface{}:
for ix := range mapOrArray {
expanded := make(Segments, len(segments))
copy(expanded, segments)
expanded = append(append(expanded[:i], FieldOrIndex(strconv.Itoa(ix))), expanded[i+1:]...)
r, err := expandWildcards(data, expanded)
if err != nil {
return nil, errors.Wrapf(err, "%q: cannot expand wildcards", expanded)
}
res = append(res, r...)
}
case map[string]any:
case map[string]interface{}:
for k := range mapOrArray {
expanded := make(Segments, len(segments))
copy(expanded, segments)
expanded = append(append(expanded[:i], Field(k)), expanded[i+1:]...)
r, err := expandWildcards(data, expanded)
if err != nil {
return nil, errors.Wrapf(err, "%q: cannot expand wildcards", expanded)
}
res = append(res, r...)
}
case nil:
return nil, notFoundError{errors.Errorf("wildcard field %q is not found in the path", segments[:i])}
default:
return nil, errors.Errorf("%q: unexpected wildcard usage", segments[:i])
}
return res, nil
}
var err error
it, err = getValueFromInterface(data, segments[:i+1])
if IsNotFound(err) {
return nil, nil
}
if err != nil {
return nil, err
}
}
return append(res, segments), nil
}
// GetValue of the supplied field path.
func (p *Paved) GetValue(path string) (any, error) {
func (p *Paved) GetValue(path string) (interface{}, error) {
segments, err := Parse(path)
if err != nil {
return nil, errors.Wrapf(err, "cannot parse path %q", path)
@ -259,17 +233,15 @@ func (p *Paved) GetValue(path string) (any, error) {
}
// GetValueInto the supplied type.
func (p *Paved) GetValueInto(path string, out any) error {
func (p *Paved) GetValueInto(path string, out interface{}) error {
val, err := p.GetValue(path)
if err != nil {
return err
}
js, err := json.Marshal(val)
if err != nil {
return errors.Wrap(err, "cannot marshal value to JSON")
}
return errors.Wrap(json.Unmarshal(js, out), "cannot unmarshal value from JSON")
}
@ -284,7 +256,6 @@ func (p *Paved) GetString(path string) (string, error) {
if !ok {
return "", errors.Errorf("%s: not a string", path)
}
return s, nil
}
@ -295,7 +266,7 @@ func (p *Paved) GetStringArray(path string) ([]string, error) {
return nil, err
}
a, ok := v.([]any)
a, ok := v.([]interface{})
if !ok {
return nil, errors.Errorf("%s: not an array", path)
}
@ -306,7 +277,6 @@ func (p *Paved) GetStringArray(path string) ([]string, error) {
if !ok {
return nil, errors.Errorf("%s: not an array of strings", path)
}
sa[i] = s
}
@ -320,7 +290,7 @@ func (p *Paved) GetStringObject(path string) (map[string]string, error) {
return nil, err
}
o, ok := v.(map[string]any)
o, ok := v.(map[string]interface{})
if !ok {
return nil, errors.Errorf("%s: not an object", path)
}
@ -331,8 +301,8 @@ func (p *Paved) GetStringObject(path string) (map[string]string, error) {
if !ok {
return nil, errors.Errorf("%s: not an object with string field values", path)
}
so[k] = s
}
return so, nil
@ -349,10 +319,31 @@ func (p *Paved) GetBool(path string) (bool, error) {
if !ok {
return false, errors.Errorf("%s: not a bool", path)
}
return b, nil
}
// NOTE(muvaf): If there is no CRD, unstructured.Unstructured reads numbers as
// float64. However, in practice, use of float64 is discouraged and when you fetch
// an instance of a CRD whose number fields are int64, you'll get int64. So,
// it's not really possible to test this without an api-server but that's the
// actual behavior.
// GetNumber value of the supplied field path.
// Deprecated: Use of float64 is discouraged. Please use GetInteger.
// See https://github.com/kubernetes/community/blob/c9ae475/contributors/devel/sig-architecture/api-conventions.md#primitive-types
func (p *Paved) GetNumber(path string) (float64, error) {
v, err := p.GetValue(path)
if err != nil {
return 0, err
}
f, ok := v.(float64)
if !ok {
return 0, errors.Errorf("%s: not a (float64) number", path)
}
return f, nil
}
// GetInteger value of the supplied field path.
func (p *Paved) GetInteger(path string) (int64, error) {
v, err := p.GetValue(path)
@ -364,14 +355,13 @@ func (p *Paved) GetInteger(path string) (int64, error) {
if !ok {
return 0, errors.Errorf("%s: not a (int64) number", path)
}
return f, nil
}
func (p *Paved) setValue(s Segments, value any) error {
func (p *Paved) setValue(s Segments, value interface{}) error {
// We expect p.object to look like JSON data that was unmarshalled into an
// any per https://golang.org/pkg/encoding/json/#Unmarshal. We
// marshal our value to JSON and unmarshal it into an any to ensure
// interface{} per https://golang.org/pkg/encoding/json/#Unmarshal. We
// marshal our value to JSON and unmarshal it into an interface{} to ensure
// it meets these criteria before setting it within p.object.
v, err := toValidJSON(value)
if err != nil {
@ -382,14 +372,13 @@ func (p *Paved) setValue(s Segments, value any) error {
return err
}
var in any = p.object
var in interface{} = p.object
for i, current := range s {
final := i == len(s)-1
switch current.Type {
case SegmentIndex:
array, ok := in.([]any)
array, ok := in.([]interface{})
if !ok {
return errors.Errorf("%s is not an array", s[:i])
}
@ -403,7 +392,7 @@ func (p *Paved) setValue(s Segments, value any) error {
in = array[current.Index]
case SegmentField:
object, ok := in.(map[string]any)
object, ok := in.(map[string]interface{})
if !ok {
return errors.Errorf("%s is not an object", s[:i])
}
@ -421,32 +410,28 @@ func (p *Paved) setValue(s Segments, value any) error {
return nil
}
func toValidJSON(value any) (any, error) {
var v any
func toValidJSON(value interface{}) (interface{}, error) {
var v interface{}
j, err := json.Marshal(value)
if err != nil {
return nil, errors.Wrap(err, "cannot marshal value to JSON")
}
if err := json.Unmarshal(j, &v); err != nil {
return nil, errors.Wrap(err, "cannot unmarshal value from JSON")
}
return v, nil
}
func prepareElement(array []any, current, next Segment) {
func prepareElement(array []interface{}, current, next Segment) {
// If this segment is not the final one and doesn't exist we need to
// create it for our next segment.
if array[current.Index] == nil {
switch next.Type {
case SegmentIndex:
array[current.Index] = make([]any, next.Index+1)
array[current.Index] = make([]interface{}, next.Index+1)
case SegmentField:
array[current.Index] = make(map[string]any)
array[current.Index] = make(map[string]interface{})
}
return
}
@ -456,29 +441,28 @@ func prepareElement(array []any, current, next Segment) {
return
}
na, ok := array[current.Index].([]any)
na, ok := array[current.Index].([]interface{})
if !ok {
return
}
if next.Index < uint(len(na)) {
if int(next.Index) < len(na) {
return
}
array[current.Index] = append(na, make([]any, next.Index-uint(len(na))+1)...)
array[current.Index] = append(na, make([]interface{}, int(next.Index)-len(na)+1)...)
}
func prepareField(object map[string]any, current, next Segment) {
func prepareField(object map[string]interface{}, current, next Segment) {
// If this segment is not the final one and doesn't exist we need to
// create it for our next segment.
if _, ok := object[current.Field]; !ok {
switch next.Type {
case SegmentIndex:
object[current.Field] = make([]any, next.Index+1)
object[current.Field] = make([]interface{}, next.Index+1)
case SegmentField:
object[current.Field] = make(map[string]any)
object[current.Field] = make(map[string]interface{})
}
return
}
@ -488,25 +472,24 @@ func prepareField(object map[string]any, current, next Segment) {
return
}
na, ok := object[current.Field].([]any)
na, ok := object[current.Field].([]interface{})
if !ok {
return
}
if next.Index < uint(len(na)) {
if int(next.Index) < len(na) {
return
}
object[current.Field] = append(na, make([]any, next.Index-uint(len(na))+1)...)
object[current.Field] = append(na, make([]interface{}, int(next.Index)-len(na)+1)...)
}
// SetValue at the supplied field path.
func (p *Paved) SetValue(path string, value any) error {
func (p *Paved) SetValue(path string, value interface{}) error {
segments, err := Parse(path)
if err != nil {
return errors.Wrapf(err, "cannot parse path %q", path)
}
return p.setValue(segments, value)
}
@ -514,13 +497,11 @@ func (p *Paved) validateSegments(s Segments) error {
if !p.maxFieldPathIndexEnabled() {
return nil
}
for _, segment := range s {
if segment.Type == SegmentIndex && segment.Index > p.maxFieldPathIndex {
return errors.Errorf("index %v is greater than max allowed index %d", segment.Index, p.maxFieldPathIndex)
}
}
return nil
}
@ -538,130 +519,3 @@ func (p *Paved) SetBool(path string, value bool) error {
func (p *Paved) SetNumber(path string, value float64) error {
return p.SetValue(path, value)
}
// DeleteField deletes the field from the object.
// If the path points to an entry in an array, the element
// on that index is removed and the next ones are pulled
// back. If it is a field on a map, the field is
// removed from the map.
func (p *Paved) DeleteField(path string) error {
segments, err := Parse(path)
if err != nil {
return errors.Wrapf(err, "cannot parse path %q", path)
}
return p.delete(segments)
}
func (p *Paved) delete(segments Segments) error { //nolint:gocognit // See note below.
// NOTE(muvaf): I could not reduce the cyclomatic complexity
// more than that without disturbing the reading flow.
if len(segments) == 1 {
o, err := deleteField(p.object, segments[0])
if err != nil {
return errors.Wrapf(err, "cannot delete %s", segments)
}
p.object = o.(map[string]any) //nolint:forcetypeassert // We're deleting from the root of the paved object, which is always a map[string]any.
return nil
}
var in any = p.object
for i, current := range segments {
// beforeLast is true for the element before the last one because
// slices cannot be changed in place and Go does not allow
// taking address of map elements which prevents us from
// assigning a new array for that entry unless we have the
// map available in the context, which is achieved by iterating
// until the element before the last one as opposed to
// Set/Get functions in this file.
beforeLast := i == len(segments)-2
switch current.Type {
case SegmentIndex:
array, ok := in.([]any)
if !ok {
return errors.Errorf("%s is not an array", segments[:i])
}
// It doesn't exist anyway.
if uint(len(array)) <= current.Index {
return nil
}
if beforeLast {
o, err := deleteField(array[current.Index], segments[len(segments)-1])
if err != nil {
return errors.Wrapf(err, "cannot delete %s", segments)
}
array[current.Index] = o
return nil
}
in = array[current.Index]
case SegmentField:
object, ok := in.(map[string]any)
if !ok {
return errors.Errorf("%s is not an object", segments[:i])
}
// It doesn't exist anyway.
if _, ok := object[current.Field]; !ok {
return nil
}
if beforeLast {
o, err := deleteField(object[current.Field], segments[len(segments)-1])
if err != nil {
return errors.Wrapf(err, "cannot delete %s", segments)
}
object[current.Field] = o
return nil
}
in = object[current.Field]
}
}
return nil
}
// deleteField deletes the object in obj pointed by
// the given Segment and returns it. Returned object
// may or may not have the same address in memory.
func deleteField(obj any, s Segment) (any, error) {
switch s.Type {
case SegmentIndex:
array, ok := obj.([]any)
if !ok {
return nil, errors.New("not an array")
}
if len(array) == 0 || uint(len(array)) <= s.Index {
return array, nil
}
for i := s.Index; i < uint(len(array))-1; i++ {
array[i] = array[i+1]
}
return array[:len(array)-1], nil
case SegmentField:
object, ok := obj.(map[string]any)
if !ok {
return nil, errors.New("not an object")
}
delete(object, s.Field)
return object, nil
}
return nil, nil
}

View File

@ -38,12 +38,12 @@ func TestIsNotFound(t *testing.T) {
}{
"NotFound": {
reason: "An error with method `IsNotFound() bool` should be considered a not found error.",
err: notFoundError{errors.New("boom")},
err: errNotFound{errors.New("boom")},
want: true,
},
"WrapsNotFound": {
reason: "An error that wraps an error with method `IsNotFound() bool` should be considered a not found error.",
err: errors.Wrap(notFoundError{errors.New("boom")}, "because reasons"),
err: errors.Wrap(errNotFound{errors.New("boom")}, "because reasons"),
want: true,
},
"SomethingElse": {
@ -65,10 +65,9 @@ func TestIsNotFound(t *testing.T) {
func TestGetValue(t *testing.T) {
type want struct {
value any
value interface{}
err error
}
cases := map[string]struct {
reason string
path string
@ -128,7 +127,7 @@ func TestGetValue(t *testing.T) {
path: "metadata.name",
data: []byte(`{"metadata":{"nope":"cool"}}`),
want: want{
err: notFoundError{errors.New("metadata.name: no such field")},
err: errNotFound{errors.New("metadata.name: no such field")},
},
},
"InsufficientContainers": {
@ -136,7 +135,7 @@ func TestGetValue(t *testing.T) {
path: "spec.containers[1].name",
data: []byte(`{"spec":{"containers":[{"name":"cool"}]}}`),
want: want{
err: notFoundError{errors.New("spec.containers[1]: no such element")},
err: errNotFound{errors.New("spec.containers[1]: no such element")},
},
},
"NotAnArray": {
@ -162,19 +161,11 @@ func TestGetValue(t *testing.T) {
err: errors.Wrap(errors.New("unexpected ']' at position 5"), "cannot parse path \"spec[]\""),
},
},
"NilParent": {
reason: "Request for a path with a nil parent value",
path: "spec.containers[*].name",
data: []byte(`{"spec":{"containers": null}}`),
want: want{
err: notFoundError{errors.Errorf("%s: expected map, got nil", "spec.containers")},
},
},
}
for name, tc := range cases {
t.Run(name, func(t *testing.T) {
in := make(map[string]any)
in := make(map[string]interface{})
_ = json.Unmarshal(tc.data, &in)
p := Pave(in)
@ -182,7 +173,6 @@ func TestGetValue(t *testing.T) {
if diff := cmp.Diff(tc.want.err, err, test.EquateErrors()); diff != "" {
t.Fatalf("\np.GetValue(%s): %s: -want error, +got error:\n%s", tc.path, tc.reason, diff)
}
if diff := cmp.Diff(tc.want.value, got); diff != "" {
t.Errorf("\np.GetValue(%s): %s: -want, +got:\n%s", tc.path, tc.reason, diff)
}
@ -201,14 +191,12 @@ func TestGetValueInto(t *testing.T) {
type args struct {
path string
out any
out interface{}
}
type want struct {
out any
out interface{}
err error
}
cases := map[string]struct {
reason string
data []byte
@ -246,14 +234,14 @@ func TestGetValueInto(t *testing.T) {
},
want: want{
out: &Struct{},
err: notFoundError{errors.New("s: no such field")},
err: errNotFound{errors.New("s: no such field")},
},
},
}
for name, tc := range cases {
t.Run(name, func(t *testing.T) {
in := make(map[string]any)
in := make(map[string]interface{})
_ = json.Unmarshal(tc.data, &in)
p := Pave(in)
@ -261,7 +249,6 @@ func TestGetValueInto(t *testing.T) {
if diff := cmp.Diff(tc.want.err, err, test.EquateErrors()); diff != "" {
t.Fatalf("\np.GetValueInto(%s): %s: -want error, +got error:\n%s", tc.args.path, tc.reason, diff)
}
if diff := cmp.Diff(tc.want.out, tc.args.out); diff != "" {
t.Errorf("\np.GetValueInto(%s): %s: -want, +got:\n%s", tc.args.path, tc.reason, diff)
}
@ -274,7 +261,6 @@ func TestGetString(t *testing.T) {
value string
err error
}
cases := map[string]struct {
reason string
path string
@ -308,7 +294,7 @@ func TestGetString(t *testing.T) {
for name, tc := range cases {
t.Run(name, func(t *testing.T) {
in := make(map[string]any)
in := make(map[string]interface{})
_ = json.Unmarshal(tc.data, &in)
p := Pave(in)
@ -316,7 +302,6 @@ func TestGetString(t *testing.T) {
if diff := cmp.Diff(tc.want.err, err, test.EquateErrors()); diff != "" {
t.Fatalf("\np.GetString(%s): %s: -want error, +got error:\n%s", tc.path, tc.reason, diff)
}
if diff := cmp.Diff(tc.want.value, got); diff != "" {
t.Errorf("\np.GetString(%s): %s: -want, +got:\n%s", tc.path, tc.reason, diff)
}
@ -329,7 +314,6 @@ func TestGetStringArray(t *testing.T) {
value []string
err error
}
cases := map[string]struct {
reason string
path string
@ -371,7 +355,7 @@ func TestGetStringArray(t *testing.T) {
for name, tc := range cases {
t.Run(name, func(t *testing.T) {
in := make(map[string]any)
in := make(map[string]interface{})
_ = json.Unmarshal(tc.data, &in)
p := Pave(in)
@ -379,7 +363,6 @@ func TestGetStringArray(t *testing.T) {
if diff := cmp.Diff(tc.want.err, err, test.EquateErrors()); diff != "" {
t.Fatalf("\np.GetStringArray(%s): %s: -want error, +got error:\n%s", tc.path, tc.reason, diff)
}
if diff := cmp.Diff(tc.want.value, got); diff != "" {
t.Errorf("\np.GetStringArray(%s): %s: -want, +got:\n%s", tc.path, tc.reason, diff)
}
@ -392,7 +375,6 @@ func TestGetStringObject(t *testing.T) {
value map[string]string
err error
}
cases := map[string]struct {
reason string
path string
@ -434,7 +416,7 @@ func TestGetStringObject(t *testing.T) {
for name, tc := range cases {
t.Run(name, func(t *testing.T) {
in := make(map[string]any)
in := make(map[string]interface{})
_ = json.Unmarshal(tc.data, &in)
p := Pave(in)
@ -442,7 +424,6 @@ func TestGetStringObject(t *testing.T) {
if diff := cmp.Diff(tc.want.err, err, test.EquateErrors()); diff != "" {
t.Fatalf("\np.GetStringObject(%s): %s: -want error, +got error:\n%s", tc.path, tc.reason, diff)
}
if diff := cmp.Diff(tc.want.value, got); diff != "" {
t.Errorf("\np.GetStringObject(%s): %s: -want, +got:\n%s", tc.path, tc.reason, diff)
}
@ -455,7 +436,6 @@ func TestGetBool(t *testing.T) {
value bool
err error
}
cases := map[string]struct {
reason string
path string
@ -489,7 +469,7 @@ func TestGetBool(t *testing.T) {
for name, tc := range cases {
t.Run(name, func(t *testing.T) {
in := make(map[string]any)
in := make(map[string]interface{})
_ = json.Unmarshal(tc.data, &in)
p := Pave(in)
@ -497,7 +477,6 @@ func TestGetBool(t *testing.T) {
if diff := cmp.Diff(tc.want.err, err, test.EquateErrors()); diff != "" {
t.Fatalf("\np.GetBool(%s): %s: -want error, +got error:\n%s", tc.path, tc.reason, diff)
}
if diff := cmp.Diff(tc.want.value, got); diff != "" {
t.Errorf("\np.GetBool(%s): %s: -want, +got:\n%s", tc.path, tc.reason, diff)
}
@ -505,12 +484,64 @@ func TestGetBool(t *testing.T) {
}
}
// TestGetNumber verifies that Paved.GetNumber returns the float64 value
// found at a field path, and that malformed paths and non-number values
// produce the expected errors.
func TestGetNumber(t *testing.T) {
	type want struct {
		value float64
		err   error
	}

	cases := map[string]struct {
		reason string
		path   string
		data   []byte
		want   want
	}{
		"MetadataVersion": {
			reason: "Requesting a number field should work",
			path:   "metadata.version",
			data:   []byte(`{"metadata":{"version":2.0}}`),
			want:   want{value: 2},
		},
		"MalformedPath": {
			reason: "Requesting an invalid field path should fail",
			path:   "spec[]",
			want: want{
				err: errors.Wrap(errors.New("unexpected ']' at position 5"), "cannot parse path \"spec[]\""),
			},
		},
		"NotANumber": {
			reason: "Requesting an non-number field path should fail",
			path:   "metadata.name",
			data:   []byte(`{"metadata":{"name":"cool"}}`),
			want: want{
				err: errors.New("metadata.name: not a (float64) number"),
			},
		},
	}

	for name, tt := range cases {
		tt := tt // capture range variable for the subtest closure
		t.Run(name, func(t *testing.T) {
			// Decode the JSON fixture into a generic object and pave it.
			object := make(map[string]interface{})
			_ = json.Unmarshal(tt.data, &object)
			paved := Pave(object)

			value, err := paved.GetNumber(tt.path)
			if diff := cmp.Diff(tt.want.err, err, test.EquateErrors()); diff != "" {
				t.Fatalf("\np.GetNumber(%s): %s: -want error, +got error:\n%s", tt.path, tt.reason, diff)
			}
			if diff := cmp.Diff(tt.want.value, value); diff != "" {
				t.Errorf("\np.GetNumber(%s): %s: -want, +got:\n%s", tt.path, tt.reason, diff)
			}
		})
	}
}
func TestGetInteger(t *testing.T) {
type want struct {
value int64
err error
}
cases := map[string]struct {
reason string
path string
@ -544,7 +575,7 @@ func TestGetInteger(t *testing.T) {
for name, tc := range cases {
t.Run(name, func(t *testing.T) {
in := make(map[string]any)
in := make(map[string]interface{})
_ = json.Unmarshal(tc.data, &in)
p := Pave(in)
@ -552,7 +583,6 @@ func TestGetInteger(t *testing.T) {
if diff := cmp.Diff(tc.want.err, err, test.EquateErrors()); diff != "" {
t.Fatalf("\np.GetNumber(%s): %s: -want error, +got error:\n%s", tc.path, tc.reason, diff)
}
if diff := cmp.Diff(tc.want.value, got); diff != "" {
t.Errorf("\np.GetNumber(%s): %s: -want, +got:\n%s", tc.path, tc.reason, diff)
}
@ -563,15 +593,13 @@ func TestGetInteger(t *testing.T) {
func TestSetValue(t *testing.T) {
type args struct {
path string
value any
value interface{}
opts []PavedOption
}
type want struct {
object map[string]any
object map[string]interface{}
err error
}
cases := map[string]struct {
reason string
data []byte
@ -586,8 +614,8 @@ func TestSetValue(t *testing.T) {
value: "cool",
},
want: want{
object: map[string]any{
"metadata": map[string]any{
object: map[string]interface{}{
"metadata": map[string]interface{}{
"name": "cool",
},
},
@ -601,8 +629,8 @@ func TestSetValue(t *testing.T) {
value: "cool",
},
want: want{
object: map[string]any{
"metadata": map[string]any{
object: map[string]interface{}{
"metadata": map[string]interface{}{
"name": "cool",
},
},
@ -616,10 +644,10 @@ func TestSetValue(t *testing.T) {
value: "cool",
},
want: want{
object: map[string]any{
"spec": map[string]any{
"containers": []any{
map[string]any{
object: map[string]interface{}{
"spec": map[string]interface{}{
"containers": []interface{}{
map[string]interface{}{
"name": "cool",
},
},
@ -635,10 +663,10 @@ func TestSetValue(t *testing.T) {
value: "cool",
},
want: want{
object: map[string]any{
"spec": map[string]any{
"containers": []any{
map[string]any{
object: map[string]interface{}{
"spec": map[string]interface{}{
"containers": []interface{}{
map[string]interface{}{
"name": "cool",
},
},
@ -654,13 +682,13 @@ func TestSetValue(t *testing.T) {
value: "cooler",
},
want: want{
object: map[string]any{
"spec": map[string]any{
"containers": []any{
map[string]any{
object: map[string]interface{}{
"spec": map[string]interface{}{
"containers": []interface{}{
map[string]interface{}{
"name": "cool",
},
map[string]any{
map[string]interface{}{
"name": "cooler",
},
},
@ -676,9 +704,9 @@ func TestSetValue(t *testing.T) {
value: "a",
},
want: want{
object: map[string]any{
"data": []any{
[]any{"a"},
object: map[string]interface{}{
"data": []interface{}{
[]interface{}{"a"},
},
},
},
@ -691,9 +719,9 @@ func TestSetValue(t *testing.T) {
value: "b",
},
want: want{
object: map[string]any{
"data": []any{
[]any{"a", "b"},
object: map[string]interface{}{
"data": []interface{}{
[]interface{}{"a", "b"},
},
},
},
@ -706,8 +734,8 @@ func TestSetValue(t *testing.T) {
value: "c",
},
want: want{
object: map[string]any{
"data": []any{"a", nil, "c"},
object: map[string]interface{}{
"data": []interface{}{"a", nil, "c"},
},
},
},
@ -719,9 +747,8 @@ func TestSetValue(t *testing.T) {
value: "c",
},
want: want{
object: map[string]any{
"data": []any{"a"},
},
object: map[string]interface{}{
"data": []interface{}{"a"}},
err: errors.Errorf("index %v is greater than max allowed index %v",
DefaultMaxFieldPathIndex+1, DefaultMaxFieldPathIndex),
},
@ -735,33 +762,32 @@ func TestSetValue(t *testing.T) {
opts: []PavedOption{WithMaxFieldPathIndex(0)},
},
want: want{
object: map[string]any{
"data": func() []any {
res := make([]any, DefaultMaxFieldPathIndex+2)
object: map[string]interface{}{
"data": func() []interface{} {
res := make([]interface{}, DefaultMaxFieldPathIndex+2)
res[0] = "a"
res[DefaultMaxFieldPathIndex+1] = "c"
return res
}(),
},
}()},
},
},
"MapStringString": {
reason: "A map of string to string should be converted to a map of string to any",
reason: "A map of string to string should be converted to a map of string to interface{}",
data: []byte(`{"metadata":{}}`),
args: args{
path: "metadata.labels",
value: map[string]string{"cool": "very"},
},
want: want{
object: map[string]any{
"metadata": map[string]any{
"labels": map[string]any{"cool": "very"},
object: map[string]interface{}{
"metadata": map[string]interface{}{
"labels": map[string]interface{}{"cool": "very"},
},
},
},
},
"OwnerReference": {
reason: "An ObjectReference (i.e. struct) should be converted to a map of string to any",
reason: "An ObjectReference (i.e. struct) should be converted to a map of string to interface{}",
data: []byte(`{"metadata":{}}`),
args: args{
path: "metadata.ownerRefs[0]",
@ -773,10 +799,10 @@ func TestSetValue(t *testing.T) {
},
},
want: want{
object: map[string]any{
"metadata": map[string]any{
"ownerRefs": []any{
map[string]any{
object: map[string]interface{}{
"metadata": map[string]interface{}{
"ownerRefs": []interface{}{
map[string]interface{}{
"apiVersion": "v",
"kind": "k",
"name": "n",
@ -794,7 +820,7 @@ func TestSetValue(t *testing.T) {
path: "data[0]",
},
want: want{
object: map[string]any{"data": map[string]any{}},
object: map[string]interface{}{"data": map[string]interface{}{}},
err: errors.New("data is not an array"),
},
},
@ -805,7 +831,7 @@ func TestSetValue(t *testing.T) {
path: "data.name",
},
want: want{
object: map[string]any{"data": []any{}},
object: map[string]interface{}{"data": []interface{}{}},
err: errors.New("data is not an object"),
},
},
@ -815,7 +841,7 @@ func TestSetValue(t *testing.T) {
path: "spec[]",
},
want: want{
object: map[string]any{},
object: map[string]interface{}{},
err: errors.Wrap(errors.New("unexpected ']' at position 5"), "cannot parse path \"spec[]\""),
},
},
@ -823,7 +849,7 @@ func TestSetValue(t *testing.T) {
for name, tc := range cases {
t.Run(name, func(t *testing.T) {
in := make(map[string]any)
in := make(map[string]interface{})
_ = json.Unmarshal(tc.data, &in)
p := Pave(in, tc.args.opts...)
@ -831,7 +857,6 @@ func TestSetValue(t *testing.T) {
if diff := cmp.Diff(tc.want.err, err, test.EquateErrors()); diff != "" {
t.Fatalf("\np.SetValue(%s, %v): %s: -want error, +got error:\n%s", tc.args.path, tc.args.value, tc.reason, diff)
}
if diff := cmp.Diff(tc.want.object, p.object); diff != "" {
t.Fatalf("\np.SetValue(%s, %v): %s: -want, +got:\n%s", tc.args.path, tc.args.value, tc.reason, diff)
}
@ -844,7 +869,6 @@ func TestExpandWildcards(t *testing.T) {
expanded []string
err error
}
cases := map[string]struct {
reason string
path string
@ -978,19 +1002,11 @@ func TestExpandWildcards(t *testing.T) {
err: errors.Wrap(errors.New("unexpected ']' at position 5"), "cannot parse path \"spec[]\""),
},
},
"NilValue": {
reason: "Requesting a wildcard for an object that has nil value",
path: "spec.containers[*].name",
data: []byte(`{"spec":{"containers": null}}`),
want: want{
err: errors.Wrapf(notFoundError{errors.Errorf("wildcard field %q is not found in the path", "spec.containers")}, "cannot expand wildcards for segments: %q", "spec.containers[*].name"),
},
},
}
for name, tc := range cases {
t.Run(name, func(t *testing.T) {
in := make(map[string]any)
in := make(map[string]interface{})
_ = json.Unmarshal(tc.data, &in)
p := Pave(in)
@ -998,7 +1014,6 @@ func TestExpandWildcards(t *testing.T) {
if diff := cmp.Diff(tc.want.err, err, test.EquateErrors()); diff != "" {
t.Fatalf("\np.ExpandWildcards(%s): %s: -want error, +got error:\n%s", tc.path, tc.reason, diff)
}
if diff := cmp.Diff(tc.want.expanded, got, cmpopts.SortSlices(func(x, y string) bool {
return x < y
})); diff != "" {
@ -1007,292 +1022,3 @@ func TestExpandWildcards(t *testing.T) {
})
}
}
// TestDeleteField exercises Paved.DeleteField with a table of JSON
// fixtures. It covers error cases (malformed paths and mismatches
// between a path segment and the shape of the data), successful
// deletion of object fields and array elements at various depths, and
// no-op deletion of paths that do not exist.
func TestDeleteField(t *testing.T) {
	// args holds the input to DeleteField.
	type args struct {
		path string
	}
	// want holds the expected post-deletion object and error.
	type want struct {
		object map[string]any
		err    error
	}

	cases := map[string]struct {
		reason string
		data   []byte // JSON document to pave before deleting
		args   args
		want   want
	}{
		// Error cases: the path cannot be parsed, or its segments do
		// not match the shape of the data they address.
		"MalformedPath": {
			reason: "Requesting an invalid field path should fail",
			args: args{
				path: "spec[]",
			},
			want: want{
				object: map[string]any{},
				err:    errors.Wrap(errors.New("unexpected ']' at position 5"), "cannot parse path \"spec[]\""),
			},
		},
		"IndexGivenForNonArray": {
			reason: "Trying to delete a numbered index from a map should fail.",
			data:   []byte(`{"data":{}}`),
			args: args{
				path: "data[0]",
			},
			want: want{
				object: map[string]any{"data": map[string]any{}},
				err:    errors.Wrap(errors.New("not an array"), "cannot delete data[0]"),
			},
		},
		"KeyGivenForNonMap": {
			reason: "Trying to delete a key from an array should fail.",
			data:   []byte(`{"data":[["a"]]}`),
			args: args{
				path: "data[0].a",
			},
			want: want{
				object: map[string]any{"data": []any{[]any{"a"}}},
				err:    errors.Wrap(errors.New("not an object"), "cannot delete data[0].a"),
			},
		},
		"KeyGivenForNonMapInMiddle": {
			reason: "If one of the segments that is a field corresponds to array, it should fail.",
			data:   []byte(`{"data":[{"another": "field"}]}`),
			args: args{
				path: "data.some.another",
			},
			want: want{
				object: map[string]any{"data": []any{
					map[string]any{
						"another": "field",
					},
				}},
				err: errors.New("data is not an object"),
			},
		},
		"IndexGivenForNonArrayInMiddle": {
			reason: "If one of the segments that is an index corresponds to map, it should fail.",
			data:   []byte(`{"data":{"another": ["field"]}}`),
			args: args{
				path: "data[0].another",
			},
			want: want{
				object: map[string]any{"data": map[string]any{
					"another": []any{
						"field",
					},
				}},
				err: errors.New("data is not an array"),
			},
		},
		// Object deletion: removing map fields at the top, middle, and
		// leaf of the tree, including objects nested inside arrays.
		"ObjectField": {
			reason: "Deleting a field from a map should work.",
			data:   []byte(`{"metadata":{"name":"lame"}}`),
			args: args{
				path: "metadata.name",
			},
			want: want{
				object: map[string]any{
					"metadata": map[string]any{},
				},
			},
		},
		"ObjectSingleField": {
			reason: "Deleting a field from a map should work.",
			data:   []byte(`{"metadata":{"name":"lame"}, "olala": {"omama": "koala"}}`),
			args: args{
				path: "metadata",
			},
			want: want{
				object: map[string]any{
					"olala": map[string]any{
						"omama": "koala",
					},
				},
			},
		},
		"ObjectLeafField": {
			reason: "Deleting a field that is deep in the tree from a map should work.",
			data:   []byte(`{"spec":{"some": {"more": "delete-me"}}}`),
			args: args{
				path: "spec.some.more",
			},
			want: want{
				object: map[string]any{
					"spec": map[string]any{
						"some": map[string]any{},
					},
				},
			},
		},
		"ObjectMidField": {
			reason: "Deleting a field that is in the middle of the tree from a map should work.",
			data:   []byte(`{"spec":{"some": {"more": "delete-me"}}}`),
			args: args{
				path: "spec.some",
			},
			want: want{
				object: map[string]any{
					"spec": map[string]any{},
				},
			},
		},
		"ObjectInArray": {
			reason: "Deleting a field that is in the middle of the tree from a map should work.",
			data:   []byte(`{"spec":[{"some": {"more": "delete-me"}}]}`),
			args: args{
				path: "spec[0].some.more",
			},
			want: want{
				object: map[string]any{
					"spec": []any{
						map[string]any{
							"some": map[string]any{},
						},
					},
				},
			},
		},
		// Array deletion: removing elements by index at the first,
		// last, and middle positions, and from nested arrays.
		"ArrayFirstElement": {
			reason: "Deleting the first element from an array should work",
			data:   []byte(`{"items":["a", "b"]}`),
			args: args{
				path: "items[0]",
			},
			want: want{
				object: map[string]any{
					"items": []any{
						"b",
					},
				},
			},
		},
		"ArrayLastElement": {
			reason: "Deleting the last element from an array should work",
			data:   []byte(`{"items":["a", "b"]}`),
			args: args{
				path: "items[1]",
			},
			want: want{
				object: map[string]any{
					"items": []any{
						"a",
					},
				},
			},
		},
		"ArrayMidElement": {
			reason: "Deleting an element that is neither first nor last from an array should work",
			data:   []byte(`{"items":["a", "b", "c"]}`),
			args: args{
				path: "items[1]",
			},
			want: want{
				object: map[string]any{
					"items": []any{
						"a",
						"c",
					},
				},
			},
		},
		"ArrayOnlyElements": {
			reason: "Deleting the only element from an array should work",
			data:   []byte(`{"items":["a"]}`),
			args: args{
				path: "items[0]",
			},
			want: want{
				object: map[string]any{
					"items": []any{},
				},
			},
		},
		"ArrayMultipleIndex": {
			reason: "Deleting an element from an array of array should work",
			data:   []byte(`{"items":[["a", "b"]]}`),
			args: args{
				path: "items[0][1]",
			},
			want: want{
				object: map[string]any{
					"items": []any{
						[]any{
							"a",
						},
					},
				},
			},
		},
		"ArrayNoElement": {
			reason: "Deleting an element from an empty array should work",
			data:   []byte(`{"items":[]}`),
			args: args{
				path: "items[0]",
			},
			want: want{
				object: map[string]any{
					"items": []any{},
				},
			},
		},
		// No-op cases: deleting a path that does not exist leaves the
		// object unchanged and returns no error.
		"NonExistentPathInMap": {
			reason: "It should be no-op if the field does not exist already.",
			data:   []byte(`{"items":[]}`),
			args: args{
				path: "items[0].metadata",
			},
			want: want{
				object: map[string]any{
					"items": []any{},
				},
			},
		},
		"NonExistentPathInArray": {
			reason: "It should be no-op if the field does not exist already.",
			data:   []byte(`{"items":{"some": "other"}}`),
			args: args{
				path: "items.metadata[0]",
			},
			want: want{
				object: map[string]any{
					"items": map[string]any{
						"some": "other",
					},
				},
			},
		},
		"NonExistentElementInArray": {
			reason: "It should be no-op if the field does not exist already.",
			data:   []byte(`{"items":["some", "other"]}`),
			args: args{
				path: "items[5]",
			},
			want: want{
				object: map[string]any{
					"items": []any{
						"some", "other",
					},
				},
			},
		},
	}
	for name, tc := range cases {
		t.Run(name, func(t *testing.T) {
			// Decode the fixture into a generic object and pave it.
			in := make(map[string]any)
			_ = json.Unmarshal(tc.data, &in)
			p := Pave(in)

			err := p.DeleteField(tc.args.path)
			if diff := cmp.Diff(tc.want.err, err, test.EquateErrors()); diff != "" {
				t.Fatalf("\np.DeleteField(%s): %s: -want error, +got error:\n%s", tc.args.path, tc.reason, diff)
			}
			// Compare the full remaining object, not just the deleted
			// path, to catch unintended mutations elsewhere in the tree.
			if diff := cmp.Diff(tc.want.object, p.object); diff != "" {
				t.Fatalf("\np.DeleteField(%s): %s: -want, +got:\n%s", tc.args.path, tc.reason, diff)
			}
		})
	}
}

Some files were not shown because too many files have changed in this diff Show More