mirror of https://github.com/spiffe/spire.git
Compare commits
218 Commits
|
@@ -207,7 +207,7 @@ jobs:
        uses: actions/cache@1bd1e32a3bdc45362d1e726936510720a7c30a57 # v4.2.0
        with:
          path: ./bin/
          key: ${{ runner.os }}-executables-${{ hashFiles('**/*.exe') }}
          key: ${{ runner.os }}-executables-${{ github.sha }}
      - name: Build images
        run: make images-windows
      - name: Export images
@@ -239,10 +239,9 @@ jobs:
    outputs:
      test: ${{ steps.set-matrix.outputs.test }}

  integration:
    name: integration (linux)
    runs-on: ubuntu-22.04
    name: integration (${{ matrix.arch }}) (${{ strategy.job-index}}/${{ strategy.job-total }})
    runs-on: ${{ matrix.runs-on }}
    needs: [cache-deps, images]
    timeout-minutes: 45
@@ -252,8 +251,14 @@ jobs:
    strategy:
      fail-fast: false
      matrix:
        num_runners: [5]
        runner_id: [1, 2, 3, 4, 5]
        arch: [x64, arm64]
        num_runners: [10]
        runner_id: [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
        include:
          - arch: x64
            runs-on: ubuntu-22.04
          - arch: arm64
            runs-on: ubuntu-22.04-arm
    steps:
      - name: Checkout
        uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
@@ -294,13 +299,12 @@ jobs:
          THIS_RUNNER: ${{ matrix.runner_id }}
          TERM: dumb
          CICD_TARGET_BRANCH: ${{ github.event.pull_request.base.ref }}
          IGNORE_SUITES: ${{ matrix.arch == 'arm64' && 'suites/upstream-authority-ejbca' || '' }} # Waiting for EJBCA to support arm64 (https://github.com/spiffe/spire/issues/6060)
        run: ./.github/workflows/scripts/split.sh | xargs ./test/integration/test.sh

  integration-k8s:
    name: integration-k8s
    runs-on: ubuntu-22.04
    name: integration-k8s-${{ matrix.test[0] }}-${{ matrix.arch }}
    runs-on: ${{ matrix.runs-on }}
    needs: [cache-deps, images, build-matrix]
    timeout-minutes: 45
@@ -310,10 +314,18 @@ jobs:
    strategy:
      fail-fast: false
      matrix:
        num_runners: [1]
        runner_id: [1]
        #Test elements should be added as [KubeCTLVersion, K8s-image, KindVersion]
        test: ${{ fromJson(needs.build-matrix.outputs.test) }}
        arch: [x64, arm64]
        include:
          - arch: x64
            runs-on: ubuntu-22.04
            num_runners: 1
            runner_id: 1
          - arch: arm64
            runs-on: ubuntu-22.04-arm
            num_runners: 1
            runner_id: 1
        #Test elements should be added as [KubeCTLVersion, K8s-image, KindVersion]
        test: ${{ fromJson(needs.build-matrix.outputs.test) }}
    steps:
      - name: Checkout
        uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
@@ -580,7 +592,7 @@ jobs:
        uses: actions/cache@1bd1e32a3bdc45362d1e726936510720a7c30a57 # v4.2.0
        with:
          path: ./bin/
          key: ${{ runner.os }}-executables-${{ hashFiles('**/*.exe') }}
          key: ${{ runner.os }}-executables-${{ github.sha }}
      - name: Build artifacts
        run: ./.github/workflows/scripts/build_artifacts.sh ${{ runner.os }}
      - name: Archive artifacts
@@ -591,7 +603,7 @@ jobs:

  success:
    runs-on: ubuntu-22.04
    needs: [lint, unit-test, unit-test-race-detector, artifacts, integration, lint-windows, unit-test-windows, artifacts-windows, integration-windows]
    needs: [lint, unit-test, unit-test-race-detector, artifacts, integration, integration-k8s, lint-windows, unit-test-windows, artifacts-windows, integration-windows]
    timeout-minutes: 30
    permissions:
      contents: read
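The num_runners/runner_id matrix above feeds ./.github/workflows/scripts/split.sh, which hands each runner its share of the integration suites. The Go sketch below illustrates that kind of deterministic partitioning; the round-robin assignment and the directory layout are assumptions for illustration, not the actual contents of split.sh.

```go
// Illustrative only; the real suite splitting lives in
// .github/workflows/scripts/split.sh.
package main

import (
	"fmt"
	"os"
	"sort"
	"strconv"
)

func main() {
	numRunners, _ := strconv.Atoi(os.Getenv("NUM_RUNNERS")) // e.g. 10
	thisRunner, _ := strconv.Atoi(os.Getenv("THIS_RUNNER")) // 1-based runner_id
	entries, _ := os.ReadDir("test/integration/suites")

	var names []string
	for _, e := range entries {
		names = append(names, "suites/"+e.Name())
	}
	sort.Strings(names) // stable order so every runner computes the same split

	for i, name := range names {
		if i%numRunners == thisRunner-1 { // round-robin assignment (assumed)
			fmt.Println(name)
		}
	}
}
```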
@@ -192,7 +192,7 @@ jobs:
        uses: actions/cache@1bd1e32a3bdc45362d1e726936510720a7c30a57 # v4.2.0
        with:
          path: ./bin/
          key: ${{ runner.os }}-executables-${{ hashFiles('**/*.exe') }}
          key: ${{ runner.os }}-executables-${{ github.sha }}
      - name: Build images
        run: make images-windows
      - name: Export images
@@ -225,9 +225,10 @@ jobs:
      test: ${{ steps.set-matrix.outputs.test }}

  integration:
    name: integration (linux)
    runs-on: ubuntu-22.04
    name: integration (${{ matrix.arch }}) (${{ strategy.job-index}}/${{ strategy.job-total }})
    runs-on: ${{ matrix.runs-on }}
    needs: [cache-deps, images]
    timeout-minutes: 45

    permissions:
      contents: read
@@ -235,8 +236,14 @@ jobs:
    strategy:
      fail-fast: false
      matrix:
        num_runners: [5]
        runner_id: [1, 2, 3, 4, 5]
        arch: [x64, arm64]
        num_runners: [10]
        runner_id: [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
        include:
          - arch: x64
            runs-on: ubuntu-22.04
          - arch: arm64
            runs-on: ubuntu-22.04-arm
    steps:
      - name: Checkout
        uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
@@ -285,14 +292,15 @@ jobs:
          NUM_RUNNERS: ${{ matrix.num_runners }}
          THIS_RUNNER: ${{ matrix.runner_id }}
          TERM: dumb
          IGNORE_SUITES: ${{ matrix.arch == 'arm64' && 'suites/upstream-authority-ejbca' || '' }} # Waiting for EJBCA to support arm64 (https://github.com/spiffe/spire/issues/6060)
          # We don't need to specify CICD_TARGET_BRANCH since the upgrade
          # integration test will detect the annotated tag for version checking.
          # CICD_TARGET_BRANCH:
        run: ./.github/workflows/scripts/split.sh | xargs ./test/integration/test.sh

  integration-k8s:
    name: integration-k8s
    runs-on: ubuntu-22.04
    name: integration-k8s-${{ matrix.test[0] }}-${{ matrix.arch }}
    runs-on: ${{ matrix.runs-on }}
    needs: [cache-deps, images, build-matrix]
    timeout-minutes: 45
@@ -302,10 +310,18 @@ jobs:
    strategy:
      fail-fast: false
      matrix:
        num_runners: [1]
        runner_id: [1]
        #Test elements should be added as [KubeCTLVersion, K8s-image, KindVersion]
        test: ${{ fromJson(needs.build-matrix.outputs.test) }}
        arch: [x64, arm64]
        include:
          - arch: x64
            runs-on: ubuntu-22.04
            num_runners: 1
            runner_id: 1
          - arch: arm64
            runs-on: ubuntu-22.04-arm
            num_runners: 1
            runner_id: 1
        #Test elements should be added as [KubeCTLVersion, K8s-image, KindVersion]
        test: ${{ fromJson(needs.build-matrix.outputs.test) }}
    steps:
      - name: Checkout
        uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
@@ -538,7 +554,7 @@ jobs:
        uses: actions/cache@1bd1e32a3bdc45362d1e726936510720a7c30a57 # v4.2.0
        with:
          path: ./bin/
          key: ${{ runner.os }}-executables-${{ hashFiles('**/*.exe') }}
          key: ${{ runner.os }}-executables-${{ github.sha }}
      - name: Archive artifacts
        uses: actions/upload-artifact@65c4c4a1ddee5b72f698fdd19549f0f0fb45cf08 # v4
        with:
@@ -547,7 +563,7 @@ jobs:

  publish-artifacts:
    runs-on: ubuntu-22.04
    needs: [lint, unit-test, unit-test-race-detector, artifacts, integration, lint-windows, unit-test-windows, artifacts-windows, integration-windows]
    needs: [lint, unit-test, unit-test-race-detector, artifacts, integration, integration-k8s, lint-windows, unit-test-windows, artifacts-windows, integration-windows]
    permissions:
      contents: write

@@ -579,7 +595,7 @@ jobs:

  publish-images:
    runs-on: ubuntu-22.04
    needs: [lint, unit-test, unit-test-race-detector, artifacts, integration, lint-windows, unit-test-windows, artifacts-windows, integration-windows]
    needs: [lint, unit-test, unit-test-race-detector, artifacts, integration, integration-k8s, lint-windows, unit-test-windows, artifacts-windows, integration-windows]
    permissions:
      contents: read
      id-token: write
@@ -16,7 +16,24 @@ jobs:
          days-before-issue-stale: 365 # 1 year
          days-before-issue-close: 30
          stale-issue-label: "stale"
          exempt-issue-labels: "blocked" # Ignore blocked issues
          stale-issue-message: "This issue is stale because it has been open for 365 days with no activity."
          close-issue-message: "This issue was closed because it has been inactive for 30 days since being marked as stale."
          days-before-pr-stale: -1 # Don't handle PRs
          days-before-pr-close: -1 # Don't handle PRs

  process-stale-blocked-issues:
    runs-on: ubuntu-latest
    permissions:
      issues: write
      pull-requests: write
    steps:
      - uses: actions/stale@28ca1036281a5e5922ead5184a1bbf96e5fc984e # v9.0.0
        with:
          only-labels: "blocked"
          days-before-issue-stale: 30
          days-before-issue-close: -1 # Don't close blocked issues
          stale-issue-label: "stale"
          stale-issue-message: "This issue has been in the blocked state for 30 days, marking as stale so the blocking issue is re-checked."
          days-before-pr-stale: -1 # Don't handle PRs
          days-before-pr-close: -1 # Don't handle PRs
@@ -1 +1 @@
1.23.6
1.24.4
.golangci.yml
@@ -1,44 +1,87 @@
version: "2"
run:
  # timeout for analysis, e.g. 30s, 5m, default is 1m
  timeout: 12m

issues:
  exclude-dirs:
    - testdata$
    - test/mock
  exclude-files:
    - ".*\\.pb\\.go"

linters:
  enable:
    - bodyclose
    - copyloopvar
    - durationcheck
    - errorlint
    - gofmt
    - goimports
    - revive
    - exptostd
    - gocritic
    - gosec
    - intrange
    - mirror
    - misspell
    - nakedret
    - nilerr
    - nilnesserr
    - nolintlint
    - predeclared
    - reassign
    - revive
    - unconvert
    - unparam
    - intrange
    - whitespace
    - gocritic
    - copyloopvar
    - wastedassign
    - nolintlint

linters-settings:
  govet:
    enable:
      - nilness
      - sortslice
      - unusedwrite
  revive:
    # minimal confidence for issues, default is 0.8
    confidence: 0.0
    - whitespace
  settings:
    govet:
      enable:
        - sortslice
        - unusedwrite
    revive:
      confidence: 0
      rules:
        - name: atomic
        - name: bool-literal-in-expr
        - name: constant-logical-expr
        - name: context-as-argument
        - name: datarace
        - name: error-naming
        - name: error-return
        - name: errorf
        - name: identical-branches
        - name: if-return
        - name: increment-decrement
        - name: modifies-value-receiver
        - name: optimize-operands-order
        - name: range
        - name: receiver-naming
        - name: redundant-import-alias
        - name: redundant-test-main-exit
        - name: string-of-int
        - name: time-equal
        - name: unconditional-recursion
        - name: unnecessary-stmt
        - name: unreachable-code
        - name: use-any
        - name: use-errors-new
        - name: useless-break
        - name: var-declaration
        - name: waitgroup-by-value
    staticcheck:
      checks:
        - all
        - -ST1003
        - -QF1001
        - -QF1008
  exclusions:
    generated: lax
    presets:
      - comments
      - common-false-positives
      - legacy
      - std-error-handling
    rules:
      - name: unused-parameter
        disabled: true
      - linters:
          - gosec
        path: (.*_test\.go$)|(^test/.*)
        text: integer overflow conversion
      - linters:
          - revive
        text: Import alias "v1" is redundant
formatters:
  enable:
    - gofmt
    - goimports
  exclusions:
    generated: lax
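Among the revive rules enabled above are use-any and use-errors-new. As a rough illustration (my own example, not code from the SPIRE tree), this is the kind of rewrite those two rules push for:

```go
package lintexample

import (
	"errors"
	"fmt"
)

// flagged trips both rules: use-any wants `any` instead of `interface{}`,
// and use-errors-new wants errors.New instead of fmt.Errorf with a constant string.
func flagged(v interface{}) error {
	fmt.Println(v)
	return fmt.Errorf("operation not supported")
}

// clean passes both rules.
func clean(v any) error {
	fmt.Println(v)
	return errors.New("operation not supported")
}
```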
@ -1,3 +1,3 @@
|
|||
golangci_lint v1.60.1
|
||||
golangci-lint v2.1.6
|
||||
markdown_lint v0.37.0
|
||||
protoc 24.4
|
||||
protoc 29.4
|
||||
|
|
CHANGELOG.md
@@ -1,5 +1,122 @@
# Changelog

## [1.12.4] - 2025-07-01

### Added

- `k8s_configmap` BundlePublisher plugin (#6105, #6139)
- UpstreamAuthority.SubscribeToLocalBundle RPC to stream updates in the local trust bundle (#6090)
- Integration tests running on ARM64 platform (#6059)
- The OIDC Discovery Provider can now read the trust bundle from a file (#6025)

### Changed

- The "Container id not found" log message in the `k8s` WorkloadAttestor has been lowered to Debug level (#6128)
- Improvements in lookup performance for entries (#6100, #6034)
- Agent no longer pulls the bundle from `trust_bundle_url` if it is not required (#6065)

### Fixed

- The `subject_types_supported` value in the discovery document is now properly populated by the OIDC Discovery Provider (#6126)
- SPIRE Server gRPC servers are now gracefully stopped (#6076)

## [1.12.3] - 2025-06-17

### Security

- Fixed an issue in spire-agent where the WorkloadAPI.ValidateJWTSVID endpoint did not enforce the presence of the exp (expiration) claim in JWT-SVIDs, as required by the SPIFFE specification.
  This vulnerability has limited impact: by default, SPIRE does not issue JWT-SVIDs without an expiration claim. Exploitation would require federating with a misconfigured or non-compliant trust domain.
  Thanks to Edoardo Geraci for reporting this issue.

## [1.12.2] - 2025-05-19

### Fixed

- Regression where PolicyCredentials set by CredentialComposer plugins were not correctly applied to CA certificates (#6074)

## [1.12.1] - 2025-05-06

### Added

- Support for Unix sockets in trust bundle URLs (#5932)
- Documentation improvements and additions (#5989, #6012)

### Changed

- `sql_transaction_timeout` replaced by `event_timeout` and value reduced to 15 minutes (#5966)
- Experimental events-based cache performance improvements by batch fetching updated entries (#5970)
- Improved error messages when retrieving CGroups (#6030)

### Fixed

- Corrected invalid `user-agent` value in OIDC Discovery Provider debug logs (#5981)

## [1.12.0] - 2025-03-21

### Added

- Support for any S3 compatible object storage such as MinIO in the `aws_s3` BundlePublisher plugin (#5757)
- Support for Rego V1 in the authorization policy engine (#5769)
- Support for SAN-based selectors in the `x509pop` NodeAttestor plugin (#5775)

### Changed

- Agents now use the SyncAuthorizedEntries API for periodic synchronization of authorized entries by default (#5906)
- Timestamps in logs are now formatted to include nanoseconds (#5798)
- Improved entry lookup performance in NewJWTSVID and BatchNewX509SVID server RPCs (#5819)
- Increased the maximum number of idle database connections to 100 (#5853)
- The maximum idle time per database connection is now set to 30 seconds (#5853)
- Small documentation improvements (#5873, #5876)
- The experimental events-based cache now supports reading events from read-only replicas when data staleness is tolerated, enhancing read performance (#5911)
- The `use_legacy_downstream_x509_ca_ttl` server setting is now set to false by default (#5917)

### Deprecated

- `use_sync_authorized_entries` experimental agent setting (#5906)
- `use_legacy_downstream_x509_ca_ttl` server setting (#5917)

### Removed

- The deprecated `k8s_sat` NodeAttestor plugin (#5703)

### Fixed

- Issue where agents did not receive entry updates when new entries with the same entry ID were created while `use_sync_authorized_entries` was enabled (#5764)

## [1.11.3] - 2025-06-17

### Security

- Fixed an issue in spire-agent where the WorkloadAPI.ValidateJWTSVID endpoint did not enforce the presence of the exp (expiration) claim in JWT-SVIDs, as required by the SPIFFE specification.
  This vulnerability has limited impact: by default, SPIRE does not issue JWT-SVIDs without an expiration claim. Exploitation would require federating with a misconfigured or non-compliant trust domain.
  Thanks to Edoardo Geraci for reporting this issue.

## [1.11.2] - 2025-02-13

### Added

- `gcp_secretmanager` SVIDStore plugin now supports specifying the regions where secrets are created (#5718)
- Support for expanding environment variables in the OIDC Discovery Provider configuration (#5689)
- Support for optionally enabling the `trust_domain` label for all metrics (#5673)
- The JWKS URI returned in the discovery document can now be configured in the OIDC Discovery Provider (#5690)
- A server path prefix can now be specified in the OIDC Discovery Provider (#5690)

### Changed

- Small documentation improvements (#5809, #5720)

### Fixed

- Regression in the hydration of the experimental event-based cache that caused a delay in availability (#5842)
- Do not log an error when the Envoy SDS v3 API connection has been closed cleanly (#5835)
- SVIDStore plugins now properly parse metadata in entry selectors containing ':' characters (#5750)
- Compatibility with deployments that use a server port other than 443 when the `jwt_issuer` configuration is set in the OIDC Discovery Provider (#5690)
- Domain verification is now properly done when setting the `jwt_issuer` configuration in the OIDC Discovery Provider (#5690)

### Security

- Fixed to properly call the CompareObjectHandles function when it's available on Windows systems, as an extra security measure in the peertracker (#5749)

## [1.11.1] - 2024-12-12

### Added
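The 1.12.3/1.11.3 security entries above concern JWT-SVIDs that lack an exp claim. As a rough sketch of the kind of check involved (not SPIRE's actual implementation, which lives in the agent's Workload API handling), rejecting a token whose claims omit exp can look like this:

```go
package main

import (
	"encoding/base64"
	"encoding/json"
	"errors"
	"fmt"
	"strings"
)

// requireExp decodes a JWT payload (without verifying the signature) and
// fails if the exp claim is absent. Illustrative only.
func requireExp(token string) error {
	parts := strings.Split(token, ".")
	if len(parts) != 3 {
		return errors.New("malformed JWT")
	}
	payload, err := base64.RawURLEncoding.DecodeString(parts[1])
	if err != nil {
		return fmt.Errorf("decoding payload: %w", err)
	}
	var claims map[string]any
	if err := json.Unmarshal(payload, &claims); err != nil {
		return fmt.Errorf("parsing claims: %w", err)
	}
	if _, ok := claims["exp"]; !ok {
		return errors.New("token has no expiration (exp) claim")
	}
	return nil
}

func main() {
	// Made-up token whose payload has a sub claim but no exp claim.
	tok := "eyJhbGciOiJFUzI1NiJ9." +
		base64.RawURLEncoding.EncodeToString([]byte(`{"sub":"spiffe://example.org/workload"}`)) +
		".sig"
	fmt.Println(requireExp(tok)) // token has no expiration (exp) claim
}
```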
@@ -2,7 +2,7 @@

# Build stage
ARG goversion
FROM --platform=${BUILDPLATFORM} golang:${goversion}-alpine3.20 as base
FROM --platform=${BUILDPLATFORM} golang:${goversion}-alpine3.22 as base
WORKDIR /spire
RUN apk --no-cache --update add file bash clang lld pkgconfig git make
COPY go.* ./
@@ -1,4 +1,4 @@
FROM ubuntu:xenial
FROM ubuntu:24.04
WORKDIR /spire
RUN apt-get update && apt-get -y install \
    curl unzip git build-essential ca-certificates libssl-dev
Makefile
@@ -32,6 +32,7 @@ help:
	@echo " $(cyan)race-test$(reset) - run unit tests with race detection"
	@echo " $(cyan)integration$(reset) - run integration tests (requires Docker images)"
	@echo " support 'SUITES' variable for executing specific tests"
	@echo " and 'IGNORE_SUITES' variable for ignoring tests"
	@echo " e.g. SUITES='suites/join-token suites/k8s' make integration"
	@echo " $(cyan)integration-windows$(reset) - run integration tests for windows (requires Docker images)"
	@echo " support 'SUITES' variable for executing specific tests"
@@ -103,6 +104,8 @@ else
$(error unsupported ARCH: $(arch1))
endif

ignore_suites := $(IGNORE_SUITES)

############################################################################
# Docker TLS detection for buildx
############################################################################
@@ -136,9 +139,8 @@ endif

go_path := PATH="$(go_bin_dir):$(PATH)"

golangci_lint_version := $(shell awk '/golangci_lint/{print $$2}' .spire-tool-versions)
golangci_lint_version := $(shell awk '/golangci-lint/{print $$2}' .spire-tool-versions)
golangci_lint_dir = $(build_dir)/golangci_lint/$(golangci_lint_version)
golangci_lint_bin = $(golangci_lint_dir)/golangci-lint
golangci_lint_cache = $(golangci_lint_dir)/cache

markdown_lint_version := $(shell awk '/markdown_lint/{print $$2}' .spire-tool-versions)
@@ -310,11 +312,11 @@ integration:
ifeq ($(os1), windows)
	$(error Integration tests are not supported on windows)
else
	$(E)$(go_path) ./test/integration/test.sh $(SUITES)
	$(E)$(go_path) IGNORE_SUITES='$(ignore_suites)' ./test/integration/test.sh $(SUITES)
endif

integration-windows:
	$(E)$(go_path) ./test/integration/test-windows.sh $(SUITES)
	$(E)$(go_path) IGNORE_SUITES='$(ignore_suites)' ./test/integration/test-windows.sh $(SUITES)

#############################################################################
# Docker Images
@@ -402,8 +404,11 @@ endif

lint: lint-code lint-md

lint-code: $(golangci_lint_bin)
	$(E)PATH="$(go_bin_dir):$(PATH)" GOLANGCI_LINT_CACHE="$(golangci_lint_cache)" $(golangci_lint_bin) run ./...
lint-code: | go-check
	$(E)mkdir -p $(golangci_lint_cache)
	$(E)$(go_path) GOLANGCI_LINT_CACHE="$(golangci_lint_cache)" \
		go run github.com/golangci/golangci-lint/v2/cmd/golangci-lint@$(golangci_lint_version) \
		run --max-issues-per-linter=0 --max-same-issues=0 ./...

lint-md:
	$(E)docker run --rm -v "$(DIR):/workdir" $(markdown_lint_image) "**/*.md"
@@ -507,7 +512,7 @@ endif
go-bin-path: go-check
	@echo "$(go_bin_dir):${PATH}"

install-toolchain: install-protoc install-golangci-lint install-protoc-gen-go install-protoc-gen-doc | go-check
install-toolchain: install-protoc install-protoc-gen-go | go-check

install-protoc: $(protoc_bin)

@@ -517,15 +522,6 @@ $(protoc_bin):
	$(E)mkdir -p $(protoc_dir)
	$(E)curl -sSfL $(protoc_url) -o $(build_dir)/tmp.zip; unzip -q -d $(protoc_dir) $(build_dir)/tmp.zip; rm $(build_dir)/tmp.zip

install-golangci-lint: $(golangci_lint_bin)

$(golangci_lint_bin): | go-check
	@echo "Installing golangci-lint $(golangci_lint_version)..."
	$(E)rm -rf $(dir $(golangci_lint_dir))
	$(E)mkdir -p $(golangci_lint_dir)
	$(E)mkdir -p $(golangci_lint_cache)
	$(E)GOBIN=$(golangci_lint_dir) $(go_path) go install github.com/golangci/golangci-lint/cmd/golangci-lint@$(golangci_lint_version)

install-protoc-gen-go: $(protoc_gen_go_bin)

$(protoc_gen_go_bin): | go-check
@@ -61,8 +61,8 @@ The SPIFFE community maintains the SPIRE project. Information on the various SIG
A third party security firm ([Cure53](https://cure53.de/)) completed a security audit of SPIFFE and SPIRE in February of 2021. Additionally, the [CNCF Technical Advisory Group for Security](https://github.com/cncf/tag-security) conducted two assessments on SPIFFE and SPIRE in 2018 and 2020. Please find the reports and supporting material, including the threat model exercise results, below.

- [Cure53 Security Audit Report](doc/cure53-report.pdf)
- [SIG-Security SPIFFE/SPIRE Security Assessment: summary](https://github.com/cncf/sig-security/tree/main/assessments/projects/spiffe-spire)
- [SIG-Security SPIFFE/SPIRE Security Assessment: full assessment](https://github.com/cncf/sig-security/blob/main/assessments/projects/spiffe-spire/self-assessment.md)
- [SIG-Security SPIFFE/SPIRE Security Assessment: summary](https://github.com/cncf/sig-security/tree/main/community/assessments/projects/spiffe-spire)
- [SIG-Security SPIFFE/SPIRE Security Assessment: full assessment](https://github.com/cncf/sig-security/blob/main/community/assessments/projects/spiffe-spire/self-assessment.md)
- [Scrutinizing SPIRE to Sensibly Strengthen SPIFFE Security](https://blog.spiffe.io/scrutinizing-spire-security-9c82ba542019)

### Reporting Security Vulnerabilities
@@ -2,13 +2,11 @@ package run

import (
	"context"
	"crypto/x509"
	"errors"
	"flag"
	"fmt"
	"io"
	"net"
	"net/http"
	"net/url"
	"os"
	"os/signal"
@@ -26,10 +24,9 @@ import (
	"github.com/imdario/mergo"
	"github.com/mitchellh/cli"
	"github.com/sirupsen/logrus"
	"github.com/spiffe/go-spiffe/v2/spiffeid"
	"github.com/spiffe/spire/pkg/agent"
	"github.com/spiffe/spire/pkg/agent/trustbundlesources"
	"github.com/spiffe/spire/pkg/agent/workloadkey"
	"github.com/spiffe/spire/pkg/common/bundleutil"
	"github.com/spiffe/spire/pkg/common/catalog"
	common_cli "github.com/spiffe/spire/pkg/common/cli"
	"github.com/spiffe/spire/pkg/common/config"
@@ -37,7 +34,6 @@ import (
	"github.com/spiffe/spire/pkg/common/health"
	"github.com/spiffe/spire/pkg/common/idutil"
	"github.com/spiffe/spire/pkg/common/log"
	"github.com/spiffe/spire/pkg/common/pemutil"
	"github.com/spiffe/spire/pkg/common/telemetry"
	"github.com/spiffe/spire/pkg/common/tlspolicy"
)
@@ -55,9 +51,6 @@ const (
	defaultDefaultAllBundlesName       = "ALL"
	defaultDisableSPIFFECertValidation = false

	bundleFormatPEM    = "pem"
	bundleFormatSPIFFE = "spiffe"

	minimumAvailabilityTarget = 24 * time.Hour
)

@@ -74,7 +67,7 @@ type agentConfig struct {
	DataDir           string `hcl:"data_dir"`
	AdminSocketPath   string `hcl:"admin_socket_path"`
	InsecureBootstrap bool   `hcl:"insecure_bootstrap"`
	RetryBootstrap    bool   `hcl:"retry_bootstrap"`
	RetryBootstrap    *bool  `hcl:"retry_bootstrap"`
	JoinToken         string `hcl:"join_token"`
	LogFile           string `hcl:"log_file"`
	LogFormat         string `hcl:"log_format"`
@@ -87,6 +80,7 @@ type agentConfig struct {
	WorkloadX509SVIDKeyType       string `hcl:"workload_x509_svid_key_type"`
	TrustBundleFormat             string `hcl:"trust_bundle_format"`
	TrustBundlePath               string `hcl:"trust_bundle_path"`
	TrustBundleUnixSocket         string `hcl:"trust_bundle_unix_socket"`
	TrustBundleURL                string `hcl:"trust_bundle_url"`
	TrustDomain                   string `hcl:"trust_domain"`
	AllowUnauthenticatedVerifiers bool   `hcl:"allow_unauthenticated_verifiers"`
@@ -121,7 +115,7 @@ type experimentalConfig struct {
	SyncInterval             string `hcl:"sync_interval"`
	NamedPipeName            string `hcl:"named_pipe_name"`
	AdminNamedPipeName       string `hcl:"admin_named_pipe_name"`
	UseSyncAuthorizedEntries bool   `hcl:"use_sync_authorized_entries"`
	UseSyncAuthorizedEntries *bool  `hcl:"use_sync_authorized_entries"`
	RequirePQKEM             bool   `hcl:"require_pq_kem"`

	Flags fflag.RawConfig `hcl:"feature_flags"`
@@ -262,16 +256,32 @@ func (c *agentConfig) validate() error {
		return errors.New("only one of trust_bundle_url or trust_bundle_path can be specified, not both")
	}

	if c.TrustBundleFormat != bundleFormatPEM && c.TrustBundleFormat != bundleFormatSPIFFE {
		return fmt.Errorf("invalid value for trust_bundle_format, expected %q or %q", bundleFormatPEM, bundleFormatSPIFFE)
	if c.TrustBundleFormat != trustbundlesources.BundleFormatPEM && c.TrustBundleFormat != trustbundlesources.BundleFormatSPIFFE {
		return fmt.Errorf("invalid value for trust_bundle_format, expected %q or %q", trustbundlesources.BundleFormatPEM, trustbundlesources.BundleFormatSPIFFE)
	}

	if c.TrustBundleUnixSocket != "" && c.TrustBundleURL == "" {
		return errors.New("if trust_bundle_unix_socket is specified, so must be trust_bundle_url")
	}
	if c.TrustBundleURL != "" {
		u, err := url.Parse(c.TrustBundleURL)
		if err != nil {
			return fmt.Errorf("unable to parse trust bundle URL: %w", err)
		}
		if u.Scheme != "https" {
		if c.TrustBundleUnixSocket != "" {
			if u.Scheme != "http" {
				return errors.New("trust bundle URL must start with http:// when used with trust bundle unix socket")
			}
			params := u.Query()
			for key := range params {
				if strings.HasPrefix(key, "spiffe-") {
					return errors.New("trust_bundle_url query params can not start with spiffe-")
				}
				if strings.HasPrefix(key, "spire-") {
					return errors.New("trust_bundle_url query params can not start with spire-")
				}
			}
		} else if u.Scheme != "https" {
			return errors.New("trust bundle URL must start with https://")
		}
	}
@@ -319,6 +329,7 @@ func parseFlags(name string, args []string, output io.Writer) (*agentConfig, err
	flags := flag.NewFlagSet(name, flag.ContinueOnError)
	flags.SetOutput(output)
	c := &agentConfig{}
	retryBootstrap := false

	flags.StringVar(&c.ConfigPath, "config", defaultConfigPath, "Path to a SPIRE config file")
	flags.StringVar(&c.DataDir, "dataDir", "", "A directory the agent can use for its runtime data")
@@ -332,10 +343,10 @@ func parseFlags(name string, args []string, output io.Writer) (*agentConfig, err
	flags.StringVar(&c.TrustDomain, "trustDomain", "", "The trust domain that this agent belongs to")
	flags.StringVar(&c.TrustBundlePath, "trustBundle", "", "Path to the SPIRE server CA bundle")
	flags.StringVar(&c.TrustBundleURL, "trustBundleUrl", "", "URL to download the SPIRE server CA bundle")
	flags.StringVar(&c.TrustBundleFormat, "trustBundleFormat", "", fmt.Sprintf("Format of the bootstrap trust bundle, %q or %q", bundleFormatPEM, bundleFormatSPIFFE))
	flags.StringVar(&c.TrustBundleFormat, "trustBundleFormat", "", fmt.Sprintf("Format of the bootstrap trust bundle, %q or %q", trustbundlesources.BundleFormatPEM, trustbundlesources.BundleFormatSPIFFE))
	flags.BoolVar(&c.AllowUnauthenticatedVerifiers, "allowUnauthenticatedVerifiers", false, "If true, the agent permits the retrieval of X509 certificate bundles by unregistered clients")
	flags.BoolVar(&c.InsecureBootstrap, "insecureBootstrap", false, "If true, the agent bootstraps without verifying the server's identity")
	flags.BoolVar(&c.RetryBootstrap, "retryBootstrap", false, "If true, the agent retries bootstrap with backoff")
	flags.BoolVar(&retryBootstrap, "retryBootstrap", true, "If true, the agent retries bootstrap with backoff")
	flags.BoolVar(&c.ExpandEnv, "expandEnv", false, "Expand environment variables in SPIRE config file")

	c.addOSFlags(flags)
@@ -345,6 +356,12 @@ func parseFlags(name string, args []string, output io.Writer) (*agentConfig, err
		return nil, err
	}

	flags.Visit(func(f *flag.Flag) {
		if f.Name == "retryBootstrap" {
			c.RetryBootstrap = &retryBootstrap
		}
	})

	return c, nil
}

@@ -370,87 +387,6 @@ func mergeInput(fileInput *Config, cliInput *agentConfig) (*Config, error) {
	return c, nil
}

func parseTrustBundle(bundleBytes []byte, trustBundleContentType string) ([]*x509.Certificate, error) {
	switch trustBundleContentType {
	case bundleFormatPEM:
		bundle, err := pemutil.ParseCertificates(bundleBytes)
		if err != nil {
			return nil, err
		}
		return bundle, nil
	case bundleFormatSPIFFE:
		bundle, err := bundleutil.Unmarshal(spiffeid.TrustDomain{}, bundleBytes)
		if err != nil {
			return nil, fmt.Errorf("unable to parse SPIFFE trust bundle: %w", err)
		}
		return bundle.X509Authorities(), nil
	}

	return nil, fmt.Errorf("unknown trust bundle format: %s", trustBundleContentType)
}

func downloadTrustBundle(trustBundleURL string) ([]byte, error) {
	// Download the trust bundle URL from the user specified URL
	// We use gosec -- the annotation below will disable a security check that URLs are not tainted
	/* #nosec G107 */
	resp, err := http.Get(trustBundleURL)
	if err != nil {
		return nil, fmt.Errorf("unable to fetch trust bundle URL %s: %w", trustBundleURL, err)
	}

	defer resp.Body.Close()

	if resp.StatusCode != http.StatusOK {
		return nil, fmt.Errorf("error downloading trust bundle: %s", resp.Status)
	}
	pemBytes, err := io.ReadAll(resp.Body)
	if err != nil {
		return nil, fmt.Errorf("unable to read from trust bundle URL %s: %w", trustBundleURL, err)
	}

	return pemBytes, nil
}

func setupTrustBundle(ac *agent.Config, c *Config) error {
	// Either download the trust bundle if TrustBundleURL is set, or read it
	// from disk if TrustBundlePath is set
	ac.InsecureBootstrap = c.Agent.InsecureBootstrap

	var bundleBytes []byte
	var err error

	switch {
	case c.Agent.TrustBundleURL != "":
		bundleBytes, err = downloadTrustBundle(c.Agent.TrustBundleURL)
		if err != nil {
			return err
		}
	case c.Agent.TrustBundlePath != "":
		bundleBytes, err = loadTrustBundle(c.Agent.TrustBundlePath)
		if err != nil {
			return fmt.Errorf("could not parse trust bundle: %w", err)
		}
	default:
		// If InsecureBootstrap is configured, the bundle is not required
		if ac.InsecureBootstrap {
			return nil
		}
	}

	bundle, err := parseTrustBundle(bundleBytes, c.Agent.TrustBundleFormat)
	if err != nil {
		return err
	}

	if len(bundle) == 0 {
		return errors.New("no certificates found in trust bundle")
	}

	ac.TrustBundle = bundle

	return nil
}

func NewAgentConfig(c *Config, logOptions []log.Option, allowUnknownConfig bool) (*agent.Config, error) {
	ac := &agent.Config{}

@@ -458,8 +394,6 @@ func NewAgentConfig(c *Config, logOptions []log.Option, allowUnknownConfig bool)
		return nil, err
	}

	ac.RetryBootstrap = c.Agent.RetryBootstrap

	if c.Agent.Experimental.SyncInterval != "" {
		var err error
		ac.SyncInterval, err = time.ParseDuration(c.Agent.Experimental.SyncInterval)
@@ -468,8 +402,6 @@ func NewAgentConfig(c *Config, logOptions []log.Option, allowUnknownConfig bool)
		}
	}

	ac.UseSyncAuthorizedEntries = c.Agent.Experimental.UseSyncAuthorizedEntries

	serverHostPort := net.JoinHostPort(c.Agent.ServerAddress, strconv.Itoa(c.Agent.ServerPort))
	ac.ServerAddress = fmt.Sprintf("dns:///%s", serverHostPort)

@@ -499,6 +431,18 @@ func NewAgentConfig(c *Config, logOptions []log.Option, allowUnknownConfig bool)
		ac.LogReopener = log.ReopenOnSignal(logger, reopenableFile)
	}

	ac.RetryBootstrap = true
	if c.Agent.RetryBootstrap != nil {
		ac.Log.Warn("The 'retry_bootstrap' configuration is deprecated. It will be removed in SPIRE 1.14. Please test without the flag before upgrading.")
		ac.RetryBootstrap = *c.Agent.RetryBootstrap
	}

	ac.UseSyncAuthorizedEntries = true
	if c.Agent.Experimental.UseSyncAuthorizedEntries != nil {
		ac.Log.Warn("The 'use_sync_authorized_entries' configuration is deprecated. The option to disable it will be removed in SPIRE 1.13.")
		ac.UseSyncAuthorizedEntries = *c.Agent.Experimental.UseSyncAuthorizedEntries
	}

	if c.Agent.X509SVIDCacheMaxSize < 0 {
		return nil, errors.New("x509_svid_cache_max_size should not be negative")
	}
@@ -538,11 +482,16 @@ func NewAgentConfig(c *Config, logOptions []log.Option, allowUnknownConfig bool)
	}
	ac.DisableSPIFFECertValidation = c.Agent.SDS.DisableSPIFFECertValidation

	err = setupTrustBundle(ac, c)
	if err != nil {
		return nil, err
	ts := &trustbundlesources.Config{
		InsecureBootstrap:     c.Agent.InsecureBootstrap,
		TrustBundleFormat:     c.Agent.TrustBundleFormat,
		TrustBundlePath:       c.Agent.TrustBundlePath,
		TrustBundleURL:        c.Agent.TrustBundleURL,
		TrustBundleUnixSocket: c.Agent.TrustBundleUnixSocket,
	}

	ac.TrustBundleSources = trustbundlesources.New(ts, ac.Log.WithField("Logger", "TrustBundleSources"))

	ac.WorkloadKeyType = workloadkey.ECP256
	if c.Agent.WorkloadX509SVIDKeyType != "" {
		ac.WorkloadKeyType, err = workloadkey.KeyTypeFromString(c.Agent.WorkloadX509SVIDKeyType)
@@ -687,7 +636,7 @@ func defaultConfig() *Config {
			DataDir:           defaultDataDir,
			LogLevel:          defaultLogLevel,
			LogFormat:         log.DefaultFormat,
			TrustBundleFormat: bundleFormatPEM,
			TrustBundleFormat: trustbundlesources.BundleFormatPEM,
			SDS: sdsConfig{
				DefaultBundleName: defaultDefaultBundleName,
				DefaultSVIDName:   defaultDefaultSVIDName,
@@ -700,12 +649,3 @@ func defaultConfig() *Config {

	return c
}

func loadTrustBundle(path string) ([]byte, error) {
	bundleBytes, err := os.ReadFile(path)
	if err != nil {
		return nil, err
	}

	return bundleBytes, nil
}
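The new trust_bundle_unix_socket option pairs an http:// trust_bundle_url with a local Unix domain socket. The sketch below shows the standard Go pattern for issuing such a request; it is illustrative only, since the actual fetch now lives in the pkg/agent/trustbundlesources package rather than in this file, and the socket path and URL are made-up examples.

```go
package main

import (
	"context"
	"fmt"
	"io"
	"net"
	"net/http"
)

// fetchOverUnixSocket issues an HTTP GET for url, but dials the given Unix
// domain socket instead of a TCP host. Sketch only; not SPIRE's implementation.
func fetchOverUnixSocket(socketPath, url string) ([]byte, error) {
	client := &http.Client{
		Transport: &http.Transport{
			DialContext: func(ctx context.Context, _, _ string) (net.Conn, error) {
				var d net.Dialer
				return d.DialContext(ctx, "unix", socketPath)
			},
		},
	}
	resp, err := client.Get(url)
	if err != nil {
		return nil, err
	}
	defer resp.Body.Close()
	if resp.StatusCode != http.StatusOK {
		return nil, fmt.Errorf("unexpected status: %s", resp.Status)
	}
	return io.ReadAll(resp.Body)
}

func main() {
	// Hypothetical values mirroring trust_bundle_unix_socket / trust_bundle_url.
	bundle, err := fetchOverUnixSocket("/tmp/bundle.sock", "http://localhost/trustbundle")
	fmt.Println(len(bundle), err)
}
```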
@@ -2,8 +2,6 @@ package run

import (
	"io"
	"net/http"
	"net/http/httptest"
	"os"
	"path"
	"path/filepath"
@@ -39,117 +37,6 @@ type newAgentConfigCase struct {
	test func(*testing.T, *agent.Config)
}

func TestDownloadTrustBundle(t *testing.T) {
	testTB, _ := os.ReadFile(path.Join(util.ProjectRoot(), "conf/agent/dummy_root_ca.crt"))
	testTBSPIFFE := `{
    "keys": [
        {
            "use": "x509-svid",
            "kty": "EC",
            "crv": "P-384",
            "x": "WjB-nSGSxIYiznb84xu5WGDZj80nL7W1c3zf48Why0ma7Y7mCBKzfQkrgDguI4j0",
            "y": "Z-0_tDH_r8gtOtLLrIpuMwWHoe4vbVBFte1vj6Xt6WeE8lXwcCvLs_mcmvPqVK9j",
            "x5c": [
                "MIIBzDCCAVOgAwIBAgIJAJM4DhRH0vmuMAoGCCqGSM49BAMEMB4xCzAJBgNVBAYTAlVTMQ8wDQYDVQQKDAZTUElGRkUwHhcNMTgwNTEzMTkzMzQ3WhcNMjMwNTEyMTkzMzQ3WjAeMQswCQYDVQQGEwJVUzEPMA0GA1UECgwGU1BJRkZFMHYwEAYHKoZIzj0CAQYFK4EEACIDYgAEWjB+nSGSxIYiznb84xu5WGDZj80nL7W1c3zf48Why0ma7Y7mCBKzfQkrgDguI4j0Z+0/tDH/r8gtOtLLrIpuMwWHoe4vbVBFte1vj6Xt6WeE8lXwcCvLs/mcmvPqVK9jo10wWzAdBgNVHQ4EFgQUh6XzV6LwNazA+GTEVOdu07o5yOgwDwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMCAQYwGQYDVR0RBBIwEIYOc3BpZmZlOi8vbG9jYWwwCgYIKoZIzj0EAwQDZwAwZAIwE4Me13qMC9i6Fkx0h26y09QZIbuRqA9puLg9AeeAAyo5tBzRl1YL0KNEp02VKSYJAjBdeJvqjJ9wW55OGj1JQwDFD7kWeEB6oMlwPbI/5hEY3azJi16I0uN1JSYTSWGSqWc="
            ]
        }
    ]
}`

	cases := []struct {
		msg                 string
		status              int
		fileContents        string
		format              string
		expectDownloadError bool
		expectParseError    bool
	}{
		{
			msg:                 "if URL is not found, should be an error",
			status:              http.StatusNotFound,
			fileContents:        "",
			format:              bundleFormatPEM,
			expectDownloadError: true,
			expectParseError:    false,
		},
		{
			msg:                 "if URL returns error 500, should be an error",
			status:              http.StatusInternalServerError,
			fileContents:        "",
			format:              bundleFormatPEM,
			expectDownloadError: true,
			expectParseError:    false,
		},
		{
			msg:                 "if file is not parseable, should be an error",
			status:              http.StatusOK,
			fileContents:        "NON PEM PARSEABLE TEXT HERE",
			format:              bundleFormatPEM,
			expectDownloadError: false,
			expectParseError:    true,
		},
		{
			msg:                 "if file is empty, should be an error",
			status:              http.StatusOK,
			fileContents:        "",
			format:              bundleFormatPEM,
			expectDownloadError: false,
			expectParseError:    true,
		},
		{
			msg:                 "if file is valid, should not be an error",
			status:              http.StatusOK,
			fileContents:        string(testTB),
			format:              bundleFormatPEM,
			expectDownloadError: false,
			expectParseError:    false,
		},
		{
			msg:                 "if file is not parseable, format is SPIFFE, should not be an error",
			status:              http.StatusOK,
			fileContents:        "[}",
			format:              bundleFormatSPIFFE,
			expectDownloadError: false,
			expectParseError:    true,
		},
		{
			msg:                 "if file is valid, format is SPIFFE, should not be an error",
			status:              http.StatusOK,
			fileContents:        testTBSPIFFE,
			format:              bundleFormatSPIFFE,
			expectDownloadError: false,
			expectParseError:    false,
		},
	}

	for _, testCase := range cases {
		t.Run(testCase.msg, func(t *testing.T) {
			testServer := httptest.NewServer(http.HandlerFunc(
				func(w http.ResponseWriter, r *http.Request) {
					w.WriteHeader(testCase.status)
					_, _ = io.WriteString(w, testCase.fileContents)
					// if err != nil {
					// 	return
					// }
				}))
			defer testServer.Close()
			bundleBytes, err := downloadTrustBundle(testServer.URL)
			if testCase.expectDownloadError {
				require.Error(t, err)
			} else {
				require.NoError(t, err)

				_, err := parseTrustBundle(bundleBytes, testCase.format)
				if testCase.expectParseError {
					require.Error(t, err)
				} else {
					require.NoError(t, err)
				}
			}
		})
	}
}

func TestMergeInput(t *testing.T) {
	cases := []mergeInputCase{
		{
@@ -679,7 +566,7 @@ func TestNewAgentConfig(t *testing.T) {
				c.Agent.InsecureBootstrap = false
			},
			test: func(t *testing.T, c *agent.Config) {
				require.False(t, c.InsecureBootstrap)
				require.False(t, c.TrustBundleSources.GetInsecureBootstrap())
			},
		},
		{
@@ -691,13 +578,14 @@ func TestNewAgentConfig(t *testing.T) {
				c.Agent.InsecureBootstrap = true
			},
			test: func(t *testing.T, c *agent.Config) {
				require.True(t, c.InsecureBootstrap)
				require.True(t, c.TrustBundleSources.GetInsecureBootstrap())
			},
		},
		{
			msg: "retry_bootstrap should be correctly set to false",
			input: func(c *Config) {
				c.Agent.RetryBootstrap = false
				rb := false
				c.Agent.RetryBootstrap = &rb
			},
			test: func(t *testing.T, c *agent.Config) {
				require.False(t, c.RetryBootstrap)
@@ -706,7 +594,8 @@ func TestNewAgentConfig(t *testing.T) {
		{
			msg: "retry_bootstrap should be correctly set to true",
			input: func(c *Config) {
				c.Agent.RetryBootstrap = true
				rb := true
				c.Agent.RetryBootstrap = &rb
			},
			test: func(t *testing.T, c *agent.Config) {
				require.True(t, c.RetryBootstrap)
@@ -837,6 +726,51 @@ func TestNewAgentConfig(t *testing.T) {
				require.Nil(t, c)
			},
		},
		{
			msg:                "trust_bundle_url must start with http:// when unix socket",
			expectError:        true,
			requireErrorPrefix: "trust bundle URL must start with http://",
			input: func(c *Config) {
				// remove trust_bundle_path provided by defaultValidConfig()
				c.Agent.TrustBundlePath = ""
				c.Agent.TrustBundleURL = "foo.bar"
				c.Agent.TrustBundleUnixSocket = "foo.bar"
				c.Agent.InsecureBootstrap = false
			},
			test: func(t *testing.T, c *agent.Config) {
				require.Nil(t, c)
			},
		},
		{
			msg:                "trust_bundle_url query params can not start with spiffe- when unix socket",
			expectError:        true,
			requireErrorPrefix: "trust_bundle_url query params can not start with spiffe-",
			input: func(c *Config) {
				// remove trust_bundle_path provided by defaultValidConfig()
				c.Agent.TrustBundlePath = ""
				c.Agent.TrustBundleURL = "http://localhost/trustbundle?spiffe-test=foo"
				c.Agent.TrustBundleUnixSocket = "foo.bar"
				c.Agent.InsecureBootstrap = false
			},
			test: func(t *testing.T, c *agent.Config) {
				require.Nil(t, c)
			},
		},
		{
			msg:                "trust_bundle_url query params can not start with spire- when unix socket",
			expectError:        true,
			requireErrorPrefix: "trust_bundle_url query params can not start with spire-",
			input: func(c *Config) {
				// remove trust_bundle_path provided by defaultValidConfig()
				c.Agent.TrustBundlePath = ""
				c.Agent.TrustBundleURL = "http://localhost/trustbundle?spire-test=foo"
				c.Agent.TrustBundleUnixSocket = "foo.bar"
				c.Agent.InsecureBootstrap = false
			},
			test: func(t *testing.T, c *agent.Config) {
				require.Nil(t, c)
			},
		},
		{
			msg: "workload_key_type is not set",
			input: func(c *Config) {
@@ -85,11 +85,8 @@ func printBundle(out io.Writer, bundle *types.Bundle) error {
		return err
	}

	if _, err := fmt.Fprintln(out, o.String()); err != nil {
		return err
	}

	return nil
	_, err = fmt.Fprintln(out, o.String())
	return err
}

// bundleFromProto converts a bundle from the given *types.Bundle to *spiffebundle.Bundle
@@ -4,14 +4,16 @@ import (
	"context"
	"errors"
	"flag"
	"fmt"

	"github.com/mitchellh/cli"
	entryv1 "github.com/spiffe/spire-api-sdk/proto/spire/api/server/entry/v1"
	"github.com/spiffe/spire-api-sdk/proto/spire/api/types"
	"github.com/spiffe/spire/cmd/spire-server/util"
	serverutil "github.com/spiffe/spire/cmd/spire-server/util"
	commoncli "github.com/spiffe/spire/pkg/common/cli"
	"github.com/spiffe/spire/pkg/common/cliprinter"
	"github.com/spiffe/spire/pkg/common/idutil"
	"github.com/spiffe/spire/pkg/common/util"
	"google.golang.org/grpc/codes"
)

@@ -21,7 +23,7 @@ func NewCreateCommand() cli.Command {
}

func newCreateCommand(env *commoncli.Env) cli.Command {
	return util.AdaptCommand(env, &createCommand{env: env})
	return serverutil.AdaptCommand(env, &createCommand{env: env})
}

type createCommand struct {
@@ -104,7 +106,7 @@ func (c *createCommand) AppendFlags(f *flag.FlagSet) {
	cliprinter.AppendFlagWithCustomPretty(&c.printer, f, c.env, prettyPrintCreate)
}

func (c *createCommand) Run(ctx context.Context, _ *commoncli.Env, serverClient util.ServerClient) error {
func (c *createCommand) Run(ctx context.Context, _ *commoncli.Env, serverClient serverutil.ServerClient) error {
	if err := c.validate(); err != nil {
		return err
	}
@@ -175,6 +177,16 @@ func (c *createCommand) parseConfig() ([]*types.Entry, error) {
		return nil, err
	}

	x509SvidTTL, err := util.CheckedCast[int32](c.x509SVIDTTL)
	if err != nil {
		return nil, fmt.Errorf("invalid value for X509 SVID TTL: %w", err)
	}

	jwtSvidTTL, err := util.CheckedCast[int32](c.jwtSVIDTTL)
	if err != nil {
		return nil, fmt.Errorf("invalid value for JWT SVID TTL: %w", err)
	}

	e := &types.Entry{
		Id:       c.entryID,
		ParentId: parentID,
@@ -183,14 +195,14 @@ func (c *createCommand) parseConfig() ([]*types.Entry, error) {
		ExpiresAt:   c.entryExpiry,
		DnsNames:    c.dnsNames,
		StoreSvid:   c.storeSVID,
		X509SvidTtl: int32(c.x509SVIDTTL),
		JwtSvidTtl:  int32(c.jwtSVIDTTL),
		X509SvidTtl: x509SvidTTL,
		JwtSvidTtl:  jwtSvidTTL,
		Hint:        c.hint,
	}

	selectors := []*types.Selector{}
	for _, s := range c.selectors {
		cs, err := util.ParseSelector(s)
		cs, err := serverutil.ParseSelector(s)
		if err != nil {
			return nil, err
		}
@@ -254,7 +266,7 @@ func prettyPrintCreate(env *commoncli.Env, results ...any) error {

	for _, r := range failed {
		env.ErrPrintf("Failed to create the following entry (code: %s, msg: %q):\n",
			codes.Code(r.Status.Code),
			util.MustCast[codes.Code](r.Status.Code),
			r.Status.Message)
		printEntry(r.Entry, env.ErrPrintf)
	}
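The TTL handling above replaces bare int32(...) conversions with util.CheckedCast and util.MustCast from pkg/common/util. The diff does not include those helpers, so the following is only a plausible sketch of what a range-checked generic cast of that shape can look like; the constraint set and error text are assumptions, not the real implementation.

```go
package util

import (
	"fmt"

	"golang.org/x/exp/constraints"
)

// CheckedCast converts v to T and errors out when the value does not survive
// the round trip (i.e. it overflows or changes sign). Sketch only; the real
// helper in pkg/common/util may be implemented differently.
func CheckedCast[T, F constraints.Integer](v F) (T, error) {
	r := T(v)
	if F(r) != v || (v > 0) != (r > 0) {
		var zero T
		return zero, fmt.Errorf("value %d out of range for target type", v)
	}
	return r, nil
}

// MustCast is the panicking variant for values already known to be in range,
// matching the util.MustCast[codes.Code](...) calls in the diff.
func MustCast[T, F constraints.Integer](v F) T {
	r, err := CheckedCast[T](v)
	if err != nil {
		panic(err)
	}
	return r
}
```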
@@ -10,9 +10,10 @@ import (

	"github.com/mitchellh/cli"
	entryv1 "github.com/spiffe/spire-api-sdk/proto/spire/api/server/entry/v1"
	"github.com/spiffe/spire/cmd/spire-server/util"
	serverutil "github.com/spiffe/spire/cmd/spire-server/util"
	commoncli "github.com/spiffe/spire/pkg/common/cli"
	"github.com/spiffe/spire/pkg/common/cliprinter"
	"github.com/spiffe/spire/pkg/common/util"
	"google.golang.org/grpc/codes"
)

@@ -22,7 +23,7 @@ func NewDeleteCommand() cli.Command {
}

func newDeleteCommand(env *commoncli.Env) cli.Command {
	return util.AdaptCommand(env, &deleteCommand{env: env})
	return serverutil.AdaptCommand(env, &deleteCommand{env: env})
}

type deleteCommand struct {
@@ -70,7 +71,7 @@ func parseEntryDeleteJSON(path string) ([]string, error) {
	return batchDeleteEntryRequest.Ids, nil
}

func (c *deleteCommand) Run(ctx context.Context, _ *commoncli.Env, serverClient util.ServerClient) error {
func (c *deleteCommand) Run(ctx context.Context, _ *commoncli.Env, serverClient serverutil.ServerClient) error {
	if err := c.validate(); err != nil {
		return err
	}
@@ -135,7 +136,7 @@ func (c *deleteCommand) prettyPrintDelete(env *commoncli.Env, results ...any) er
	for _, result := range failed {
		env.ErrPrintf("Failed to delete entry with ID %s (code: %s, msg: %q)\n",
			result.Id,
			codes.Code(result.Status.Code),
			util.MustCast[codes.Code](result.Status.Code),
			result.Status.Message)
	}

@@ -4,13 +4,15 @@ import (
	"context"
	"errors"
	"flag"
	"fmt"

	"github.com/mitchellh/cli"
	entryv1 "github.com/spiffe/spire-api-sdk/proto/spire/api/server/entry/v1"
	"github.com/spiffe/spire-api-sdk/proto/spire/api/types"
	"github.com/spiffe/spire/cmd/spire-server/util"
	serverutil "github.com/spiffe/spire/cmd/spire-server/util"
	commoncli "github.com/spiffe/spire/pkg/common/cli"
	"github.com/spiffe/spire/pkg/common/cliprinter"
	"github.com/spiffe/spire/pkg/common/util"
	"google.golang.org/grpc/codes"
)

@@ -20,7 +22,7 @@ func NewUpdateCommand() cli.Command {
}

func newUpdateCommand(env *commoncli.Env) cli.Command {
	return util.AdaptCommand(env, &updateCommand{env: env})
	return serverutil.AdaptCommand(env, &updateCommand{env: env})
}

type updateCommand struct {
@@ -99,7 +101,7 @@ func (c *updateCommand) AppendFlags(f *flag.FlagSet) {
	cliprinter.AppendFlagWithCustomPretty(&c.printer, f, c.env, prettyPrintUpdate)
}

func (c *updateCommand) Run(ctx context.Context, _ *commoncli.Env, serverClient util.ServerClient) error {
func (c *updateCommand) Run(ctx context.Context, _ *commoncli.Env, serverClient serverutil.ServerClient) error {
	if err := c.validate(); err != nil {
		return err
	}
@@ -169,6 +171,16 @@ func (c *updateCommand) parseConfig() ([]*types.Entry, error) {
		return nil, err
	}

	x509SvidTTL, err := util.CheckedCast[int32](c.x509SvidTTL)
	if err != nil {
		return nil, fmt.Errorf("invalid value for X509 SVID TTL: %w", err)
	}

	jwtSvidTTL, err := util.CheckedCast[int32](c.jwtSvidTTL)
	if err != nil {
		return nil, fmt.Errorf("invalid value for JWT SVID TTL: %w", err)
	}

	e := &types.Entry{
		Id:       c.entryID,
		ParentId: parentID,
@@ -176,14 +188,14 @@ func (c *updateCommand) parseConfig() ([]*types.Entry, error) {
		Downstream:  c.downstream,
		ExpiresAt:   c.entryExpiry,
		DnsNames:    c.dnsNames,
		X509SvidTtl: int32(c.x509SvidTTL),
		JwtSvidTtl:  int32(c.jwtSvidTTL),
		X509SvidTtl: x509SvidTTL,
		JwtSvidTtl:  jwtSvidTTL,
		Hint:        c.hint,
	}

	selectors := []*types.Selector{}
	for _, s := range c.selectors {
		cs, err := util.ParseSelector(s)
		cs, err := serverutil.ParseSelector(s)
		if err != nil {
			return nil, err
		}
@@ -240,7 +252,7 @@ func prettyPrintUpdate(env *commoncli.Env, results ...any) error {
	// Print entries that failed to be updated
	for _, r := range failed {
		env.ErrPrintf("Failed to update the following entry (code: %s, msg: %q):\n",
			codes.Code(r.Status.Code),
			util.MustCast[codes.Code](r.Status.Code),
			r.Status.Message)
		printEntry(r.Entry, env.ErrPrintf)
	}
@@ -9,9 +9,10 @@ import (
"github.com/mitchellh/cli"
trustdomainv1 "github.com/spiffe/spire-api-sdk/proto/spire/api/server/trustdomain/v1"
"github.com/spiffe/spire-api-sdk/proto/spire/api/types"
"github.com/spiffe/spire/cmd/spire-server/util"
serverutil "github.com/spiffe/spire/cmd/spire-server/util"
commoncli "github.com/spiffe/spire/pkg/common/cli"
"github.com/spiffe/spire/pkg/common/cliprinter"
"github.com/spiffe/spire/pkg/common/util"
"google.golang.org/grpc/codes"
)
@@ -26,7 +27,7 @@ func NewCreateCommand() cli.Command {
}

func newCreateCommand(env *commoncli.Env) cli.Command {
return util.AdaptCommand(env, &createCommand{env: env})
return serverutil.AdaptCommand(env, &createCommand{env: env})
}

type createCommand struct {
@@ -52,7 +53,7 @@ func (c *createCommand) AppendFlags(f *flag.FlagSet) {
cliprinter.AppendFlagWithCustomPretty(&c.printer, f, c.env, c.prettyPrintCreate)
}

func (c *createCommand) Run(ctx context.Context, _ *commoncli.Env, serverClient util.ServerClient) error {
func (c *createCommand) Run(ctx context.Context, _ *commoncli.Env, serverClient serverutil.ServerClient) error {
federationRelationships, err := getRelationships(c.config, c.path)
if err != nil {
return err
@@ -101,7 +102,7 @@ func (c *createCommand) prettyPrintCreate(env *commoncli.Env, results ...any) er
for _, r := range failed {
env.Println()
env.ErrPrintf("Failed to create the following federation relationship (code: %s, msg: %q):\n",
codes.Code(r.Status.Code),
util.MustCast[codes.Code](r.Status.Code),
r.Status.Message)
printFederationRelationship(r.FederationRelationship, env.ErrPrintf)
}
@@ -9,9 +9,10 @@ import (
"github.com/mitchellh/cli"
trustdomainv1 "github.com/spiffe/spire-api-sdk/proto/spire/api/server/trustdomain/v1"
"github.com/spiffe/spire-api-sdk/proto/spire/api/types"
"github.com/spiffe/spire/cmd/spire-server/util"
serverutil "github.com/spiffe/spire/cmd/spire-server/util"
commoncli "github.com/spiffe/spire/pkg/common/cli"
"github.com/spiffe/spire/pkg/common/cliprinter"
"github.com/spiffe/spire/pkg/common/util"
"google.golang.org/grpc/codes"
)
@@ -21,7 +22,7 @@ func NewUpdateCommand() cli.Command {
}

func newUpdateCommand(env *commoncli.Env) cli.Command {
return util.AdaptCommand(env, &updateCommand{env: env})
return serverutil.AdaptCommand(env, &updateCommand{env: env})
}

type updateCommand struct {
@@ -47,7 +48,7 @@ func (c *updateCommand) AppendFlags(f *flag.FlagSet) {
cliprinter.AppendFlagWithCustomPretty(&c.printer, f, c.env, c.prettyPrintUpdate)
}

func (c *updateCommand) Run(ctx context.Context, _ *commoncli.Env, serverClient util.ServerClient) error {
func (c *updateCommand) Run(ctx context.Context, _ *commoncli.Env, serverClient serverutil.ServerClient) error {
federationRelationships, err := getRelationships(c.config, c.path)
if err != nil {
return err
@@ -97,7 +98,7 @@ func (c *updateCommand) prettyPrintUpdate(env *commoncli.Env, results ...any) er
for _, r := range failed {
env.Println()
env.ErrPrintf("Failed to update the following federation relationship (code: %s, msg: %q):\n",
codes.Code(r.Status.Code),
util.MustCast[codes.Code](r.Status.Code),
r.Status.Message)
printFederationRelationship(r.FederationRelationship, env.ErrPrintf)
}
@@ -12,11 +12,12 @@ import (
"github.com/spiffe/go-spiffe/v2/spiffeid"
svidv1 "github.com/spiffe/spire-api-sdk/proto/spire/api/server/svid/v1"
"github.com/spiffe/spire-api-sdk/proto/spire/api/types"
"github.com/spiffe/spire/cmd/spire-server/util"
serverutil "github.com/spiffe/spire/cmd/spire-server/util"
commoncli "github.com/spiffe/spire/pkg/common/cli"
"github.com/spiffe/spire/pkg/common/cliprinter"
"github.com/spiffe/spire/pkg/common/diskutil"
"github.com/spiffe/spire/pkg/common/jwtsvid"
"github.com/spiffe/spire/pkg/common/util"
)

func NewMintCommand() cli.Command {
@@ -24,7 +25,7 @@ func NewMintCommand() cli.Command {
}

func newMintCommand(env *commoncli.Env) cli.Command {
return util.AdaptCommand(env, &mintCommand{env: env})
return serverutil.AdaptCommand(env, &mintCommand{env: env})
}

type mintCommand struct {
@@ -52,7 +53,7 @@ func (c *mintCommand) AppendFlags(fs *flag.FlagSet) {
cliprinter.AppendFlagWithCustomPretty(&c.printer, fs, c.env, prettyPrintMint)
}

func (c *mintCommand) Run(ctx context.Context, env *commoncli.Env, serverClient util.ServerClient) error {
func (c *mintCommand) Run(ctx context.Context, env *commoncli.Env, serverClient serverutil.ServerClient) error {
if c.spiffeID == "" {
return errors.New("spiffeID must be specified")
}
@@ -63,6 +64,10 @@ func (c *mintCommand) Run(ctx context.Context, env *commoncli.Env, serverClient
if err != nil {
return err
}
ttl, err := ttlToSeconds(c.ttl)
if err != nil {
return fmt.Errorf("invalid value for TTL: %w", err)
}

client := serverClient.NewSVIDClient()
resp, err := client.MintJWTSVID(ctx, &svidv1.MintJWTSVIDRequest{
@@ -70,7 +75,7 @@ func (c *mintCommand) Run(ctx context.Context, env *commoncli.Env, serverClient
TrustDomain: spiffeID.TrustDomain().Name(),
Path: spiffeID.Path(),
},
Ttl: ttlToSeconds(c.ttl),
Ttl: ttl,
Audience: c.audience,
})
if err != nil {
@@ -132,8 +137,8 @@ func getJWTSVIDEndOfLife(token string) (time.Time, error) {

// ttlToSeconds returns the number of seconds in a duration, rounded up to
// the nearest second
func ttlToSeconds(ttl time.Duration) int32 {
return int32((ttl + time.Second - 1) / time.Second)
func ttlToSeconds(ttl time.Duration) (int32, error) {
return util.CheckedCast[int32]((ttl + time.Second - 1) / time.Second)
}

func prettyPrintMint(env *commoncli.Env, results ...any) error {
@@ -33,8 +33,5 @@ func PrettyPrintLogger(env *commoncli.Env, results ...any) error {
return fmt.Errorf("internal error: logrus log level %d has no name; please report this as a bug", logrusLaunch)
}

if err := env.Printf("Logger Level : %s\nLaunch Level : %s\n\n", currentText, launchText); err != nil {
return err
}
return nil
return env.Printf("Logger Level : %s\nLaunch Level : %s\n\n", currentText, launchText)
}
@@ -14,7 +14,7 @@ import (
func TestPrettyPrintLogger(t *testing.T) {
for _, tt := range []struct {
name string
logger interface{}
logger any
outWriter errorWriter
errWriter errorWriter
env *commoncli.Env
@@ -110,6 +110,7 @@ type experimentalConfig struct {
CacheReloadInterval string `hcl:"cache_reload_interval"`
EventsBasedCache bool `hcl:"events_based_cache"`
PruneEventsOlderThan string `hcl:"prune_events_older_than"`
EventTimeout string `hcl:"event_timeout"`
SQLTransactionTimeout string `hcl:"sql_transaction_timeout"`
RequirePQKEM bool `hcl:"require_pq_kem"`
@@ -568,6 +569,7 @@ func NewServerConfig(c *Config, logOptions []log.Option, allowUnknownConfig bool
}

if c.Server.UseLegacyDownstreamX509CATTL != nil {
sc.Log.Warn("'use_legacy_downstream_x509_ca_ttl' is deprecated and will be removed in a future release")
sc.UseLegacyDownstreamX509CATTL = *c.Server.UseLegacyDownstreamX509CATTL
if sc.UseLegacyDownstreamX509CATTL {
sc.Log.Warn("Using legacy downstream X509 CA TTL calculation; this option will be removed in a future release")
@@ -575,10 +577,9 @@ func NewServerConfig(c *Config, logOptions []log.Option, allowUnknownConfig bool
sc.Log.Info("Using preferred downstream X509 CA TTL calculation")
}
} else {
// The default value should be false in SPIRE 1.11.0 and the flag
// removed in SPIRE 1.12.0.
sc.UseLegacyDownstreamX509CATTL = true
sc.Log.Info("Using legacy downstream X509 CA TTL calculation by default; this default will change in a future release")
// The flag should be removed in SPIRE 1.13.0.
sc.UseLegacyDownstreamX509CATTL = false
sc.Log.Info("Using preferred downstream X509 CA TTL calculation")
}

// If the configured TTLs can lead to surprises, then do our best to log an
@@ -707,11 +708,20 @@ func NewServerConfig(c *Config, logOptions []log.Option, allowUnknownConfig bool
}

if c.Server.Experimental.SQLTransactionTimeout != "" {
sc.Log.Warn("experimental.sql_transaction_timeout is deprecated, use experimental.event_timeout instead")
interval, err := time.ParseDuration(c.Server.Experimental.SQLTransactionTimeout)
if err != nil {
return nil, fmt.Errorf("could not parse SQL transaction timeout interval: %w", err)
}
sc.SQLTransactionTimeout = interval
sc.EventTimeout = interval
}

if c.Server.Experimental.EventTimeout != "" {
interval, err := time.ParseDuration(c.Server.Experimental.EventTimeout)
if err != nil {
return nil, fmt.Errorf("could not parse event timeout interval: %w", err)
}
sc.EventTimeout = interval
}

if c.Server.Experimental.EventsBasedCache {
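For context, a minimal sketch of the configuration surface this parsing accepts, assuming a standard `server.conf` layout; the values shown are illustrative only:

```hcl
server {
    experimental {
        # Preferred knob: how long to wait for an event before giving up.
        event_timeout = "15m"

        # Deprecated alias: still parsed, logs a warning, and maps onto the
        # same EventTimeout value. Do not set it together with event_timeout,
        # since validateConfig rejects that combination.
        # sql_transaction_timeout = "24h"
    }
}
```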
@@ -762,10 +772,7 @@ func setBundleEndpointConfigProfile(config *bundleEndpointConfig, dataDir string
return nil
case profileConfig.HTTPSWeb.ServingCertFile != nil:
federationConfig.BundleEndpoint.DiskCertManager, err = configToDiskCertManager(profileConfig.HTTPSWeb.ServingCertFile, log)
if err != nil {
return err
}
return nil
return err
default:
return errors.New("malformed https_web profile configuration: 'acme' or 'serving_cert_file' is required")
}
@@ -904,6 +911,10 @@ func validateConfig(c *Config) error {
}
}

if c.Server.Experimental.EventTimeout != "" && c.Server.Experimental.SQLTransactionTimeout != "" {
return errors.New("both experimental sql_transaction_timeout and event_timeout set, only set event_timeout")
}

return c.validateOS()
}
@@ -1144,6 +1144,24 @@ func TestNewServerConfig(t *testing.T) {
require.Nil(t, c)
},
},
{
msg: "sql_transaction_timeout is correctly parsed",
input: func(c *Config) {
c.Server.Experimental.SQLTransactionTimeout = "1m"
},
test: func(t *testing.T, c *server.Config) {
require.Equal(t, time.Minute, c.EventTimeout)
},
},
{
msg: "event_timeout is correctly parsed",
input: func(c *Config) {
c.Server.Experimental.EventTimeout = "1m"
},
test: func(t *testing.T, c *server.Config) {
require.Equal(t, time.Minute, c.EventTimeout)
},
},
{
msg: "audit_log_enabled is enabled",
input: func(c *Config) {
@@ -1330,6 +1348,14 @@ func TestValidateConfig(t *testing.T) {
},
expectedErr: `federation.federates_with["domain.test"].bundle_endpoint_url must use the HTTPS protocol; URL found: "http://example.org/test"`,
},
{
name: "can't set both sql_transaction_timeout and event_timeout",
applyConf: func(c *Config) {
c.Server.Experimental.EventTimeout = "1h"
c.Server.Experimental.SQLTransactionTimeout = "1h"
},
expectedErr: "both experimental sql_transaction_timeout and event_timeout set, only set event_timeout",
},
}

for _, testCase := range testCases {
@@ -3,14 +3,16 @@ package token
import (
"context"
"flag"
"fmt"

"github.com/mitchellh/cli"
"github.com/spiffe/go-spiffe/v2/spiffeid"
agentv1 "github.com/spiffe/spire-api-sdk/proto/spire/api/server/agent/v1"
prototypes "github.com/spiffe/spire-api-sdk/proto/spire/api/types"
"github.com/spiffe/spire/cmd/spire-server/util"
serverutil "github.com/spiffe/spire/cmd/spire-server/util"
commoncli "github.com/spiffe/spire/pkg/common/cli"
"github.com/spiffe/spire/pkg/common/cliprinter"
"github.com/spiffe/spire/pkg/common/util"
)

func NewGenerateCommand() cli.Command {
@@ -18,7 +20,7 @@ func NewGenerateCommand() cli.Command {
}

func newGenerateCommand(env *commoncli.Env) cli.Command {
return util.AdaptCommand(env, &generateCommand{env: env})
return serverutil.AdaptCommand(env, &generateCommand{env: env})
}

type generateCommand struct {
@@ -39,16 +41,20 @@ func (g *generateCommand) Synopsis() string {
return "Generates a join token"
}

func (g *generateCommand) Run(ctx context.Context, _ *commoncli.Env, serverClient util.ServerClient) error {
func (g *generateCommand) Run(ctx context.Context, _ *commoncli.Env, serverClient serverutil.ServerClient) error {
id, err := getID(g.SpiffeID)
if err != nil {
return err
}
ttl, err := util.CheckedCast[int32](g.TTL)
if err != nil {
return fmt.Errorf("invalid value for TTL: %w", err)
}

c := serverClient.NewAgentClient()
resp, err := c.CreateJoinToken(ctx, &agentv1.CreateJoinTokenRequest{
AgentId: id,
Ttl: int32(g.TTL),
Ttl: ttl,
})
if err != nil {
return err
@@ -19,10 +19,11 @@ import (
"github.com/spiffe/go-spiffe/v2/spiffeid"
bundlev1 "github.com/spiffe/spire-api-sdk/proto/spire/api/server/bundle/v1"
svidv1 "github.com/spiffe/spire-api-sdk/proto/spire/api/server/svid/v1"
"github.com/spiffe/spire/cmd/spire-server/util"
serverutil "github.com/spiffe/spire/cmd/spire-server/util"
commoncli "github.com/spiffe/spire/pkg/common/cli"
"github.com/spiffe/spire/pkg/common/cliprinter"
"github.com/spiffe/spire/pkg/common/diskutil"
"github.com/spiffe/spire/pkg/common/util"
)

type generateKeyFunc func() (crypto.Signer, error)
@@ -37,7 +38,7 @@ func newMintCommand(env *commoncli.Env, generateKey generateKeyFunc) cli.Command
return ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
}
}
return util.AdaptCommand(env, &mintCommand{
return serverutil.AdaptCommand(env, &mintCommand{
generateKey: generateKey,
env: env,
})
@@ -70,7 +71,7 @@ func (c *mintCommand) AppendFlags(fs *flag.FlagSet) {
cliprinter.AppendFlagWithCustomPretty(&c.printer, fs, c.env, c.prettyPrintMint)
}

func (c *mintCommand) Run(ctx context.Context, env *commoncli.Env, serverClient util.ServerClient) error {
func (c *mintCommand) Run(ctx context.Context, env *commoncli.Env, serverClient serverutil.ServerClient) error {
if c.spiffeID == "" {
return errors.New("spiffeID must be specified")
}
@@ -80,6 +81,11 @@ func (c *mintCommand) Run(ctx context.Context, env *commoncli.Env, serverClient
return err
}

ttl, err := ttlToSeconds(c.ttl)
if err != nil {
return fmt.Errorf("invalid value for TTL: %w", err)
}

key, err := c.generateKey()
if err != nil {
return fmt.Errorf("unable to generate key: %w", err)
@@ -96,7 +102,7 @@ func (c *mintCommand) Run(ctx context.Context, env *commoncli.Env, serverClient
client := serverClient.NewSVIDClient()
resp, err := client.MintX509SVID(ctx, &svidv1.MintX509SVIDRequest{
Csr: csr,
Ttl: ttlToSeconds(c.ttl),
Ttl: ttl,
})
if err != nil {
return fmt.Errorf("unable to mint SVID: %w", err)
@@ -167,8 +173,8 @@ func (c *mintCommand) Run(ctx context.Context, env *commoncli.Env, serverClient

// ttlToSeconds returns the number of seconds in a duration, rounded up to
// the nearest second
func ttlToSeconds(ttl time.Duration) int32 {
return int32((ttl + time.Second - 1) / time.Second)
func ttlToSeconds(ttl time.Duration) (int32, error) {
return util.CheckedCast[int32]((ttl + time.Second - 1) / time.Second)
}

type mintResult struct {
@@ -59,6 +59,9 @@ agent {
# trust_bundle_url: URL to download the initial SPIRE server trust bundle.
# trust_bundle_url = ""

# trust_bundle_unix_socket: Make the request specified via trust_bundle_url happen against the specified unix socket.
# trust_bundle_unix_socket = "/tmp/your-webserver.sock"

# trust_bundle_format: The format for the initial SPIRE server trust bundle, pem or spiffe
# trust_bundle_format = "pem"
@@ -108,6 +111,10 @@ agent {
# # admin_named_pipe_name: Pipe name to bind the Admin API named pipe (Windows only).
# Can be used to access the Debug API and Delegated Identity API.
# admin_named_pipe_name = ""

# # use_sync_authorized_entries: Use SyncAuthorizedEntries API for periodic synchronization
# # of authorized entries.
# use_sync_authorized_entries = true
# }
}
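With the sample default above now set to `true`, a minimal sketch of opting back out, assuming the same experimental block (values illustrative only):

```hcl
agent {
    experimental {
        # Fall back to full authorized-entry fetches instead of the
        # SyncAuthorizedEntries API.
        use_sync_authorized_entries = false
    }
}
```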
@@ -204,20 +211,6 @@ plugins {
}
}

# NodeAttestor "k8s_sat" (deprecated): A node attestor which attests agent identity
# using a Kubernetes Service Account token.
NodeAttestor "k8s_sat" {
plugin_data {
# cluster: Name of the cluster. It must correspond to a cluster
# configured in the server plugin.
# cluster = ""

# token_path: Path to the service account token on disk.
# Default: /var/run/secrets/kubernetes.io/serviceaccount/token.
# token_path = "/var/run/secrets/kubernetes.io/serviceaccount/token"
}
}

# NodeAttestor "sshpop": A node attestor which attests agent identity
# using an existing ssh certificate.
NodeAttestor "sshpop" {
@@ -533,47 +533,6 @@ plugins {
# }
# }

# NodeAttestor "k8s_sat" (deprecated): A node attestor which attests agent identity
# using a Kubernetes Service Account token.
# NodeAttestor "k8s_sat" {
# plugin_data {
# # clusters: A map of clusters, keyed by an arbitrary ID, that are
# # authorized for attestation.
# # clusters = {
# # "<arbitrary ID>" = {
# # service_account_allow_list: A list of service account names,
# # qualified by namespace (for example, "default:blog" or
# # "production:web") to allow for node attestation. Attestation
# # will be rejected for tokens bound to service accounts that
# # aren't in the allow list.
# # service_account_allow_list = []

# # use_token_review_api_validation: Specifies how the service
# # account token is validated. If false, validation is done
# # locally using the provided key. If true, validation is done
# # using token review API. Default: false.
# # use_token_review_api_validation = false

# # service_account_key_file: It is only used if
# # use_token_review_api_validation is set to false. Path on disk
# # to a PEM encoded file containing public keys used in
# # validating tokens for that cluster. RSA and ECDSA keys are
# # supported. For RSA, X509 certificates, PKCS1, and PKIX encoded
# # public keys are accepted. For ECDSA, X509 certificates, and
# # PKIX encoded public keys are accepted.
# # service_account_key_file = ""

# # kube_config_file: It is only used if
# # use_token_review_api_validation is set to true. Path to a k8s
# # configuration file for API Server authentication. A kubernetes
# # configuration file must be specified if SPIRE server runs
# # outside of the k8s cluster. If empty, SPIRE server is assumed
# # to be running inside the cluster and in-cluster configuration
# # is used. Default: "".
# # kube_config_file = ""
# }
# }

# NodeAttestor "sshpop": A node attestor which attests agent identity
# using an existing ssh certificate.
# NodeAttestor "sshpop" {
@@ -982,6 +941,29 @@ plugins {
# # trust_anchor_id = "153d3e58-cab5-4a59-a0a1-3febad2937c4"
# }
# }

# BundlePublisher "k8s_configmap": A bundle publisher that puts the current trust
# bundle of the server in a designated Kubernetes ConfigMap, keeping it updated.
# BundlePublisher "k8s_configmap" {
# plugin_data {
# clusters = {
# "example-cluster-1" = {
# configmap_name = "example.org"
# configmap_key = "bundle"
# namespace = "spire"
# kubeconfig_path = "/file/path/cluster-1"
# format = "spiffe"
# },
# "example-cluster-2" = {
# configmap_name = "example.org"
# configmap_key = "bundle"
# namespace = "spire"
# kubeconfig_path = "/file/path/cluster-2"
# format = "pem"
# }
# }
# }
# }
}

# telemetry: If telemetry is desired use this section to configure the
@@ -44,15 +44,16 @@ If you don't already have Docker installed, please follow these [installation in
$ make dev-shell
```

3. Create a user with uid 1000. The uid will be registered as a selector of the workload's SPIFFE ID. During kernel based attestation the workload process will be interrogated for the registered uid.
3. Create a user with uid 1001. The uid will be registered as a selector of the workload's SPIFFE ID. During kernel based attestation the workload process will be interrogated for the registered uid.

```shell
(in dev shell) # useradd -u 1000 workload
(in dev shell) # useradd -u 1001 workload
```

4. Build SPIRE by running the **build** target. The build target builds all the SPIRE binaries.
4. Build SPIRE by running the **build** target. The build target builds all the SPIRE binaries. This requires configuring `git` to know that the temporary docker container is safe.

```shell
(in dev shell) # git config --global --add safe.directory /spire
(in dev shell) # make build
```
@@ -169,12 +170,12 @@ If you don't already have Docker installed, please follow these [installation in
(in dev shell) # ./bin/spire-server entry create \
-parentID spiffe://example.org/host \
-spiffeID spiffe://example.org/workload \
-selector unix:uid:1000
-selector unix:uid:1001
```

At this point, the target workload has been registered with the SPIRE Server. We can now call the Workload API using a command line program to request the workload SVID from the SPIRE Agent.

12. Simulate the Workload API interaction and retrieve the workload SVID bundle by running the `api` subcommand in the agent. Run the command as user **_workload_** created in step #3 with uid 1000
12. Simulate the Workload API interaction and retrieve the workload SVID bundle by running the `api` subcommand in the agent. Run the command as user **_workload_** created in step #3 with uid 1001

```shell
(in dev shell) # su -c "./bin/spire-agent api fetch x509 " workload

@@ -183,6 +184,6 @@ If you don't already have Docker installed, please follow these [installation in
13. Examine the output. Optionally, you may write the SVID and key to disk with `-write` in order to examine them in detail.

```shell
(in dev shell) # su -c "./bin/spire-agent api fetch x509 -write ./" workload
(in dev shell) # openssl x509 -in ./svid.0.pem -text -noout
(in dev shell) # su -c "./bin/spire-agent api fetch x509 -write /tmp" workload
(in dev shell) # openssl x509 -in /tmp/svid.0.pem -text -noout
```
@@ -18,6 +18,7 @@ server {
local {
rego_path = "./conf/server/policy.rego"
policy_data_path = "./conf/server/policy_data.json"
use_rego_v1 = true
}
}
}
@@ -1,50 +0,0 @@
# Agent plugin: NodeAttestor "k8s_sat" (deprecated)

**This plugin has been deprecated in favor of the ["k8s_psat"](plugin_agent_nodeattestor_k8s_psat.md) plugin and will be removed in a future release.**

*Must be used in conjunction with the server-side k8s_sat plugin*

The `k8s_sat` plugin attests nodes running in inside of Kubernetes. The agent
reads and provides the signed service account token to the server.

*Note: If your cluster supports [Service Account Token Volume Projection](https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/#service-account-token-volume-projection)
you should instead consider using the `k8s_psat` attestor due to the [security considerations](#security-considerations) below.*

The server-side `k8s_sat` plugin generates a one-time UUID and generates a SPIFFE ID with the form:

```xml
spiffe://<trust_domain>/spire/agent/k8s_sat/<cluster>/<UUID>
```

The main configuration accepts the following values:

| Configuration | Description | Default |
|---------------|-----------------------------------------------------------------------------------------|--------------------------------------------------------|
| `cluster` | Name of the cluster. It must correspond to a cluster configured in the server plugin. |
| `token_path` | Path to the service account token on disk | "/var/run/secrets/kubernetes.io/serviceaccount/token" |

The token path defaults to the default location Kubernetes uses to place the token and should not need to be overridden in most cases.

A sample configuration with the default token path:

```hcl
NodeAttestor "k8s_sat" {
    plugin_data {
        cluster = "MyCluster"
    }
}
```

## Security Considerations

At this time, the service account token does not contain claims that could be
used to strongly identify the node/daemonset/pod running the agent. This means
that any container running in an allowed service account can masquerade as
an agent, giving it access to any identity the agent is capable of issuing. It
is **STRONGLY** recommended that agents run under a dedicated service account.

It should be noted that due to the fact that SPIRE can't positively
identify a node using this method, it is not possible to directly authorize
identities for a distinct node or sets of nodes. Instead, this must be
accomplished indirectly using a service account and deployment that
leverages node affinity or node selectors.
@@ -122,7 +122,7 @@ ghcr.io/spiffe/spire-server 1.9.1 e3b24c3cd9e1 4 weeks ago 10
envoyproxy/envoy contrib-v1.29.1 644f45f6626c 7 weeks ago 181MB
```

Then u4se the `REPOSITORY:TAG` as the selector, not the `IMAGE ID` column.
Then use the `REPOSITORY:TAG` as the selector, not the `IMAGE ID` column.

```shell
$ spire-server entry create \
@@ -1,7 +1,10 @@
# Server plugin: BundlePublisher "aws_rolesanywhere_trustanchor"

> [!WARNING]
> This plugin is only supported when an UpstreamAuthority plugin is used.
> AWS Roles Anywhere only allows configuring up to two CAs per trust anchor. If you are using this plugin, you will
> need to make sure there are at most 2 CAs in the trust bundle for the trust domain, otherwise publishing the bundle
> will fail. This can be achieved by configuring the spire-server with an `UpstreamAuthority` plugin.
> Also, keep in mind that expired CAs are only removed from the bundle 24 hours after their expiration.

The `aws_rolesanywhere_trustanchor` plugin puts the current trust bundle of the server
in a trust anchor, keeping it updated.
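One way to satisfy the warning above is to chain the server to an upstream CA so the trust bundle stays small; a hedged sketch using the `disk` UpstreamAuthority plugin, with file paths that are hypothetical:

```hcl
UpstreamAuthority "disk" {
    plugin_data {
        # Upstream signing certificate and key provisioned out of band.
        cert_file_path = "/opt/spire/conf/server/upstream_ca.crt"
        key_file_path  = "/opt/spire/conf/server/upstream_ca.key"
    }
}
```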
@@ -13,6 +13,7 @@ The plugin accepts the following configuration options:
| bucket | The Amazon S3 bucket name to which the trust bundle is uploaded. | Yes. | |
| object_key | The object key inside the bucket. | Yes. | |
| format | Format in which the trust bundle is stored, <spiffe | jwks | pem>. See [Supported bundle formats](#supported-bundle-formats) for more details. | Yes. | |
| endpoint | A custom S3 endpoint should be set when using third-party object storage providers, such as Minio. | No. | |

## Supported bundle formats
@@ -48,3 +49,19 @@ The following configuration uploads the local trust bundle contents to the `exam
}
}
```

The following configuration uploads the local trust bundle contents to the `example.org` object in the `spire-trust-bundle` bucket on a Minio server.

```hcl
BundlePublisher "aws_s3" {
    plugin_data {
        endpoint = "https://my-org-minio.example.org"
        region = "minio-sample-region"
        access_key_id = "minio-key-id"
        secret_access_key = "minio-access-key"
        bucket = "spire-trust-bundle"
        object_key = "example.org"
        format = "spiffe"
    }
}
```
@@ -0,0 +1,127 @@
# Server plugin: BundlePublisher "k8s_configmap"

The `k8s_configmap` plugin puts the current trust bundle of the server in a designated
Kubernetes ConfigMap, keeping it updated. The plugin supports configuring multiple clusters.

The plugin accepts the following configuration:

| Configuration | Description | Default |
|---------------|----------------------------------------------------------------------------------------------------|---------|
| `clusters` | A map of clusters, keyed by an arbitrary ID, where the plugin publishes the current trust bundle. | |

> [!WARNING]
> When `clusters` is empty, the plugin does not publish the bundle.

Each cluster in the main configuration has the following configuration options:

| Configuration | Description | Required | Default |
|-----------------|-----------------------------------------------------------------------------------------------------------------------------------------------------------------|----------|---------|
| configmap_name | The name of the ConfigMap. | Yes. | |
| configmap_key | The key within the ConfigMap for the bundle. | Yes. | |
| namespace | The namespace containing the ConfigMap. | Yes. | |
| kubeconfig_path | The path on disk to the kubeconfig containing configuration to enable interaction with the Kubernetes API server. If unset, in-cluster credentials will be used. | No. | |
| format | Format in which the trust bundle is stored, <spiffe | jwks | pem>. See [Supported bundle formats](#supported-bundle-formats) for more details. | Yes. | |

## Supported bundle formats

The following bundle formats are supported:

### SPIFFE format

The trust bundle is represented as an RFC 7517 compliant JWK Set, with the specific parameters defined in the [SPIFFE Trust Domain and Bundle specification](https://github.com/spiffe/spiffe/blob/main/standards/SPIFFE_Trust_Domain_and_Bundle.md#4-spiffe-bundle-format). Both the JWT authorities and the X.509 authorities are included.

### JWKS format

The trust bundle is encoded as an RFC 7517 compliant JWK Set, omitting SPIFFE-specific parameters. Both the JWT authorities and the X.509 authorities are included.

### PEM format

The trust bundle is formatted using PEM encoding. Only the X.509 authorities are included.

## Configuring Kubernetes

To use this plugin, configure Kubernetes permissions for the SPIRE Server's Service Account:

- For in-cluster SPIRE servers: grant permissions to the Service Account running SPIRE.
- For out-of-cluster SPIRE servers: grant permissions to the Service Account specified in the kubeconfig.

The plugin uses the Kubernetes Apply operation to manage ConfigMaps. This operation will create the ConfigMap if it doesn't exist, or update it if it does. The Service Account needs permission to use the `patch` verb on ConfigMaps in the specified namespace.

### Required Permissions

The Service Account needs the following permissions:

- `get` on ConfigMaps (required for the Apply operation to read the current state)
- `patch` on ConfigMaps (required for the Apply operation to update resources)
- `create` on ConfigMaps (required if the ConfigMap doesn't exist)

### Example

In this example, assume that Service Account is `spire-server`.

```yaml
kind: Role # Note: Using Role instead of ClusterRole for namespace-scoped permissions
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: spire-server-role
  namespace: spire
rules:
- apiGroups: [""]
  resources: ["configmaps"]
  verbs: ["create", "get", "patch"]
  resourceNames: ["spire-bundle"] # Restrict to specific ConfigMap for create, get and patch operations

---

kind: RoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: spire-server-role-binding
  namespace: spire
subjects:
- kind: ServiceAccount
  name: spire-server
  namespace: spire
roleRef:
  kind: Role
  name: spire-server-role
  apiGroup: rbac.authorization.k8s.io

---

apiVersion: v1
kind: ConfigMap
metadata:
  name: spire-bundle
  namespace: spire
```

> [!NOTE]
> The Apply operation uses Server-Side Apply (SSA) with a field manager name of `spire-bundlepublisher-k8s_configmap`. This ensures that SPIRE's updates to the ConfigMap are tracked and can coexist with other controllers that might be managing different fields of the same ConfigMap.

## Sample configuration

The following configuration keeps the local trust bundle updated in ConfigMaps from two different clusters.

```hcl
BundlePublisher "k8s_configmap" {
    plugin_data {
        clusters = {
            "example-cluster-1" = {
                configmap_name = "example.org"
                configmap_key = "bundle"
                namespace = "spire"
                kubeconfig_path = "/file/path/cluster-1"
                format = "spiffe"
            },
            "example-cluster-2" = {
                configmap_name = "example.org"
                configmap_key = "bundle"
                namespace = "spire"
                kubeconfig_path = "/file/path/cluster-2"
                format = "pem"
            }
        }
    }
}
```
@@ -11,13 +11,15 @@ The `sql` plugin implements SQL based data storage for the SPIRE server using SQ
| client_cert_path | Path to client certificate (MySQL only) |
| client_key_path | Path to private key for client certificate (MySQL only) |
| max_open_conns | The maximum number of open db connections (default: 100) |
| max_idle_conns | The maximum number of idle connections in the pool (default: 2) |
| max_idle_conns | The maximum number of idle connections in the pool (default: 100) |
| conn_max_lifetime | The maximum amount of time a connection may be reused (default: unlimited) |
| disable_migration | True to disable auto-migration functionality. Use of this flag allows finer control over when datastore migrations occur and coordination of the migration of a datastore shared with a SPIRE Server cluster. Only available for databases from SPIRE Code version 0.9.0 or later. |

For more information on the `max_open_conns`, `max_idle_conns`, and `conn_max_lifetime`, refer to the
documentation for the Go [`database/sql`](https://golang.org/pkg/database/sql/#DB) package.

> **Note:** The SQL plugin uses an internal default setting of 30 seconds for the maximum idle time per connection (ConnMaxIdleTime). This setting is not configurable through the plugin configuration.
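For reference, a sketch of a DataStore block that sets the pool options discussed above; the values, and the duration-string form used for `conn_max_lifetime`, are assumptions rather than recommendations:

```hcl
DataStore "sql" {
    plugin_data {
        database_type     = "sqlite3"
        connection_string = "/opt/spire/data/server/datastore.sqlite3"

        # Connection pool tuning; see the Go database/sql documentation.
        max_open_conns    = 100
        max_idle_conns    = 100
        conn_max_lifetime = "1h"
    }
}
```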
## Database configurations

### `database_type = "sqlite3"`
@@ -1,105 +0,0 @@
# Server plugin: NodeAttestor "k8s_sat" (deprecated)

**This plugin has been deprecated in favor of the ["k8s_psat"](plugin_server_nodeattestor_k8s_psat.md) plugin and will be removed in a future release.**

*Must be used in conjunction with the agent-side k8s_sat plugin*

The `k8s_sat` plugin attests nodes running in inside of Kubernetes. The server validates the signed service
account token provided by the agent. This validation can be done in two different ways depending on the value
of the `use_token_review_api_validation` flag:

+ If this value is set to `false` (default behavior), the attestor validates the token locally using the key provided in `service_account_key_file`.
+ If this value is set to `true`, the validation is performed using the Kubernetes [Token Review API](https://kubernetes.io/docs/reference/kubernetes-api/authentication-resources/token-review-v1/).

*Note: If your cluster supports [Service Account Token Volume Projection](https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/#service-account-token-volume-projection)
you should instead consider using the `k8s_psat` attestor due to the [security considerations](#security-considerations) below.*

The server uses a one-time UUID provided by the agent to generate a SPIFFE ID with the form:

```xml
spiffe://<trust_domain>/spire/agent/k8s_sat/<cluster>/<UUID>
```

The server does not need to be running in Kubernetes in order to perform node
attestation. In fact, the plugin can be configured to attest nodes running in
multiple clusters.

The main configuration accepts the following values:

| Configuration | Description | Default |
|---------------|-----------------------------------------------------------------------------------|---------|
| `clusters` | A map of clusters, keyed by an arbitrary ID, that are authorized for attestation. | |

Each cluster in the main configuration requires the following configuration:

| Configuration | Description | Default |
|-----------------------------------|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|---------|
| `service_account_allow_list` | A list of service account names, qualified by namespace (for example, "default:blog" or "production:web") to allow for node attestation. Attestation will be rejected for tokens bound to service accounts that aren't in the allow list. | |
| `use_token_review_api_validation` | Specifies how the service account token is validated. If false, validation is done locally using the provided key. If true, validation is done using token review API. | false |
| `service_account_key_file` | It is only used if `use_token_review_api_validation` is set to `false`. Path on disk to a PEM encoded file containing public keys used in validating tokens for that cluster. RSA and ECDSA keys are supported. For RSA, X509 certificates, PKCS1, and PKIX encoded public keys are accepted. For ECDSA, X509 certificates, and PKIX encoded public keys are accepted. | |
| `kube_config_file` | It is only used if `use_token_review_api_validation` is set to `true`. Path to a k8s configuration file for API Server authentication. A Kubernetes configuration file must be specified if SPIRE server runs outside of the k8s cluster. If empty, SPIRE server is assumed to be running inside the cluster and in-cluster configuration is used. | "" |

A sample configuration for SPIRE server running inside or outside a Kubernetes cluster and validating the service account token with a key file located at `"/run/k8s-certs/sa.pub"`:

```hcl
NodeAttestor "k8s_sat" {
    plugin_data {
        clusters = {
            "MyCluster" = {
                service_account_allow_list = ["production:spire-agent"]
                service_account_key_file = "/run/k8s-certs/sa.pub"
            }
        }
    }
```

A sample configuration for SPIRE server running inside of a Kubernetes cluster and validating the service account token with the kubernetes token review API:

```hcl
NodeAttestor "k8s_sat" {
    plugin_data {
        clusters = {
            "MyCluster" = {
                service_account_allow_list = ["production:spire-agent"]
                use_token_review_api_validation = true
            }
        }
    }
```

A sample configuration for SPIRE server running outside of a Kubernetes cluster and validating the service account token with the kubernetes token review API:

```hcl
NodeAttestor "k8s_sat" {
    plugin_data {
        clusters = {
            "MyCluster" = {
                service_account_allow_list = ["production:spire-agent"]
                use_token_review_api_validation = true
                kube_config_file = "path/to/kubeconfig/file"
            }
        }
    }
```

In addition, this plugin generates the following selectors:

| Selector | Example | Description |
|--------------------|--------------------------------|---------------------------------------------------------------------------------|
| `k8s_sat:cluster` | `k8s_sat:cluster:MyCluster` | Name of the cluster (from the plugin config) used to verify the token signature |
| `k8s_sat:agent_ns` | `k8s_sat:agent_ns:production` | Namespace that the agent is running under |
| `k8s_sat:agent_sa` | `k8s_sat:agent_sa:spire-agent` | Service Account the agent is running under |

## Security Considerations

At this time, the service account token does not contain claims that could be
used to strongly identify the node/daemonset/pod running the agent. This means
that any container running in an allowed service account can masquerade as
an agent, giving it access to any identity the agent is capable of issuing. It
is **STRONGLY** recommended that agents run under a dedicated service account.

It should be noted that due to the fact that SPIRE can't positively
identify a node using this method, it is not possible to directly authorize
identities for a distinct node or sets of nodes. Instead, this must be
accomplished indirectly using a service account and deployment that
leverages node affinity or node selectors.
@@ -4,7 +4,7 @@

The `x509pop` plugin attests nodes that have been provisioned with an x509
identity through an out-of-band mechanism. It verifies that the certificate is
rooted to a trusted set of CAs and issues a signature based proof-of-possession
rooted to a trusted set of CAs and issues a signature-based proof-of-possession
challenge to the agent plugin to verify that the node is in possession of the
private key.
@@ -22,7 +22,7 @@ spiffe://<trust_domain>/spire/agent/x509pop/<fingerprint>
| `svid_prefix` | The prefix of the SVID to use for matching valid SVIDS and exchanging them for Node SVIDs | /spire-exchange |
| `ca_bundle_path` | The path to the trusted CA bundle on disk. The file must contain one or more PEM blocks forming the set of trusted root CA's for chain-of-trust verification. If the CA certificates are in more than one file, use `ca_bundle_paths` instead. | |
| `ca_bundle_paths` | A list of paths to trusted CA bundles on disk. The files must contain one or more PEM blocks forming the set of trusted root CA's for chain-of-trust verification. | |
| `agent_path_template` | A URL path portion format of Agent's SPIFFE ID. Describe in text/template format. | `See [Agent Path Template](#agent-path-template) for details` |
| `agent_path_template` | A URL path portion format of Agent's SPIFFE ID. Describe in text/template format. | See [Agent Path Template](#agent-path-template) for details |

A sample configuration:
@@ -30,7 +30,7 @@ A sample configuration:
NodeAttestor "x509pop" {
plugin_data {
ca_bundle_path = "/opt/spire/conf/server/agent-cacert.pem"

# Change the agent's SPIFFE ID format
# agent_path_template = "/cn/{{ .Subject.CommonName }}"
}
@@ -39,34 +39,29 @@ A sample configuration:

## Selectors

| Selector | Example | Description |
|------------------|-------------------------------------------------------------------|------------------------------------------------------------------------------------------|
| Common Name | `x509pop:subject:cn:example.org` | The Subject's Common Name (see X.500 Distinguished Names) |
| SHA1 Fingerprint | `x509pop:ca:fingerprint:0beec7b5ea3f0fdbc95d0dd47f3c5bc275da8a33` | The SHA1 fingerprint as a hex string for each cert in the PoP chain, excluding the leaf. |
| SerialNumber | `x509pop:serialnumber:0a1b2c3d4e5f` | The leaf certificate serial number as a lowercase hexadecimal string |
| Selector | Example | Description |
|------------------|-------------------------------------------------------------------|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
| Common Name | `x509pop:subject:cn:example.org` | The Subject's Common Name (see X.500 Distinguished Names) |
| SHA1 Fingerprint | `x509pop:ca:fingerprint:0beec7b5ea3f0fdbc95d0dd47f3c5bc275da8a33` | The SHA1 fingerprint as a hex string for each cert in the PoP chain, excluding the leaf. |
| SerialNumber | `x509pop:serialnumber:0a1b2c3d4e5f` | The leaf certificate serial number as a lowercase hexadecimal string |
| San | `x509pop:san:<key>:<value>` | The san selectors on the leaf certificate. The expected format of the uri san is `x509pop://<trust_domain>/<key>:<value>`. One selector is exposed per uri san corresponding to x509pop uri scheme. string |

## SVID Path Prefix

When mode="spiffe", the SPIFFE ID being exchanged must be prefixed by the specified svid_prefix. The prefix will be removed from the .SVIDPathTrimmed property before sending to the
agent path template. If set to "", all prefixes are allowed and you will want to do limiting logic in in the agent_path_template.
When `mode="spiffe"` the SPIFFE ID being exchanged must be prefixed by the specified `svid_prefix`. The prefix will be removed from the `.SVIDPathTrimmed` property before sending to the agent path template. If `svid_prefix` is set to `""`, all prefixes will be allowed, and the limiting logic will have to be implemented in the `agent_path_template`.

Example: if your trust domain is example.com and svid_prefix = the default of /spire-exchange, and agent path template is the default,

spiffe://example.com/spire-exchange/testhost will render out to spiffe://example.com/spire/agent/x509pop/testhost

If spiffe://example.com/other/testhost is given, it wont match the svid_prefix and it will be rejected.
**Example:** If your trust domain is example.com and `svid_prefix` is set to its default value `/spire-exchange`, and [agent_path_template](#agent-path-template) is the default too, then the SPIFFE ID from the x509 identity `spiffe://example.com/spire-exchange/testhost` will be exchanged for `spiffe://example.com/spire/agent/x509pop/testhost`. If a SPIFFE ID with a different prefix is given, for example `spiffe://example.com/other/testhost`, it will not match the `svid_prefix` and will be rejected.

## Agent Path Template

The agent path template is a way of customizing the format of generated SPIFFE IDs for agents.
Specifying the value of `agent_path_template` provides a way of customizing the format of generated SPIFFE IDs for agents. The default format for every mode is shown below

If using ca_bundle_path(s), the default is:
"{{ .PluginName }}/{{ .Fingerprint }}"
| `mode` | `agent_path_template` |
|----------------|--------------------------------------------|
| `spiffe` | `{{ .PluginName }}/{{ .SVIDPathTrimmed }}` |
| `external_pki` | `{{ .PluginName }}/{{ .Fingerprint }}` |

If using spire_trust_bundle, the default exchanges an SVID under `/spire-exchange/*` for `/spire/agent/x509pop/*`, via:
"{{ .PluginName }}/{{ .SVIDPathTrimmed }}"

The template formatter is using Golang text/template conventions, it can reference values provided by the plugin or in a [golang x509.Certificate](https://pkg.go.dev/crypto/x509#Certificate)
The template formatter is using Golang text/template conventions. It can reference values provided by the plugin or in a [golang x509.Certificate](https://pkg.go.dev/crypto/x509#Certificate).
Details about the template engine are available [here](template_engine.md).

Some useful values are:
@@ -21,7 +21,6 @@ This document is a configuration reference for SPIRE Agent. It includes informat
| NodeAttestor | [azure_msi](/doc/plugin_agent_nodeattestor_azure_msi.md) | A node attestor which attests agent identity using an Azure MSI token |
| NodeAttestor | [gcp_iit](/doc/plugin_agent_nodeattestor_gcp_iit.md) | A node attestor which attests agent identity using a GCP Instance Identity Token |
| NodeAttestor | [join_token](/doc/plugin_agent_nodeattestor_jointoken.md) | A node attestor which uses a server-generated join token |
| NodeAttestor | [k8s_sat](/doc/plugin_agent_nodeattestor_k8s_sat.md) (deprecated) | A node attestor which attests agent identity using a Kubernetes Service Account token |
| NodeAttestor | [k8s_psat](/doc/plugin_agent_nodeattestor_k8s_psat.md) | A node attestor which attests agent identity using a Kubernetes Projected Service Account token |
| NodeAttestor | [sshpop](/doc/plugin_agent_nodeattestor_sshpop.md) | A node attestor which attests agent identity using an existing ssh certificate |
| NodeAttestor | [x509pop](/doc/plugin_agent_nodeattestor_x509pop.md) | A node attestor which attests agent identity using an existing X.509 certificate |
@@ -66,6 +65,7 @@ This may be useful for templating configuration files, for example across differ
| `sds` | Optional SDS configuration section | |
| `trust_bundle_path` | Path to the SPIRE server CA bundle | |
| `trust_bundle_url` | URL to download the initial SPIRE server trust bundle | |
| `trust_bundle_unix_socket` | Make the request specified via trust_bundle_url happen against the specified unix socket. | |
| `trust_bundle_format` | Format of the initial trust bundle, pem or spiffe | pem |
| `trust_domain` | The trust domain that this agent belongs to (should be no more than 255 characters) | |
| `workload_x509_svid_key_type` | The workload X509 SVID key type <rsa-2048|ec-p256> | ec-p256 |
@@ -77,15 +77,17 @@ This may be useful for templating configuration files, for example across differ
|:------------------------------|--------------------------------------------------------------------------------------|-------------------------|
| `named_pipe_name` | Pipe name to bind the SPIRE Agent API named pipe (Windows only) | \spire-agent\public\api |
| `sync_interval` | Sync interval with SPIRE server with exponential backoff | 5 sec |
| `use_sync_authorized_entries` | Use SyncAuthorizedEntries API for periodically synchronization of authorized entries | false |
| `use_sync_authorized_entries` | Use SyncAuthorizedEntries API for periodic synchronization of authorized entries | true |
| `require_pq_kem` | Require use of a post-quantum-safe key exchange method for TLS handshakes | false |

### Initial trust bundle configuration

The agent needs an initial trust bundle in order to connect securely to the SPIRE server. There are three options:
The agent needs an initial trust bundle in order to connect securely to the SPIRE server. There are four options:

1. If the `trust_bundle_path` option is used, the agent will read the initial trust bundle from the file at that path. You need to copy or share the file before starting the SPIRE agent.
2. If the `trust_bundle_url` option is used, the agent will read the initial trust bundle from the specified URL. **The URL must start with `https://` for security, and the server must have a valid certificate (verified with the system trust store).** This can be used to rapidly deploy SPIRE agents without having to manually share a file. Keep in mind the contents of the URL need to be kept up to date.
2. If the `trust_bundle_url` option is used, the agent will read the initial trust bundle from the specified URL.
   1. If trust_bundle_unix_socket is unset, **The URL must start with `https://` for security, and the server must have a valid certificate (verified with the system trust store).** This can be used to rapidly deploy SPIRE agents without having to manually share a file. Keep in mind the contents of the URL need to be kept up to date.
   2. If trust_bundle_unix_socket is set, **The URL must start with `http://`.** This can be used along with a local service running on the socket to fetch up-to-date trust bundles via some site-specific, secure mechanism (see the sketch after this list).
3. If the `insecure_bootstrap` option is set to `true`, then the agent will not use an initial trust bundle. It will connect to the SPIRE server without authenticating it. This is not a secure configuration, because a man-in-the-middle attacker could control the SPIRE infrastructure. It is included because it is a useful option for testing and development.

Only one of these three options may be set at a time.
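A sketch of option 2.2 above, assuming a local bundle service listening on a unix socket; the socket path, URL, and bundle format are illustrative only:

```hcl
agent {
    trust_domain   = "example.org"
    server_address = "spire-server.example.org"
    server_port    = 8081

    # With trust_bundle_unix_socket set, the URL uses http:// and the request
    # is sent over the local socket instead of the network.
    trust_bundle_url         = "http://localhost/bundle"
    trust_bundle_unix_socket = "/tmp/bundle-provider.sock"
    trust_bundle_format      = "spiffe"
}
```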
@@ -27,7 +27,6 @@ This document is a configuration reference for SPIRE Server. It includes informa
| NodeAttestor | [azure_msi](/doc/plugin_server_nodeattestor_azure_msi.md) | A node attestor which attests agent identity using an Azure MSI token |
| NodeAttestor | [gcp_iit](/doc/plugin_server_nodeattestor_gcp_iit.md) | A node attestor which attests agent identity using a GCP Instance Identity Token |
| NodeAttestor | [join_token](/doc/plugin_server_nodeattestor_jointoken.md) | A node attestor which validates agents attesting with server-generated join tokens |
| NodeAttestor | [k8s_sat](/doc/plugin_server_nodeattestor_k8s_sat.md) (deprecated) | A node attestor which attests agent identity using a Kubernetes Service Account token |
| NodeAttestor | [k8s_psat](/doc/plugin_server_nodeattestor_k8s_psat.md) | A node attestor which attests agent identity using a Kubernetes Projected Service Account token |
| NodeAttestor | [sshpop](/doc/plugin_server_nodeattestor_sshpop.md) | A node attestor which attests agent identity using an existing ssh certificate |
| NodeAttestor | [tpm_devid](/doc/plugin_server_nodeattestor_tpm_devid.md) | A node attestor which attests agent identity using a TPM that has been provisioned with a DevID certificate |
@ -82,7 +81,7 @@ This may be useful for templating configuration files, for example across differ
|
|||
| `ratelimit` | Rate limiting configurations, usually used when the server is behind a load balancer (see below) | |
|
||||
| `socket_path` | Path to bind the SPIRE Server API socket to (Unix only) | /tmp/spire-server/private/api.sock |
|
||||
| `trust_domain` | The trust domain that this server belongs to (should be no more than 255 characters) | |
|
||||
| `use_legacy_downstream_x509_ca_ttl` | Use the downstream spire-server registration entry TTL as the downstream CA TTL. This is deprecated and will be removed in a future version. | false |
|
||||
|
||||
| ca_subject | Description | Default |
|
||||
|:----------------------------|--------------------------------|----------------|
|
||||
|
@ -95,10 +94,10 @@ This may be useful for templating configuration files, for example across differ
|
|||
| `cache_reload_interval` | The amount of time between two reloads of the in-memory entry cache. Increasing this will mitigate high database load for extra large deployments, but will also slow propagation of new or updated entries to agents. | 5s |
|
||||
| `events_based_cache` | Use events to update the cache with what's changed since the last update. Enabling this will reduce overhead on the database. | false |
|
||||
| `prune_events_older_than` | How old an event can be before being deleted. Used with the events-based cache. Decreasing this keeps the events table smaller, but increases the risk of missing an event if the connection to the database is down. | 12h |
|
||||
| `sql_transaction_timeout` | Maximum time an SQL transaction may take; used by the events-based cache to determine when an event ID is unlikely to be used anymore. | 24h |
|
||||
| `event_timeout` | Maximum time to wait for an event to come in before giving up. | 15m |
|
||||
| `auth_opa_policy_engine` | The [auth opa_policy engine](/doc/authorization_policy_engine.md) used for authorization decisions | default SPIRE authorization policy |
|
||||
| `named_pipe_name` | Pipe name of the SPIRE Server API named pipe (Windows only) | \spire-server\private\api |
|
||||
| `require_pq_kem` | Require use of a post-quantum-safe key exchange method for TLS handshakes | false |
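As a rough sketch of how the events-cache options fit together (assuming they sit under the server's `experimental` block; the durations simply restate the defaults from the table above):

```hcl
server {
    experimental {
        # Update the in-memory entry cache from events instead of full reloads
        events_based_cache = true

        # Events older than this are pruned; lowering it keeps the events table
        # small but increases the risk of missing an event during a database outage
        prune_events_older_than = "12h"

        # Upper bound on SQL transaction duration, used to decide when an event ID
        # can no longer appear
        sql_transaction_timeout = "24h"
    }
}
```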
|
||||
|
||||
| ratelimit | Description | Default |
|
||||
|:--------------|----------------------------------------------------------------------------------------------------------------------------------------------------|---------|
|
||||
|
@ -109,10 +108,11 @@ This may be useful for templating configuration files, for example across differ
|
|||
|:-----------------------|---------------------------------------------------|---------|
|
||||
| `local` | Local OPA configuration for authorization policy. | |
|
||||
|
||||
| auth_opa_policy_engine.local | Description | Default |
|
||||
|:------------------------------|-------------------------------------------------------------------------------------------|----------------|
|
||||
| `rego_path` | File to retrieve OPA rego policy for authorization. | |
|
||||
| `policy_data_path` | File to retrieve databindings for policy evaluation. | |
|
||||
| `use_rego_v1` | Use rego V1 when evaluating the policy. This will become the default in a future release. | false |
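A minimal sketch of a local policy engine configuration (the file paths are placeholders):

```hcl
auth_opa_policy_engine {
    local {
        # Rego policy evaluated for authorization decisions
        rego_path = "/opt/spire/conf/server/policy.rego"

        # Data bindings made available to the policy
        policy_data_path = "/opt/spire/conf/server/policy_data.json"

        # Evaluate the policy using Rego v1 semantics
        use_rego_v1 = true
    }
}
```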
|
||||
|
||||
### Profiling Names
|
||||
|
||||
|
@ -261,7 +261,7 @@ When setting a `bundle_endpoint`, it is `required` to specify the bundle profile
|
|||
|
||||
Allowed profiles:
|
||||
|
||||
- `https_web` allows configuring either the [Automated Certificate Management Environment](#configuration-options-for-federationbundle_endpointprofile-https_webacme) or the [serving cert file](#configuration-options-for-federationbundle_endpointprofile-https_webserving_cert_file) section (see the sketch after this list).
|
||||
- `https_spiffe`
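For orientation, a hedged sketch of an `https_web` bundle endpoint using ACME; the `address`/`port` settings and the ACME option names shown are illustrative placeholders rather than a definitive reference (see the option tables below):

```hcl
federation {
    bundle_endpoint {
        address = "0.0.0.0"
        port    = 8443

        # Serve the bundle over HTTPS with a certificate obtained via ACME
        profile "https_web" {
            acme {
                domain_name = "bundle.example.org"
                email       = "admin@example.org"
            }
        }
    }
}
```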
|
||||
|
||||
### Configuration options for `federation.bundle_endpoint.profile "https_web".acme`
|
||||
|
|
319
go.mod
319
go.mod
|
@ -1,234 +1,230 @@
|
|||
module github.com/spiffe/spire
|
||||
|
||||
go 1.23.6
|
||||
go 1.24.4
|
||||
|
||||
require (
|
||||
cloud.google.com/go/iam v1.3.0
|
||||
cloud.google.com/go/kms v1.20.4
|
||||
cloud.google.com/go/secretmanager v1.14.2
|
||||
cloud.google.com/go/security v1.18.2
|
||||
cloud.google.com/go/storage v1.50.0
|
||||
github.com/Azure/azure-sdk-for-go/sdk/azcore v1.17.0
|
||||
github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.8.1
|
||||
cloud.google.com/go/iam v1.5.2
|
||||
cloud.google.com/go/kms v1.22.0
|
||||
cloud.google.com/go/secretmanager v1.15.0
|
||||
cloud.google.com/go/security v1.18.5
|
||||
cloud.google.com/go/storage v1.55.0
|
||||
github.com/Azure/azure-sdk-for-go/sdk/azcore v1.18.0
|
||||
github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.10.1
|
||||
github.com/Azure/azure-sdk-for-go/sdk/keyvault/azkeys v0.10.0
|
||||
github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute v1.0.0
|
||||
github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork v1.1.0
|
||||
github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/resources/armresources v1.2.0
|
||||
github.com/GoogleCloudPlatform/cloudsql-proxy v1.37.4
|
||||
github.com/GoogleCloudPlatform/cloudsql-proxy v1.37.7
|
||||
github.com/Keyfactor/ejbca-go-client-sdk v1.0.2
|
||||
github.com/Masterminds/sprig/v3 v3.3.0
|
||||
github.com/Microsoft/go-winio v0.6.2
|
||||
github.com/andres-erbsen/clock v0.0.0-20160526145045-9e14626cd129
|
||||
github.com/aws/aws-sdk-go-v2 v1.36.0
|
||||
github.com/aws/aws-sdk-go-v2/config v1.29.0
|
||||
github.com/aws/aws-sdk-go-v2/credentials v1.17.53
|
||||
github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.24
|
||||
github.com/aws/aws-sdk-go-v2 v1.36.5
|
||||
github.com/aws/aws-sdk-go-v2/config v1.29.16
|
||||
github.com/aws/aws-sdk-go-v2/credentials v1.17.69
|
||||
github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.31
|
||||
github.com/aws/aws-sdk-go-v2/feature/rds/auth v1.5.1
|
||||
github.com/aws/aws-sdk-go-v2/service/acmpca v1.37.0
|
||||
github.com/aws/aws-sdk-go-v2/service/ec2 v1.202.1
|
||||
github.com/aws/aws-sdk-go-v2/service/iam v1.38.1
|
||||
github.com/aws/aws-sdk-go-v2/service/kms v1.37.8
|
||||
github.com/aws/aws-sdk-go-v2/service/organizations v1.37.0
|
||||
github.com/aws/aws-sdk-go-v2/service/rolesanywhere v1.16.0
|
||||
github.com/aws/aws-sdk-go-v2/service/s3 v1.75.1
|
||||
github.com/aws/aws-sdk-go-v2/service/secretsmanager v1.34.0
|
||||
github.com/aws/aws-sdk-go-v2/service/sts v1.33.8
|
||||
github.com/aws/smithy-go v1.22.2
|
||||
github.com/aws/aws-sdk-go-v2/service/acmpca v1.40.0
|
||||
github.com/aws/aws-sdk-go-v2/service/ec2 v1.229.0
|
||||
github.com/aws/aws-sdk-go-v2/service/iam v1.43.0
|
||||
github.com/aws/aws-sdk-go-v2/service/kms v1.41.0
|
||||
github.com/aws/aws-sdk-go-v2/service/organizations v1.39.0
|
||||
github.com/aws/aws-sdk-go-v2/service/rolesanywhere v1.17.0
|
||||
github.com/aws/aws-sdk-go-v2/service/s3 v1.83.0
|
||||
github.com/aws/aws-sdk-go-v2/service/secretsmanager v1.35.0
|
||||
github.com/aws/aws-sdk-go-v2/service/sts v1.34.0
|
||||
github.com/aws/smithy-go v1.22.4
|
||||
github.com/blang/semver/v4 v4.0.0
|
||||
github.com/cenkalti/backoff/v4 v4.3.0
|
||||
github.com/docker/docker v27.5.1+incompatible
|
||||
github.com/envoyproxy/go-control-plane/envoy v1.32.3
|
||||
github.com/docker/docker v28.3.1+incompatible
|
||||
github.com/envoyproxy/go-control-plane/envoy v1.32.4
|
||||
github.com/fullsailor/pkcs7 v0.0.0-20190404230743-d7302db945fa
|
||||
github.com/go-jose/go-jose/v4 v4.0.4
|
||||
github.com/go-sql-driver/mysql v1.8.1
|
||||
github.com/go-jose/go-jose/v4 v4.1.1
|
||||
github.com/go-sql-driver/mysql v1.9.3
|
||||
github.com/godbus/dbus/v5 v5.1.0
|
||||
github.com/gofrs/uuid/v5 v5.3.0
|
||||
github.com/gofrs/uuid/v5 v5.3.2
|
||||
github.com/gogo/status v1.1.1
|
||||
github.com/google/btree v1.1.3
|
||||
github.com/google/go-cmp v0.6.0
|
||||
github.com/google/go-containerregistry v0.20.3
|
||||
github.com/google/go-tpm v0.9.3
|
||||
github.com/google/go-tpm-tools v0.4.4
|
||||
github.com/googleapis/gax-go/v2 v2.14.1
|
||||
github.com/google/go-cmp v0.7.0
|
||||
github.com/google/go-containerregistry v0.20.6
|
||||
github.com/google/go-tpm v0.9.5
|
||||
github.com/google/go-tpm-tools v0.4.5
|
||||
github.com/googleapis/gax-go/v2 v2.14.2
|
||||
github.com/gorilla/handlers v1.5.2
|
||||
github.com/hashicorp/go-hclog v1.6.3
|
||||
github.com/hashicorp/go-metrics v0.5.4
|
||||
github.com/hashicorp/go-plugin v1.6.3
|
||||
github.com/hashicorp/hcl v1.0.1-vault-7
|
||||
github.com/hashicorp/vault/api v1.15.0
|
||||
github.com/hashicorp/vault/sdk v0.14.1
|
||||
github.com/hashicorp/vault/api v1.20.0
|
||||
github.com/hashicorp/vault/sdk v0.18.0
|
||||
github.com/imdario/mergo v0.3.16
|
||||
github.com/imkira/go-observer v1.0.3
|
||||
github.com/jackc/pgx/v5 v5.7.2
|
||||
github.com/jackc/pgx/v5 v5.7.5
|
||||
github.com/jinzhu/gorm v1.9.16
|
||||
github.com/lib/pq v1.10.9
|
||||
github.com/mattn/go-sqlite3 v1.14.24
|
||||
github.com/mattn/go-sqlite3 v1.14.28
|
||||
github.com/mitchellh/cli v1.1.5
|
||||
github.com/open-policy-agent/opa v0.70.0
|
||||
github.com/prometheus/client_golang v1.20.5
|
||||
github.com/shirou/gopsutil/v4 v4.24.12
|
||||
github.com/sigstore/cosign/v2 v2.4.1
|
||||
github.com/sigstore/rekor v1.3.9
|
||||
github.com/sigstore/sigstore v1.8.12
|
||||
github.com/open-policy-agent/opa v1.5.1
|
||||
github.com/prometheus/client_golang v1.22.0
|
||||
github.com/shirou/gopsutil/v4 v4.25.6
|
||||
github.com/sigstore/cosign/v2 v2.5.2
|
||||
github.com/sigstore/rekor v1.3.10
|
||||
github.com/sigstore/sigstore v1.9.5
|
||||
github.com/sirupsen/logrus v1.9.3
|
||||
github.com/spiffe/go-spiffe/v2 v2.5.0
|
||||
github.com/spiffe/spire-api-sdk v1.2.5-0.20240916165922-16526993814a
|
||||
github.com/spiffe/spire-plugin-sdk v1.4.4-0.20240701180828-594312f4444d
|
||||
github.com/spiffe/spire-api-sdk v1.2.5-0.20250109200630-101d5e7de758
|
||||
github.com/spiffe/spire-plugin-sdk v1.4.4-0.20250606112051-68609d83ce7c
|
||||
github.com/stretchr/testify v1.10.0
|
||||
github.com/uber-go/tally/v4 v4.1.16
|
||||
github.com/uber-go/tally/v4 v4.1.17
|
||||
github.com/valyala/fastjson v1.6.4
|
||||
golang.org/x/crypto v0.32.0
|
||||
golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56
|
||||
golang.org/x/net v0.34.0
|
||||
golang.org/x/sync v0.10.0
|
||||
golang.org/x/sys v0.29.0
|
||||
golang.org/x/time v0.10.0
|
||||
google.golang.org/api v0.220.0
|
||||
google.golang.org/genproto/googleapis/rpc v0.0.0-20250127172529-29210b9bc287
|
||||
google.golang.org/grpc v1.70.0
|
||||
google.golang.org/protobuf v1.36.4
|
||||
k8s.io/api v0.32.1
|
||||
k8s.io/apimachinery v0.32.1
|
||||
k8s.io/client-go v0.32.1
|
||||
k8s.io/kube-aggregator v0.32.1
|
||||
k8s.io/mount-utils v0.32.1
|
||||
sigs.k8s.io/controller-runtime v0.20.1
|
||||
golang.org/x/crypto v0.39.0
|
||||
golang.org/x/exp v0.0.0-20250408133849-7e4ce0ab07d0
|
||||
golang.org/x/net v0.41.0
|
||||
golang.org/x/sync v0.15.0
|
||||
golang.org/x/sys v0.33.0
|
||||
golang.org/x/time v0.12.0
|
||||
google.golang.org/api v0.240.0
|
||||
google.golang.org/genproto/googleapis/rpc v0.0.0-20250603155806-513f23925822
|
||||
google.golang.org/grpc v1.73.0
|
||||
google.golang.org/protobuf v1.36.6
|
||||
k8s.io/api v0.33.2
|
||||
k8s.io/apimachinery v0.33.2
|
||||
k8s.io/client-go v0.33.2
|
||||
k8s.io/kube-aggregator v0.33.2
|
||||
k8s.io/mount-utils v0.33.2
|
||||
sigs.k8s.io/controller-runtime v0.21.0
|
||||
)
|
||||
|
||||
require (
|
||||
cel.dev/expr v0.19.0 // indirect
|
||||
cloud.google.com/go v0.116.0 // indirect
|
||||
cloud.google.com/go/auth v0.14.1 // indirect
|
||||
cloud.google.com/go/auth/oauth2adapt v0.2.7 // indirect
|
||||
cloud.google.com/go/compute/metadata v0.6.0 // indirect
|
||||
cloud.google.com/go/longrunning v0.6.2 // indirect
|
||||
cloud.google.com/go/monitoring v1.21.2 // indirect
|
||||
cel.dev/expr v0.23.0 // indirect
|
||||
cloud.google.com/go v0.121.1 // indirect
|
||||
cloud.google.com/go/auth v0.16.2 // indirect
|
||||
cloud.google.com/go/auth/oauth2adapt v0.2.8 // indirect
|
||||
cloud.google.com/go/compute/metadata v0.7.0 // indirect
|
||||
cloud.google.com/go/longrunning v0.6.7 // indirect
|
||||
cloud.google.com/go/monitoring v1.24.2 // indirect
|
||||
dario.cat/mergo v1.0.1 // indirect
|
||||
filippo.io/edwards25519 v1.1.0 // indirect
|
||||
github.com/Azure/azure-sdk-for-go/sdk/internal v1.10.0 // indirect
|
||||
github.com/Azure/azure-sdk-for-go/sdk/internal v1.11.1 // indirect
|
||||
github.com/Azure/azure-sdk-for-go/sdk/keyvault/internal v0.7.1 // indirect
|
||||
github.com/AzureAD/microsoft-authentication-library-for-go v1.3.2 // indirect
|
||||
github.com/AzureAD/microsoft-authentication-library-for-go v1.4.2 // indirect
|
||||
github.com/DataDog/datadog-go v3.2.0+incompatible // indirect
|
||||
github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp v1.25.0 // indirect
|
||||
github.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/metric v0.48.1 // indirect
|
||||
github.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/resourcemapping v0.48.1 // indirect
|
||||
github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp v1.27.0 // indirect
|
||||
github.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/metric v0.51.0 // indirect
|
||||
github.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/resourcemapping v0.51.0 // indirect
|
||||
github.com/Masterminds/goutils v1.1.1 // indirect
|
||||
github.com/Masterminds/semver/v3 v3.3.0 // indirect
|
||||
github.com/OneOfOne/xxhash v1.2.8 // indirect
|
||||
github.com/agnivade/levenshtein v1.2.0 // indirect
|
||||
github.com/Masterminds/semver/v3 v3.3.1 // indirect
|
||||
github.com/agnivade/levenshtein v1.2.1 // indirect
|
||||
github.com/armon/go-radix v1.0.0 // indirect
|
||||
github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.8 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.30 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.30 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/internal/ini v1.8.1 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/internal/v4a v1.3.30 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/service/ecr v1.24.7 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/service/ecrpublic v1.21.6 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.12.2 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.5.4 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.12.11 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.18.11 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/service/sso v1.24.10 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/service/ssooidc v1.28.9 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.11 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.36 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.36 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/internal/ini v1.8.3 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/internal/v4a v1.3.36 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.12.4 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.7.4 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.12.17 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.18.17 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/service/sso v1.25.4 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/service/ssooidc v1.30.2 // indirect
|
||||
github.com/beorn7/perks v1.0.1 // indirect
|
||||
github.com/bgentry/speakeasy v0.1.0 // indirect
|
||||
github.com/bgentry/speakeasy v0.2.0 // indirect
|
||||
github.com/blang/semver v3.5.1+incompatible // indirect
|
||||
github.com/cenkalti/backoff/v5 v5.0.2 // indirect
|
||||
github.com/cespare/xxhash/v2 v2.3.0 // indirect
|
||||
github.com/cncf/xds/go v0.0.0-20240905190251-b4127c9b8d78 // indirect
|
||||
github.com/cncf/xds/go v0.0.0-20250326154945-ae57f3c0d45f // indirect
|
||||
github.com/containerd/errdefs v1.0.0 // indirect
|
||||
github.com/containerd/errdefs/pkg v0.3.0 // indirect
|
||||
github.com/containerd/stargz-snapshotter/estargz v0.16.3 // indirect
|
||||
github.com/cyberphone/json-canonicalization v0.0.0-20231011164504-785e29786b46 // indirect
|
||||
github.com/cyberphone/json-canonicalization v0.0.0-20241213102144-19d51d7fe467 // indirect
|
||||
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect
|
||||
github.com/digitorus/pkcs7 v0.0.0-20230818184609-3a137a874352 // indirect
|
||||
github.com/digitorus/timestamp v0.0.0-20231217203849-220c5c2851b7 // indirect
|
||||
github.com/distribution/reference v0.6.0 // indirect
|
||||
github.com/docker/cli v27.5.0+incompatible // indirect
|
||||
github.com/docker/cli v28.2.2+incompatible // indirect
|
||||
github.com/docker/distribution v2.8.3+incompatible // indirect
|
||||
github.com/docker/docker-credential-helpers v0.8.2 // indirect
|
||||
github.com/docker/docker-credential-helpers v0.9.3 // indirect
|
||||
github.com/docker/go-connections v0.5.0 // indirect
|
||||
github.com/docker/go-units v0.5.0 // indirect
|
||||
github.com/dustin/go-humanize v1.0.1 // indirect
|
||||
github.com/ebitengine/purego v0.8.1 // indirect
|
||||
github.com/ebitengine/purego v0.8.4 // indirect
|
||||
github.com/emicklei/go-restful/v3 v3.11.0 // indirect
|
||||
github.com/envoyproxy/protoc-gen-validate v1.1.0 // indirect
|
||||
github.com/evanphx/json-patch/v5 v5.9.0 // indirect
|
||||
github.com/fatih/color v1.17.0 // indirect
|
||||
github.com/envoyproxy/protoc-gen-validate v1.2.1 // indirect
|
||||
github.com/evanphx/json-patch/v5 v5.9.11 // indirect
|
||||
github.com/fatih/color v1.18.0 // indirect
|
||||
github.com/felixge/httpsnoop v1.0.4 // indirect
|
||||
github.com/fsnotify/fsnotify v1.7.0 // indirect
|
||||
github.com/fsnotify/fsnotify v1.9.0 // indirect
|
||||
github.com/fxamacker/cbor/v2 v2.7.0 // indirect
|
||||
github.com/go-chi/chi v4.1.2+incompatible // indirect
|
||||
github.com/go-ini/ini v1.67.0 // indirect
|
||||
github.com/go-jose/go-jose/v3 v3.0.3 // indirect
|
||||
github.com/go-logr/logr v1.4.2 // indirect
|
||||
github.com/go-jose/go-jose/v3 v3.0.4 // indirect
|
||||
github.com/go-logr/logr v1.4.3 // indirect
|
||||
github.com/go-logr/stdr v1.2.2 // indirect
|
||||
github.com/go-ole/go-ole v1.2.6 // indirect
|
||||
github.com/go-openapi/analysis v0.23.0 // indirect
|
||||
github.com/go-openapi/errors v0.22.0 // indirect
|
||||
github.com/go-openapi/errors v0.22.1 // indirect
|
||||
github.com/go-openapi/jsonpointer v0.21.0 // indirect
|
||||
github.com/go-openapi/jsonreference v0.21.0 // indirect
|
||||
github.com/go-openapi/loads v0.22.0 // indirect
|
||||
github.com/go-openapi/runtime v0.28.0 // indirect
|
||||
github.com/go-openapi/spec v0.21.0 // indirect
|
||||
github.com/go-openapi/strfmt v0.23.0 // indirect
|
||||
github.com/go-openapi/swag v0.23.0 // indirect
|
||||
github.com/go-openapi/swag v0.23.1 // indirect
|
||||
github.com/go-openapi/validate v0.24.0 // indirect
|
||||
github.com/go-viper/mapstructure/v2 v2.3.0 // indirect
|
||||
github.com/gobwas/glob v0.2.3 // indirect
|
||||
github.com/gogo/googleapis v0.0.0-20180223154316-0cd9801be74a // indirect
|
||||
github.com/gogo/protobuf v1.3.2 // indirect
|
||||
github.com/golang-jwt/jwt/v5 v5.2.1 // indirect
|
||||
github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect
|
||||
github.com/golang/mock v1.6.0 // indirect
|
||||
github.com/golang-jwt/jwt/v5 v5.2.2 // indirect
|
||||
github.com/golang/mock v1.7.0-rc.1 // indirect
|
||||
github.com/golang/protobuf v1.5.4 // indirect
|
||||
github.com/golang/snappy v0.0.4 // indirect
|
||||
github.com/google/certificate-transparency-go v1.2.1 // indirect
|
||||
github.com/google/flatbuffers v23.5.26+incompatible // indirect
|
||||
github.com/google/gnostic-models v0.6.9-0.20230804172637-c7be7c783f49 // indirect
|
||||
github.com/google/go-configfs-tsm v0.2.2 // indirect
|
||||
github.com/google/go-sev-guest v0.9.3 // indirect
|
||||
github.com/google/go-tdx-guest v0.3.1 // indirect
|
||||
github.com/google/gofuzz v1.2.0 // indirect
|
||||
github.com/google/certificate-transparency-go v1.3.2 // indirect
|
||||
github.com/google/gnostic-models v0.6.9 // indirect
|
||||
github.com/google/go-configfs-tsm v0.3.3-0.20240919001351-b4b5b84fdcbc // indirect
|
||||
github.com/google/go-sev-guest v0.12.1 // indirect
|
||||
github.com/google/go-tdx-guest v0.3.2-0.20241009005452-097ee70d0843 // indirect
|
||||
github.com/google/logger v1.1.1 // indirect
|
||||
github.com/google/s2a-go v0.1.9 // indirect
|
||||
github.com/google/uuid v1.6.0 // indirect
|
||||
github.com/googleapis/enterprise-certificate-proxy v0.3.4 // indirect
|
||||
github.com/googleapis/enterprise-certificate-proxy v0.3.6 // indirect
|
||||
github.com/gorilla/mux v1.8.1 // indirect
|
||||
github.com/grpc-ecosystem/grpc-gateway/v2 v2.24.0 // indirect
|
||||
github.com/hashicorp/errwrap v1.1.0 // indirect
|
||||
github.com/hashicorp/go-cleanhttp v0.5.2 // indirect
|
||||
github.com/hashicorp/go-immutable-radix v1.3.1 // indirect
|
||||
github.com/hashicorp/go-multierror v1.1.1 // indirect
|
||||
github.com/hashicorp/go-retryablehttp v0.7.7 // indirect
|
||||
github.com/hashicorp/go-rootcerts v1.0.2 // indirect
|
||||
github.com/hashicorp/go-secure-stdlib/parseutil v0.1.8 // indirect
|
||||
github.com/hashicorp/go-secure-stdlib/parseutil v0.2.0 // indirect
|
||||
github.com/hashicorp/go-secure-stdlib/strutil v0.1.2 // indirect
|
||||
github.com/hashicorp/go-sockaddr v1.0.6 // indirect
|
||||
github.com/hashicorp/go-sockaddr v1.0.7 // indirect
|
||||
github.com/hashicorp/golang-lru v1.0.2 // indirect
|
||||
github.com/hashicorp/yamux v0.1.1 // indirect
|
||||
github.com/hashicorp/yamux v0.1.2 // indirect
|
||||
github.com/huandu/xstrings v1.5.0 // indirect
|
||||
github.com/in-toto/attestation v1.1.1 // indirect
|
||||
github.com/in-toto/in-toto-golang v0.9.0 // indirect
|
||||
github.com/inconshreveable/mousetrap v1.1.0 // indirect
|
||||
github.com/jackc/pgpassfile v1.0.0 // indirect
|
||||
github.com/jackc/pgservicefile v0.0.0-20240606120523-5a60cdf6a761 // indirect
|
||||
github.com/jedisct1/go-minisign v0.0.0-20230811132847-661be99b8267 // indirect
|
||||
github.com/jinzhu/inflection v1.0.0 // indirect
|
||||
github.com/jmespath/go-jmespath v0.4.1-0.20220621161143-b0104c826a24 // indirect
|
||||
github.com/josharian/intern v1.0.0 // indirect
|
||||
github.com/json-iterator/go v1.1.12 // indirect
|
||||
github.com/klauspost/compress v1.17.11 // indirect
|
||||
github.com/klauspost/compress v1.18.0 // indirect
|
||||
github.com/kylelemons/godebug v1.1.0 // indirect
|
||||
github.com/letsencrypt/boulder v0.0.0-20240620165639-de9c06129bec // indirect
|
||||
github.com/lufia/plan9stats v0.0.0-20211012122336-39d0f177ccd0 // indirect
|
||||
github.com/magiconair/properties v1.8.7 // indirect
|
||||
github.com/mailru/easyjson v0.7.7 // indirect
|
||||
github.com/mattn/go-colorable v0.1.13 // indirect
|
||||
github.com/mailru/easyjson v0.9.0 // indirect
|
||||
github.com/mattn/go-colorable v0.1.14 // indirect
|
||||
github.com/mattn/go-isatty v0.0.20 // indirect
|
||||
github.com/mitchellh/copystructure v1.2.0 // indirect
|
||||
github.com/mitchellh/go-homedir v1.1.0 // indirect
|
||||
github.com/mitchellh/mapstructure v1.5.0 // indirect
|
||||
github.com/mitchellh/mapstructure v1.5.1-0.20231216201459-8508981c8b6c // indirect
|
||||
github.com/mitchellh/reflectwalk v1.0.2 // indirect
|
||||
github.com/moby/docker-image-spec v1.3.1 // indirect
|
||||
github.com/moby/sys/mountinfo v0.7.2 // indirect
|
||||
github.com/moby/sys/userns v0.1.0 // indirect
|
||||
github.com/moby/sys/sequential v0.6.0 // indirect
|
||||
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
|
||||
github.com/modern-go/reflect2 v1.0.2 // indirect
|
||||
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect
|
||||
|
@ -236,45 +232,46 @@ require (
|
|||
github.com/oklog/run v1.1.0 // indirect
|
||||
github.com/oklog/ulid v1.3.1 // indirect
|
||||
github.com/opencontainers/go-digest v1.0.0 // indirect
|
||||
github.com/opencontainers/image-spec v1.1.0 // indirect
|
||||
github.com/opencontainers/image-spec v1.1.1 // indirect
|
||||
github.com/opentracing/opentracing-go v1.2.0 // indirect
|
||||
github.com/pborman/uuid v1.2.1 // indirect
|
||||
github.com/pelletier/go-toml/v2 v2.2.2 // indirect
|
||||
github.com/pelletier/go-toml/v2 v2.2.4 // indirect
|
||||
github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c // indirect
|
||||
github.com/pkg/errors v0.9.1 // indirect
|
||||
github.com/planetscale/vtprotobuf v0.6.1-0.20240319094008-0393e58bdf10 // indirect
|
||||
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect
|
||||
github.com/posener/complete v1.2.3 // indirect
|
||||
github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c // indirect
|
||||
github.com/prometheus/client_model v0.6.1 // indirect
|
||||
github.com/prometheus/common v0.55.0 // indirect
|
||||
github.com/prometheus/client_model v0.6.2 // indirect
|
||||
github.com/prometheus/common v0.63.0 // indirect
|
||||
github.com/prometheus/procfs v0.15.1 // indirect
|
||||
github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475 // indirect
|
||||
github.com/ryanuber/go-glob v1.0.0 // indirect
|
||||
github.com/sagikazarmark/locafero v0.4.0 // indirect
|
||||
github.com/sagikazarmark/slog-shim v0.1.0 // indirect
|
||||
github.com/sagikazarmark/locafero v0.7.0 // indirect
|
||||
github.com/sassoftware/relic v7.2.1+incompatible // indirect
|
||||
github.com/secure-systems-lab/go-securesystemslib v0.9.0 // indirect
|
||||
github.com/shibumi/go-pathspec v1.3.0 // indirect
|
||||
github.com/shopspring/decimal v1.4.0 // indirect
|
||||
github.com/sigstore/protobuf-specs v0.3.3 // indirect
|
||||
github.com/sigstore/timestamp-authority v1.2.2 // indirect
|
||||
github.com/sigstore/protobuf-specs v0.4.3 // indirect
|
||||
github.com/sigstore/sigstore-go v1.0.0 // indirect
|
||||
github.com/sigstore/timestamp-authority v1.2.8 // indirect
|
||||
github.com/sourcegraph/conc v0.3.0 // indirect
|
||||
github.com/spf13/afero v1.11.0 // indirect
|
||||
github.com/spf13/cast v1.7.0 // indirect
|
||||
github.com/spf13/cobra v1.8.1 // indirect
|
||||
github.com/spf13/pflag v1.0.5 // indirect
|
||||
github.com/spf13/viper v1.19.0 // indirect
|
||||
github.com/spf13/afero v1.12.0 // indirect
|
||||
github.com/spf13/cast v1.7.1 // indirect
|
||||
github.com/spf13/cobra v1.9.1 // indirect
|
||||
github.com/spf13/pflag v1.0.6 // indirect
|
||||
github.com/spf13/viper v1.20.1 // indirect
|
||||
github.com/subosito/gotenv v1.6.0 // indirect
|
||||
github.com/syndtr/goleveldb v1.0.1-0.20220721030215-126854af5e6d // indirect
|
||||
github.com/tchap/go-patricia/v2 v2.3.1 // indirect
|
||||
github.com/tchap/go-patricia/v2 v2.3.2 // indirect
|
||||
github.com/theupdateframework/go-tuf v0.7.0 // indirect
|
||||
github.com/theupdateframework/go-tuf/v2 v2.1.1 // indirect
|
||||
github.com/titanous/rocacheck v0.0.0-20171023193734-afe73141d399 // indirect
|
||||
github.com/tklauser/go-sysconf v0.3.12 // indirect
|
||||
github.com/tklauser/numcpus v0.6.1 // indirect
|
||||
github.com/transparency-dev/merkle v0.0.2 // indirect
|
||||
github.com/twmb/murmur3 v1.1.8 // indirect
|
||||
github.com/vbatts/tar-split v0.11.6 // indirect
|
||||
github.com/vbatts/tar-split v0.12.1 // indirect
|
||||
github.com/vektah/gqlparser/v2 v2.5.26 // indirect
|
||||
github.com/x448/float16 v0.8.4 // indirect
|
||||
github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb // indirect
|
||||
github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 // indirect
|
||||
|
@ -282,34 +279,32 @@ require (
|
|||
github.com/yusufpapurcu/wmi v1.2.4 // indirect
|
||||
github.com/zeebo/errs v1.4.0 // indirect
|
||||
go.mongodb.org/mongo-driver v1.14.0 // indirect
|
||||
go.opencensus.io v0.24.0 // indirect
|
||||
go.opentelemetry.io/auto/sdk v1.1.0 // indirect
|
||||
go.opentelemetry.io/contrib/detectors/gcp v1.32.0 // indirect
|
||||
go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.58.0 // indirect
|
||||
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.58.0 // indirect
|
||||
go.opentelemetry.io/otel v1.34.0 // indirect
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.33.0 // indirect
|
||||
go.opentelemetry.io/otel/metric v1.34.0 // indirect
|
||||
go.opentelemetry.io/otel/sdk v1.34.0 // indirect
|
||||
go.opentelemetry.io/otel/sdk/metric v1.32.0 // indirect
|
||||
go.opentelemetry.io/otel/trace v1.34.0 // indirect
|
||||
go.opentelemetry.io/contrib/detectors/gcp v1.36.0 // indirect
|
||||
go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.61.0 // indirect
|
||||
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.61.0 // indirect
|
||||
go.opentelemetry.io/otel v1.36.0 // indirect
|
||||
go.opentelemetry.io/otel/metric v1.36.0 // indirect
|
||||
go.opentelemetry.io/otel/sdk v1.36.0 // indirect
|
||||
go.opentelemetry.io/otel/sdk/metric v1.36.0 // indirect
|
||||
go.opentelemetry.io/otel/trace v1.36.0 // indirect
|
||||
go.uber.org/atomic v1.11.0 // indirect
|
||||
go.uber.org/multierr v1.11.0 // indirect
|
||||
go.uber.org/zap v1.27.0 // indirect
|
||||
golang.org/x/mod v0.22.0 // indirect
|
||||
golang.org/x/oauth2 v0.25.0 // indirect
|
||||
golang.org/x/term v0.28.0 // indirect
|
||||
golang.org/x/text v0.21.0 // indirect
|
||||
google.golang.org/genproto v0.0.0-20241118233622-e639e219e697 // indirect
|
||||
google.golang.org/genproto/googleapis/api v0.0.0-20250106144421-5f5ef82da422 // indirect
|
||||
golang.org/x/mod v0.25.0 // indirect
|
||||
golang.org/x/oauth2 v0.30.0 // indirect
|
||||
golang.org/x/term v0.32.0 // indirect
|
||||
golang.org/x/text v0.26.0 // indirect
|
||||
google.golang.org/genproto v0.0.0-20250505200425-f936aa4a68b2 // indirect
|
||||
google.golang.org/genproto/googleapis/api v0.0.0-20250603155806-513f23925822 // indirect
|
||||
gopkg.in/evanphx/json-patch.v4 v4.12.0 // indirect
|
||||
gopkg.in/inf.v0 v0.9.1 // indirect
|
||||
gopkg.in/ini.v1 v1.67.0 // indirect
|
||||
gopkg.in/yaml.v3 v3.0.1 // indirect
|
||||
k8s.io/klog/v2 v2.130.1 // indirect
|
||||
k8s.io/kube-openapi v0.0.0-20241105132330-32ad38e42d3f // indirect
|
||||
k8s.io/utils v0.0.0-20241104100929-3ea5e8cea738 // indirect
|
||||
k8s.io/kube-openapi v0.0.0-20250318190949-c8a335a9a2ff // indirect
|
||||
k8s.io/utils v0.0.0-20241210054802-24370beab758 // indirect
|
||||
sigs.k8s.io/json v0.0.0-20241010143419-9aa6b5e7a4b3 // indirect
|
||||
sigs.k8s.io/structured-merge-diff/v4 v4.4.2 // indirect
|
||||
sigs.k8s.io/randfill v1.0.0 // indirect
|
||||
sigs.k8s.io/structured-merge-diff/v4 v4.6.0 // indirect
|
||||
sigs.k8s.io/yaml v1.4.0 // indirect
|
||||
)
|
||||
|
|
|
@ -2,6 +2,7 @@ package agent
|
|||
|
||||
import (
|
||||
"context"
|
||||
"crypto/x509"
|
||||
"errors"
|
||||
"fmt"
|
||||
"net/http"
|
||||
|
@ -114,13 +115,20 @@ func (a *Agent) Run(ctx context.Context) error {
|
|||
)
|
||||
|
||||
for {
|
||||
as, err = a.attest(ctx, sto, cat, metrics, nodeAttestor)
|
||||
if err == nil {
|
||||
break
|
||||
insecureBootstrap := false
|
||||
bootstrapTrustBundle, err := sto.LoadBundle()
|
||||
if errors.Is(err, storage.ErrNotCached) {
|
||||
bootstrapTrustBundle, insecureBootstrap, err = a.c.TrustBundleSources.GetBundle()
|
||||
}
|
||||
if err == nil {
|
||||
as, err = a.attest(ctx, sto, cat, metrics, nodeAttestor, bootstrapTrustBundle, insecureBootstrap)
|
||||
if err == nil {
|
||||
break
|
||||
}
|
||||
|
||||
if status.Code(err) == codes.PermissionDenied {
|
||||
return err
|
||||
if status.Code(err) == codes.PermissionDenied {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
nextDuration := attBackoff.NextBackOff()
|
||||
|
@ -141,7 +149,15 @@ func (a *Agent) Run(ctx context.Context) error {
|
|||
}
|
||||
}
|
||||
} else {
|
||||
as, err = a.attest(ctx, sto, cat, metrics, nodeAttestor)
|
||||
insecureBootstrap := false
|
||||
bootstrapTrustBundle, err := sto.LoadBundle()
|
||||
if errors.Is(err, storage.ErrNotCached) {
|
||||
bootstrapTrustBundle, insecureBootstrap, err = a.c.TrustBundleSources.GetBundle()
|
||||
}
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
as, err = a.attest(ctx, sto, cat, metrics, nodeAttestor, bootstrapTrustBundle, insecureBootstrap)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
@ -249,19 +265,19 @@ func (a *Agent) setupProfiling(ctx context.Context) (stop func()) {
|
|||
}
|
||||
}
|
||||
|
||||
func (a *Agent) attest(ctx context.Context, sto storage.Storage, cat catalog.Catalog, metrics telemetry.Metrics, na nodeattestor.NodeAttestor) (*node_attestor.AttestationResult, error) {
|
||||
func (a *Agent) attest(ctx context.Context, sto storage.Storage, cat catalog.Catalog, metrics telemetry.Metrics, na nodeattestor.NodeAttestor, bootstrapTrustBundle []*x509.Certificate, insecureBootstrap bool) (*node_attestor.AttestationResult, error) {
|
||||
config := node_attestor.Config{
|
||||
Catalog: cat,
|
||||
Metrics: metrics,
|
||||
JoinToken: a.c.JoinToken,
|
||||
TrustDomain: a.c.TrustDomain,
|
||||
TrustBundle: a.c.TrustBundle,
|
||||
InsecureBootstrap: a.c.InsecureBootstrap,
|
||||
Storage: sto,
|
||||
Log: a.c.Log.WithField(telemetry.SubsystemName, telemetry.Attestor),
|
||||
ServerAddress: a.c.ServerAddress,
|
||||
NodeAttestor: na,
|
||||
TLSPolicy: a.c.TLSPolicy,
|
||||
Catalog: cat,
|
||||
Metrics: metrics,
|
||||
JoinToken: a.c.JoinToken,
|
||||
TrustDomain: a.c.TrustDomain,
|
||||
BootstrapTrustBundle: bootstrapTrustBundle,
|
||||
InsecureBootstrap: insecureBootstrap,
|
||||
Storage: sto,
|
||||
Log: a.c.Log.WithField(telemetry.SubsystemName, telemetry.Attestor),
|
||||
ServerAddress: a.c.ServerAddress,
|
||||
NodeAttestor: na,
|
||||
TLSPolicy: a.c.TLSPolicy,
|
||||
}
|
||||
return node_attestor.New(&config).Attest(ctx)
|
||||
}
|
||||
|
|
|
@ -3,6 +3,7 @@ package debug
|
|||
import (
|
||||
"context"
|
||||
"crypto/x509"
|
||||
"fmt"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
|
@ -13,6 +14,7 @@ import (
|
|||
debugv1 "github.com/spiffe/spire-api-sdk/proto/spire/api/agent/debug/v1"
|
||||
"github.com/spiffe/spire-api-sdk/proto/spire/api/types"
|
||||
"github.com/spiffe/spire/pkg/agent/manager"
|
||||
"github.com/spiffe/spire/pkg/common/util"
|
||||
"github.com/spiffe/spire/test/clock"
|
||||
"google.golang.org/grpc"
|
||||
"google.golang.org/grpc/codes"
|
||||
|
@ -92,15 +94,32 @@ func (s *Service) GetInfo(context.Context, *debugv1.GetInfoRequest) (*debugv1.Ge
|
|||
})
|
||||
}
|
||||
|
||||
uptime, err := util.CheckedCast[int32](int64(s.uptime().Seconds()))
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("invalid value for uptime: %w", err)
|
||||
}
|
||||
x509SvidsCount, err := util.CheckedCast[int32](s.m.CountX509SVIDs())
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("out of range value for X.509 SVIDs count: %w", err)
|
||||
}
|
||||
jwtSvidsCount, err := util.CheckedCast[int32](s.m.CountJWTSVIDs())
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("out of range value for JWT SVIDs count: %w", err)
|
||||
}
|
||||
svidstoreX509SvidsCount, err := util.CheckedCast[int32](s.m.CountSVIDStoreX509SVIDs())
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("out of range value for SVIDStore X.509 SVIDs count: %w", err)
|
||||
}
|
||||
|
||||
// Reset clock and set current response
|
||||
s.getInfoResp.ts = s.clock.Now()
|
||||
s.getInfoResp.resp = &debugv1.GetInfoResponse{
|
||||
SvidChain: svidChain,
|
||||
Uptime: int32(s.uptime().Seconds()),
|
||||
SvidsCount: int32(s.m.CountX509SVIDs()),
|
||||
CachedX509SvidsCount: int32(s.m.CountX509SVIDs()),
|
||||
CachedJwtSvidsCount: int32(s.m.CountJWTSVIDs()),
|
||||
CachedSvidstoreX509SvidsCount: int32(s.m.CountSVIDStoreX509SVIDs()),
|
||||
Uptime: uptime,
|
||||
SvidsCount: x509SvidsCount,
|
||||
CachedX509SvidsCount: x509SvidsCount,
|
||||
CachedJwtSvidsCount: jwtSvidsCount,
|
||||
CachedSvidstoreX509SvidsCount: svidstoreX509SvidsCount,
|
||||
LastSyncSuccess: s.m.GetLastSync().UTC().Unix(),
|
||||
}
|
||||
}
|
||||
|
|
|
@ -3,6 +3,7 @@ package delegatedidentity
|
|||
import (
|
||||
"context"
|
||||
"crypto/x509"
|
||||
"errors"
|
||||
"fmt"
|
||||
"sort"
|
||||
"time"
|
||||
|
@ -261,7 +262,7 @@ func composeX509SVIDBySelectors(update *cache.WorkloadUpdate) (*delegatedidentit
|
|||
|
||||
// check if SVIDs exist for the identity
|
||||
if len(identity.SVID) == 0 {
|
||||
return nil, fmt.Errorf("unable to get SVID from identity")
|
||||
return nil, errors.New("unable to get SVID from identity")
|
||||
}
|
||||
|
||||
id, err := idutil.IDProtoFromString(identity.Entry.SpiffeId)
|
||||
|
|
|
@ -48,17 +48,17 @@ type Attestor interface {
|
|||
}
|
||||
|
||||
type Config struct {
|
||||
Catalog catalog.Catalog
|
||||
Metrics telemetry.Metrics
|
||||
JoinToken string
|
||||
TrustDomain spiffeid.TrustDomain
|
||||
TrustBundle []*x509.Certificate
|
||||
InsecureBootstrap bool
|
||||
Storage storage.Storage
|
||||
Log logrus.FieldLogger
|
||||
ServerAddress string
|
||||
NodeAttestor nodeattestor.NodeAttestor
|
||||
TLSPolicy tlspolicy.Policy
|
||||
Catalog catalog.Catalog
|
||||
Metrics telemetry.Metrics
|
||||
JoinToken string
|
||||
TrustDomain spiffeid.TrustDomain
|
||||
BootstrapTrustBundle []*x509.Certificate
|
||||
InsecureBootstrap bool
|
||||
Storage storage.Storage
|
||||
Log logrus.FieldLogger
|
||||
ServerAddress string
|
||||
NodeAttestor nodeattestor.NodeAttestor
|
||||
TLSPolicy tlspolicy.Policy
|
||||
}
|
||||
|
||||
type attestor struct {
|
||||
|
@ -157,12 +157,12 @@ func (a *attestor) loadBundle() (*spiffebundle.Bundle, error) {
|
|||
bundle, err := a.c.Storage.LoadBundle()
|
||||
if errors.Is(err, storage.ErrNotCached) {
|
||||
if a.c.InsecureBootstrap {
|
||||
if len(a.c.TrustBundle) > 0 {
|
||||
if len(a.c.BootstrapTrustBundle) > 0 {
|
||||
a.c.Log.Warn("Trust bundle will be ignored; performing insecure bootstrap")
|
||||
}
|
||||
return nil, nil
|
||||
}
|
||||
bundle = a.c.TrustBundle
|
||||
bundle = a.c.BootstrapTrustBundle
|
||||
} else if err != nil {
|
||||
return nil, fmt.Errorf("load bundle: %w", err)
|
||||
}
|
||||
|
|
|
@ -317,16 +317,16 @@ func TestAttestor(t *testing.T) {
|
|||
// create the attestor
|
||||
log, _ := test.NewNullLogger()
|
||||
attestor := attestor.New(&attestor.Config{
|
||||
Catalog: catalog,
|
||||
Metrics: telemetry.Blackhole{},
|
||||
JoinToken: testCase.agentService.joinToken,
|
||||
Storage: sto,
|
||||
Log: log,
|
||||
TrustDomain: trustDomain,
|
||||
TrustBundle: makeTrustBundle(testCase.bootstrapBundle),
|
||||
InsecureBootstrap: testCase.insecureBootstrap,
|
||||
ServerAddress: listener.Addr().String(),
|
||||
NodeAttestor: agentNA,
|
||||
Catalog: catalog,
|
||||
Metrics: telemetry.Blackhole{},
|
||||
JoinToken: testCase.agentService.joinToken,
|
||||
Storage: sto,
|
||||
Log: log,
|
||||
TrustDomain: trustDomain,
|
||||
BootstrapTrustBundle: makeTrustBundle(testCase.bootstrapBundle),
|
||||
InsecureBootstrap: testCase.insecureBootstrap,
|
||||
ServerAddress: listener.Addr().String(),
|
||||
NodeAttestor: agentNA,
|
||||
})
|
||||
|
||||
// perform attestation
|
||||
|
|
|
@ -2,7 +2,7 @@ package catalog
|
|||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"errors"
|
||||
|
||||
"github.com/sirupsen/logrus"
|
||||
"github.com/spiffe/go-spiffe/v2/spiffeid"
|
||||
|
@ -84,7 +84,7 @@ func (repo *Repository) Close() {
|
|||
|
||||
func Load(ctx context.Context, config Config) (_ *Repository, err error) {
|
||||
if c, ok := config.PluginConfigs.Find(nodeAttestorType, jointoken.PluginName); ok && c.IsEnabled() && c.IsExternal() {
|
||||
return nil, fmt.Errorf("the built-in join_token node attestor cannot be overridden by an external plugin")
|
||||
return nil, errors.New("the built-in join_token node attestor cannot be overridden by an external plugin")
|
||||
}
|
||||
|
||||
// Load the plugins and populate the repository
|
||||
|
|
|
@ -8,7 +8,6 @@ import (
|
|||
"github.com/spiffe/spire/pkg/agent/plugin/nodeattestor/httpchallenge"
|
||||
"github.com/spiffe/spire/pkg/agent/plugin/nodeattestor/jointoken"
|
||||
"github.com/spiffe/spire/pkg/agent/plugin/nodeattestor/k8spsat"
|
||||
"github.com/spiffe/spire/pkg/agent/plugin/nodeattestor/k8ssat"
|
||||
"github.com/spiffe/spire/pkg/agent/plugin/nodeattestor/sshpop"
|
||||
"github.com/spiffe/spire/pkg/agent/plugin/nodeattestor/tpmdevid"
|
||||
"github.com/spiffe/spire/pkg/agent/plugin/nodeattestor/x509pop"
|
||||
|
@ -41,7 +40,6 @@ func (repo *nodeAttestorRepository) BuiltIns() []catalog.BuiltIn {
|
|||
httpchallenge.BuiltIn(),
|
||||
jointoken.BuiltIn(),
|
||||
k8spsat.BuiltIn(),
|
||||
k8ssat.BuiltIn(),
|
||||
sshpop.BuiltIn(),
|
||||
tpmdevid.BuiltIn(),
|
||||
x509pop.BuiltIn(),
|
||||
|
|
|
@ -39,6 +39,7 @@ var (
|
|||
RevisionNumber: true,
|
||||
StoreSvid: true,
|
||||
Hint: true,
|
||||
CreatedAt: true,
|
||||
}
|
||||
)
|
||||
|
||||
|
@ -416,6 +417,27 @@ func (c *client) syncEntries(ctx context.Context, cachedEntries map[string]*comm
|
|||
return stats, nil
|
||||
}
|
||||
|
||||
func entryIsStale(entry *common.RegistrationEntry, revisionNumber, revisionCreatedAt int64) bool {
|
||||
if entry.RevisionNumber != revisionNumber {
|
||||
return true
|
||||
}
|
||||
|
||||
// TODO: remove in SPIRE 1.14
|
||||
if revisionCreatedAt == 0 {
|
||||
return false
|
||||
}
|
||||
|
||||
// Verify that the CreatedAt of the entries match. If they are different, they are
|
||||
// completely different entries even if the revision number is the same.
|
||||
// This can happen for example if an entry is deleted and recreated with the
|
||||
// same entry id.
|
||||
if entry.CreatedAt != revisionCreatedAt {
|
||||
return true
|
||||
}
|
||||
|
||||
return false
|
||||
}
|
||||
|
||||
func (c *client) streamAndSyncEntries(ctx context.Context, entryClient entryv1.EntryClient, cachedEntries map[string]*common.RegistrationEntry) (stats SyncEntriesStats, err error) {
|
||||
// Build a set of all the entries to be removed. This set is initialized
|
||||
// with all entries currently known. As entries are synced down from the
|
||||
|
@ -459,7 +481,7 @@ func (c *client) streamAndSyncEntries(ctx context.Context, entryClient entryv1.E
|
|||
// If entry is either not cached or is stale, record the ID so
|
||||
// the full entry can be requested after syncing down all
|
||||
// entry revisions.
|
||||
if cachedEntry, ok := cachedEntries[entryRevision.Id]; !ok || cachedEntry.RevisionNumber < entryRevision.RevisionNumber {
|
||||
if cachedEntry, ok := cachedEntries[entryRevision.Id]; !ok || entryIsStale(cachedEntry, entryRevision.GetRevisionNumber(), entryRevision.GetCreatedAt()) {
|
||||
needFull = append(needFull, entryRevision.Id)
|
||||
}
|
||||
}
|
||||
|
@ -487,7 +509,7 @@ func (c *client) streamAndSyncEntries(ctx context.Context, entryClient entryv1.E
|
|||
switch {
|
||||
case !ok:
|
||||
stats.Missing++
|
||||
case cachedEntry.RevisionNumber < entry.RevisionNumber:
|
||||
case entryIsStale(cachedEntry, entry.GetRevisionNumber(), entry.GetCreatedAt()):
|
||||
stats.Stale++
|
||||
}
|
||||
|
||||
|
@ -549,10 +571,7 @@ func (c *client) streamAndSyncEntries(ctx context.Context, entryClient entryv1.E
|
|||
// time using the assumed page size.
|
||||
for len(needFull) > 0 {
|
||||
// Request up to a page full of full entries
|
||||
n := len(needFull)
|
||||
if n > pageSize {
|
||||
n = pageSize
|
||||
}
|
||||
n := min(len(needFull), pageSize)
|
||||
if err := stream.Send(&entryv1.SyncAuthorizedEntriesRequest{Ids: needFull[:n]}); err != nil {
|
||||
return SyncEntriesStats{}, err
|
||||
}
|
||||
|
|
|
@ -21,6 +21,7 @@ import (
|
|||
svidv1 "github.com/spiffe/spire-api-sdk/proto/spire/api/server/svid/v1"
|
||||
"github.com/spiffe/spire-api-sdk/proto/spire/api/types"
|
||||
"github.com/spiffe/spire/pkg/common/telemetry"
|
||||
"github.com/spiffe/spire/pkg/server/api"
|
||||
"github.com/spiffe/spire/pkg/server/api/entry/v1"
|
||||
"github.com/spiffe/spire/proto/spire/common"
|
||||
"github.com/spiffe/spire/test/spiretest"
|
||||
|
@ -285,14 +286,19 @@ func TestSyncUpdatesEntries(t *testing.T) {
|
|||
assert.Equal(t, expected, cachedEntries)
|
||||
}
|
||||
|
||||
entryA1 := makeEntry("A", 1)
|
||||
entryB1 := makeEntry("B", 1)
|
||||
entryC1 := makeEntry("C", 1)
|
||||
entryD1 := makeEntry("D", 1)
|
||||
firstDate := time.Date(2024, time.December, 31, 0, 0, 0, 0, time.UTC)
|
||||
secondDate := time.Date(2025, time.January, 1, 0, 0, 0, 0, time.UTC)
|
||||
|
||||
entryA2 := makeEntry("A", 2)
|
||||
entryB2 := makeEntry("B", 2)
|
||||
entryC2 := makeEntry("C", 2)
|
||||
entryA1 := makeEntry("A", 1, firstDate)
|
||||
entryB1 := makeEntry("B", 1, firstDate)
|
||||
entryC1 := makeEntry("C", 1, firstDate)
|
||||
entryD1 := makeEntry("D", 1, firstDate)
|
||||
|
||||
entryA2 := makeEntry("A", 2, firstDate)
|
||||
entryB2 := makeEntry("B", 2, firstDate)
|
||||
entryC2 := makeEntry("C", 2, firstDate)
|
||||
|
||||
entryB1prime := makeEntry("B", 1, secondDate)
|
||||
|
||||
// No entries yet
|
||||
syncAndAssertEntries(t, 0, 0, 0, 0)
|
||||
|
@ -314,6 +320,12 @@ func TestSyncUpdatesEntries(t *testing.T) {
|
|||
|
||||
// Sync again but with no changes.
|
||||
syncAndAssertEntries(t, 3, 0, 0, 0, entryA2, entryB2, entryC2)
|
||||
|
||||
// Sync again after recreating an entry with the same entry ID, which should be marked stale
|
||||
syncAndAssertEntries(t, 3, 0, 1, 0, entryA2, entryB1prime, entryC2)
|
||||
|
||||
// Sync again after the database has been rolled back to a previous version
|
||||
syncAndAssertEntries(t, 4, 1, 3, 0, entryA1, entryB1, entryC1, entryD1)
|
||||
}
|
||||
|
||||
func TestRenewSVID(t *testing.T) {
|
||||
|
@ -1012,7 +1024,13 @@ func (c *fakeEntryServer) GetAuthorizedEntries(_ context.Context, in *entryv1.Ge
|
|||
|
||||
func (c *fakeEntryServer) SyncAuthorizedEntries(stream entryv1.Entry_SyncAuthorizedEntriesServer) error {
|
||||
const entryPageSize = 2
|
||||
return entry.SyncAuthorizedEntries(stream, c.entries, entryPageSize)
|
||||
|
||||
entries := []api.ReadOnlyEntry{}
|
||||
for _, entry := range c.entries {
|
||||
entries = append(entries, api.NewReadOnlyEntry(entry))
|
||||
}
|
||||
|
||||
return entry.SyncAuthorizedEntries(stream, entries, entryPageSize)
|
||||
}
|
||||
|
||||
type fakeBundleServer struct {
|
||||
|
@ -1142,6 +1160,7 @@ func checkAuthorizedEntryOutputMask(outputMask *types.EntryMask) error {
|
|||
RevisionNumber: true,
|
||||
StoreSvid: true,
|
||||
Hint: true,
|
||||
CreatedAt: true,
|
||||
}, protocmp.Transform()); diff != "" {
|
||||
return status.Errorf(codes.InvalidArgument, "invalid output mask requested: %s", diff)
|
||||
}
|
||||
|
@ -1162,12 +1181,13 @@ func makeCommonBundle(trustDomainName string) *common.Bundle {
|
|||
}
|
||||
}
|
||||
|
||||
func makeEntry(id string, revisionNumber int64) *types.Entry {
|
||||
func makeEntry(id string, revisionNumber int64, createdAt time.Time) *types.Entry {
|
||||
return &types.Entry{
|
||||
Id: id,
|
||||
SpiffeId: &types.SPIFFEID{TrustDomain: "example.org", Path: "/workload"},
|
||||
ParentId: &types.SPIFFEID{TrustDomain: "example.org", Path: "/agent"},
|
||||
Selectors: []*types.Selector{{Type: "not", Value: "relevant"}},
|
||||
RevisionNumber: revisionNumber,
|
||||
CreatedAt: createdAt.Unix(),
|
||||
}
|
||||
}
|
||||
|
|
|
@ -34,7 +34,7 @@ func slicedEntryFromProto(e *types.Entry) (*common.RegistrationEntry, error) {
|
|||
}
|
||||
|
||||
if e.Id == "" {
|
||||
return nil, fmt.Errorf("missing entry ID")
|
||||
return nil, errors.New("missing entry ID")
|
||||
}
|
||||
|
||||
spiffeID, err := spiffeIDFromProto(e.SpiffeId)
|
||||
|
@ -81,5 +81,6 @@ func slicedEntryFromProto(e *types.Entry) (*common.RegistrationEntry, error) {
|
|||
Admin: e.Admin,
|
||||
Downstream: e.Downstream,
|
||||
Hint: e.Hint,
|
||||
CreatedAt: e.CreatedAt,
|
||||
}, nil
|
||||
}
|
||||
|
|
|
@ -43,7 +43,7 @@ func GetCgroups(pid int32, fs FileSystem) ([]Cgroup, error) {
|
|||
token := scanner.Text()
|
||||
substrings := strings.SplitN(token, ":", 3)
|
||||
if len(substrings) < 3 {
|
||||
return nil, fmt.Errorf("cgroup entry contains %v colons, but expected at least 2 colons: %q", len(substrings), token)
|
||||
return nil, fmt.Errorf("invalid cgroup entry, contains %v colon separated fields but expected at least 3: %q", len(substrings), token)
|
||||
}
|
||||
cgroups = append(cgroups, Cgroup{
|
||||
HierarchyID: substrings[0],
|
||||
|
|
|
@ -23,9 +23,21 @@ const (
|
|||
2:blkio:/user.slice
|
||||
1:name=systemd:/user.slice/user-1000.slice/session-2.scope
|
||||
`
|
||||
// cgBadFormat is a malformed set of cgroup entries (no slash separator)
|
||||
// cgBadFormat is a malformed set of cgroup entries (missing cgroup-path)
|
||||
cgBadFormat = `11:hugetlb
|
||||
`
|
||||
// cgUnified is a good set of cgroup entries including unified
|
||||
cgUnified = `10:devices:/user.slice
|
||||
9:net_cls,net_prio:/
|
||||
8:blkio:/
|
||||
7:freezer:/
|
||||
6:perf_event:/
|
||||
5:cpuset:/
|
||||
4:memory:/user.slice
|
||||
3:pids:/user.slice/user-1000.slice/user@1000.service
|
||||
2:cpu,cpuacct:/
|
||||
1:name=systemd:/user.slice/user-1000.slice/user@1000.service/gnome-terminal-server.service
|
||||
0::/user.slice/user-1000.slice/user@1000.service/gnome-terminal-server.service`
|
||||
)
|
||||
|
||||
var (
|
||||
|
@ -42,6 +54,20 @@ var (
|
|||
{"2", "blkio", "/user.slice"},
|
||||
{"1", "name=systemd", "/user.slice/user-1000.slice/session-2.scope"},
|
||||
}
|
||||
|
||||
expectUnifiedCgroup = []Cgroup{
|
||||
{"10", "devices", "/user.slice"},
|
||||
{"9", "net_cls,net_prio", "/"},
|
||||
{"8", "blkio", "/"},
|
||||
{"7", "freezer", "/"},
|
||||
{"6", "perf_event", "/"},
|
||||
{"5", "cpuset", "/"},
|
||||
{"4", "memory", "/user.slice"},
|
||||
{"3", "pids", "/user.slice/user-1000.slice/user@1000.service"},
|
||||
{"2", "cpu,cpuacct", "/"},
|
||||
{"1", "name=systemd", "/user.slice/user-1000.slice/user@1000.service/gnome-terminal-server.service"},
|
||||
{"0", "", "/user.slice/user-1000.slice/user@1000.service/gnome-terminal-server.service"},
|
||||
}
|
||||
)
|
||||
|
||||
func TestCgroups(t *testing.T) {
|
||||
|
@ -67,10 +93,21 @@ func TestCgroupsBadFormat(t *testing.T) {
|
|||
"/proc/123/cgroup": cgBadFormat,
|
||||
},
|
||||
})
|
||||
require.EqualError(t, err, `cgroup entry contains 2 colons, but expected at least 2 colons: "11:hugetlb"`)
|
||||
require.EqualError(t, err, `invalid cgroup entry, contains 2 colon separated fields but expected at least 3: "11:hugetlb"`)
|
||||
require.Nil(t, cgroups)
|
||||
}
|
||||
|
||||
func TestUnifiedCgroups(t *testing.T) {
|
||||
cgroups, err := GetCgroups(1234, FakeFileSystem{
|
||||
Files: map[string]string{
|
||||
"/proc/1234/cgroup": cgUnified,
|
||||
},
|
||||
})
|
||||
require.NoError(t, err)
|
||||
require.Len(t, cgroups, 11)
|
||||
require.Equal(t, expectUnifiedCgroup, cgroups)
|
||||
}
|
||||
|
||||
type FakeFileSystem struct {
|
||||
Files map[string]string
|
||||
}
|
||||
|
|
|
@ -15,7 +15,6 @@ import (
|
|||
"time"
|
||||
|
||||
"github.com/google/go-containerregistry/pkg/name"
|
||||
_ "github.com/google/go-containerregistry/pkg/v1"
|
||||
v1 "github.com/google/go-containerregistry/pkg/v1"
|
||||
"github.com/google/go-containerregistry/pkg/v1/types"
|
||||
"github.com/hashicorp/go-hclog"
|
||||
|
@ -35,7 +34,6 @@ func TestNewVerifier(t *testing.T) {
|
|||
config.IgnoreSCT = true
|
||||
config.IgnoreTlog = true
|
||||
config.IgnoreAttestations = true
|
||||
config.RekorURL = "https://rekor.test.com"
|
||||
config.RegistryCredentials = map[string]*RegistryCredential{
|
||||
"docker.io": {
|
||||
Username: "testuser",
|
||||
|
@ -45,6 +43,15 @@ func TestNewVerifier(t *testing.T) {
|
|||
Username: "testuser",
|
||||
Password: "testpassword",
|
||||
},
|
||||
"nopassword.io": { // should warn and ignore
|
||||
Username: "testuser",
|
||||
Password: "",
|
||||
},
|
||||
"nousername.io": { // should warn and ignore
|
||||
Username: "",
|
||||
Password: "testpassword",
|
||||
},
|
||||
"nil.io": nil, // should ignore
|
||||
}
|
||||
|
||||
config.SkippedImages = map[string]struct{}{
|
||||
|
@ -58,6 +65,8 @@ func TestNewVerifier(t *testing.T) {
|
|||
|
||||
verifier := NewVerifier(config)
|
||||
require.NotNil(t, verifier)
|
||||
require.NotNil(t, verifier.config.Logger)
|
||||
require.Equal(t, verifier.config.RekorURL, publicRekorURL) // verify default public RekorURL
|
||||
|
||||
identityPlainValues := cosign.Identity{
|
||||
Issuer: "test-issuer",
|
||||
|
@ -70,7 +79,11 @@ func TestNewVerifier(t *testing.T) {
|
|||
expectedIdentites := []cosign.Identity{identityPlainValues, identityRegExp}
|
||||
|
||||
assert.Equal(t, config, verifier.config)
|
||||
assert.Equal(t, len(config.RegistryCredentials), len(verifier.authOptions))
|
||||
assert.NotNil(t, verifier.authOptions["docker.io"])
|
||||
assert.NotNil(t, verifier.authOptions["other.io"])
|
||||
assert.Nil(t, verifier.authOptions["nopassword.io"])
|
||||
assert.Nil(t, verifier.authOptions["nousername.io"])
|
||||
assert.Nil(t, verifier.authOptions["nil.io"])
|
||||
assert.ElementsMatch(t, expectedIdentites, verifier.allowedIdentities)
|
||||
assert.NotNil(t, verifier.sigstoreFunctions.verifyImageSignatures)
|
||||
assert.NotNil(t, verifier.sigstoreFunctions.verifyImageAttestations)
|
||||
|
@ -378,6 +391,107 @@ func TestVerify(t *testing.T) {
|
|||
expectedVerifyCallCount: 0,
|
||||
expectedAttestationsCallCount: 0,
|
||||
},
|
||||
{
|
||||
name: "fails to extract details from signatures missing cert",
|
||||
configureTest: func(ctx context.Context, _ *ImageVerifier, signatureVerifyFake *fakeCosignVerifySignatureFn, attestationsVerifyFake *fakeCosignVerifyAttestationsFn) {
|
||||
signature := &fakeSignature{
|
||||
payload: createFakePayload(),
|
||||
base64Signature: "base64signature",
|
||||
bundle: createFakeBundle(),
|
||||
}
|
||||
|
||||
signatureVerifyFake.Responses = append(signatureVerifyFake.Responses, struct {
|
||||
Signatures []oci.Signature
|
||||
BundleVerified bool
|
||||
Err error
|
||||
}{
|
||||
Signatures: []oci.Signature{signature},
|
||||
BundleVerified: true,
|
||||
Err: nil,
|
||||
})
|
||||
attestationsVerifyFake.Responses = append(attestationsVerifyFake.Responses, struct {
|
||||
Signatures []oci.Signature
|
||||
BundleVerified bool
|
||||
Err error
|
||||
}{
|
||||
Signatures: []oci.Signature{signature},
|
||||
BundleVerified: true,
|
||||
Err: nil,
|
||||
})
|
||||
},
|
||||
expectedSelectors: nil,
|
||||
expectedError: true,
|
||||
expectedVerifyCallCount: 1,
|
||||
expectedAttestationsCallCount: 1,
|
||||
},
|
||||
{
|
||||
name: "fails to extract details from signatures missing cert subject",
|
||||
configureTest: func(ctx context.Context, _ *ImageVerifier, signatureVerifyFake *fakeCosignVerifySignatureFn, attestationsVerifyFake *fakeCosignVerifyAttestationsFn) {
|
||||
signature := &fakeSignature{
|
||||
payload: createFakePayload(),
|
||||
base64Signature: "base64signature",
|
||||
cert: createSubjectlessTestCert(),
|
||||
bundle: createFakeBundle(),
|
||||
}
|
||||
|
||||
signatureVerifyFake.Responses = append(signatureVerifyFake.Responses, struct {
|
||||
Signatures []oci.Signature
|
||||
BundleVerified bool
|
||||
Err error
|
||||
}{
|
||||
Signatures: []oci.Signature{signature},
|
||||
BundleVerified: true,
|
||||
Err: nil,
|
||||
})
|
||||
attestationsVerifyFake.Responses = append(attestationsVerifyFake.Responses, struct {
|
||||
Signatures []oci.Signature
|
||||
BundleVerified bool
|
||||
Err error
|
||||
}{
|
||||
Signatures: []oci.Signature{signature},
|
||||
BundleVerified: true,
|
||||
Err: nil,
|
||||
})
|
||||
},
|
||||
expectedSelectors: nil,
|
||||
expectedError: true,
|
||||
expectedVerifyCallCount: 1,
|
||||
expectedAttestationsCallCount: 1,
|
||||
},
|
||||
{
|
||||
name: "fails to extract details from signatures empty names cert subject",
|
||||
configureTest: func(ctx context.Context, _ *ImageVerifier, signatureVerifyFake *fakeCosignVerifySignatureFn, attestationsVerifyFake *fakeCosignVerifyAttestationsFn) {
|
||||
signature := &fakeSignature{
|
||||
payload: createFakePayload(),
|
||||
base64Signature: "base64signature",
|
||||
cert: createEmptynamesTestCert(),
|
||||
bundle: createFakeBundle(),
|
||||
}
|
||||
|
||||
signatureVerifyFake.Responses = append(signatureVerifyFake.Responses, struct {
|
||||
Signatures []oci.Signature
|
||||
BundleVerified bool
|
||||
Err error
|
||||
}{
|
||||
Signatures: []oci.Signature{signature},
|
||||
BundleVerified: true,
|
||||
Err: nil,
|
||||
})
|
||||
attestationsVerifyFake.Responses = append(attestationsVerifyFake.Responses, struct {
|
||||
Signatures []oci.Signature
|
||||
BundleVerified bool
|
||||
Err error
|
||||
}{
|
||||
Signatures: []oci.Signature{signature},
|
||||
BundleVerified: true,
|
||||
Err: nil,
|
||||
})
|
||||
},
|
||||
expectedSelectors: nil,
|
||||
expectedError: true,
|
||||
expectedVerifyCallCount: 1,
|
||||
expectedAttestationsCallCount: 1,
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
|
@ -708,9 +822,35 @@ func createTestCert() *x509.Certificate {
|
|||
}
|
||||
}
|
||||
|
||||
func createSubjectlessTestCert() *x509.Certificate {
|
||||
return &x509.Certificate{
|
||||
Extensions: []pkix.Extension{
|
||||
{
|
||||
Id: []int{1, 3, 6, 1, 4, 1, 57264, 1, 1}, // OIDC issuer OID
|
||||
Value: []byte("test-issuer"),
|
||||
},
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
func createEmptynamesTestCert() *x509.Certificate {
|
||||
return &x509.Certificate{
|
||||
Subject: pkix.Name{
|
||||
CommonName: "",
|
||||
},
|
||||
DNSNames: []string{""},
|
||||
Extensions: []pkix.Extension{
|
||||
{
|
||||
Id: []int{1, 3, 6, 1, 4, 1, 57264, 1, 1}, // OIDC issuer OID
|
||||
Value: []byte("test-issuer"),
|
||||
},
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
func createFakePayload() []byte {
|
||||
signaturePayload := payload.SimpleContainerImage{
|
||||
Optional: map[string]interface{}{
|
||||
Optional: map[string]any{
|
||||
"subject": "test-subject",
|
||||
},
|
||||
}
|
||||
|
|
|
@@ -2,12 +2,12 @@ package agent

import (
    "context"
    "crypto/x509"
    "net"
    "time"

    "github.com/sirupsen/logrus"
    "github.com/spiffe/go-spiffe/v2/spiffeid"
    "github.com/spiffe/spire/pkg/agent/trustbundlesources"
    "github.com/spiffe/spire/pkg/agent/workloadkey"
    "github.com/spiffe/spire/pkg/common/catalog"
    "github.com/spiffe/spire/pkg/common/health"

@@ -37,9 +37,6 @@ type Config struct {
    // The TLS Certificate resource name to use for the default X509-SVID with Envoy SDS
    DefaultSVIDName string

    // If true, the agent will bootstrap insecurely with the server
    InsecureBootstrap bool

    // If true, the agent retries bootstrap with backoff
    RetryBootstrap bool

@@ -75,7 +72,9 @@ type Config struct {
    // Trust domain and associated CA bundle
    TrustDomain spiffeid.TrustDomain
    TrustBundle []*x509.Certificate

    // Sources for getting Trust Bundles
    TrustBundleSources *trustbundlesources.Bundle

    // Join token to use for attestation, if needed
    JoinToken string

@@ -7,6 +7,7 @@ import (
    "errors"
    "fmt"
    "io"
    "maps"
    "sort"
    "strconv"

@@ -356,9 +357,7 @@ type validationContextBuilder interface {

func (h *Handler) getValidationContextBuilder(req *discovery_v3.DiscoveryRequest, upd *cache.WorkloadUpdate) (validationContextBuilder, error) {
    federatedBundles := make(map[spiffeid.TrustDomain]*spiffebundle.Bundle)
    for td, federatedBundle := range upd.FederatedBundles {
        federatedBundles[td] = federatedBundle
    }
    maps.Copy(federatedBundles, upd.FederatedBundles)
    if !h.isSPIFFECertValidationDisabled(req) && supportsSPIFFEAuthExtension(req) {
        return newSpiffeBuilder(upd.Bundle, federatedBundles)
    }

@@ -423,9 +422,7 @@ func newSpiffeBuilder(tdBundle *spiffebundle.Bundle, federatedBundles map[spiffe
    }

    // Add all federated bundles
    for td, bundle := range federatedBundles {
        bundles[td] = bundle
    }
    maps.Copy(bundles, federatedBundles)

    return &spiffeBuilder{
        bundles: bundles,

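The two handler hunks above replace hand-rolled map copy loops with `maps.Copy` from Go's standard `maps` package (Go 1.21+). A minimal, self-contained sketch of the same pattern, with illustrative names that are not taken from the SPIRE code:

```go
package main

import (
	"fmt"
	"maps"
)

func main() {
	dst := map[string]int{"a": 1}
	src := map[string]int{"b": 2, "c": 3}

	// Equivalent to: for k, v := range src { dst[k] = v }
	maps.Copy(dst, src)

	fmt.Println(dst) // map[a:1 b:2 c:3]
}
```
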
@@ -1198,15 +1198,6 @@ func TestFetchSecrets(t *testing.T) {
    }
}

func DeltaSecretsTest(t *testing.T) {
    test := setupTest(t)
    defer test.cleanup()

    resp, err := test.handler.DeltaSecrets(context.Background())
    spiretest.RequireGRPCStatus(t, err, codes.Unimplemented, "Method is not implemented")
    require.Nil(t, resp)
}

func setupTest(t *testing.T) *handlerTest {
    return setupTestWithManager(t, Config{}, NewFakeManager(t))
}

@@ -1434,7 +1425,7 @@ func requireSecrets(t *testing.T, resp *discovery_v3.DiscoveryResponse, expected
    var actualSecrets []*tls_v3.Secret
    for _, resource := range resp.Resources {
        secret := new(tls_v3.Secret)
        require.NoError(t, resource.UnmarshalTo(secret)) //nolint: scopelint // pointer to resource isn't held
        require.NoError(t, resource.UnmarshalTo(secret))
        actualSecrets = append(actualSecrets, secret)
    }

@@ -1,6 +1,8 @@
package cache

import (
    "maps"

    "github.com/imkira/go-observer"
    "github.com/spiffe/go-spiffe/v2/bundle/spiffebundle"
    "github.com/spiffe/go-spiffe/v2/spiffeid"

@@ -98,8 +100,6 @@ func copyBundleMap(bundles map[spiffeid.TrustDomain]*Bundle) map[spiffeid.TrustD
    }

    out := make(map[spiffeid.TrustDomain]*Bundle, len(bundles))
    for key, bundle := range bundles {
        out[key] = bundle
    }
    maps.Copy(out, bundles)
    return out
}

@@ -8,6 +8,7 @@ import (
    "errors"
    "fmt"
    "io"
    "slices"
    "sort"
    "sync"

@@ -166,7 +167,7 @@ func jwtSVIDKey(spiffeID spiffeid.ID, audience []string) string {
    // item.

    // duplicate and sort the audience slice
    audience = append([]string(nil), audience...)
    audience = slices.Clone(audience)
    sort.Strings(audience)

    _, _ = io.WriteString(h, spiffeID.String())

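Here the manual `append([]string(nil), audience...)` copy is replaced with `slices.Clone` (Go 1.21+). A small illustrative comparison of the two forms; the identifiers are examples, not SPIRE code:

```go
package main

import (
	"fmt"
	"slices"
	"sort"
)

func main() {
	audience := []string{"b", "a"}

	// Old style: copy by appending onto a nil slice.
	oldCopy := append([]string(nil), audience...)

	// New style: slices.Clone does the same copy more readably.
	newCopy := slices.Clone(audience)
	sort.Strings(newCopy)

	fmt.Println(audience, oldCopy, newCopy) // [b a] [b a] [a b]
}
```
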
@@ -14,7 +14,6 @@ import (
    "github.com/spiffe/go-spiffe/v2/spiffeid"
    "github.com/spiffe/spire/pkg/common/backoff"
    "github.com/spiffe/spire/pkg/common/telemetry"
    "github.com/spiffe/spire/pkg/common/telemetry/agent"
    agentmetrics "github.com/spiffe/spire/pkg/common/telemetry/agent"
    "github.com/spiffe/spire/pkg/common/x509util"
    "github.com/spiffe/spire/proto/spire/common"

@@ -633,7 +632,7 @@ func (c *LRUCache) notifyTaintedBatchProcessed() {

// processTaintedSVIDs identifies and removes tainted SVIDs from the cache that have been signed by the given tainted authorities.
func (c *LRUCache) processTaintedSVIDs(entryIDs []string, taintedX509Authorities []*x509.Certificate) {
    counter := telemetry.StartCall(c.metrics, telemetry.CacheManager, agent.CacheTypeWorkload, telemetry.ProcessTaintedX509SVIDs)
    counter := telemetry.StartCall(c.metrics, telemetry.CacheManager, agentmetrics.CacheTypeWorkload, telemetry.ProcessTaintedX509SVIDs)
    defer counter.Done(nil)

    taintedSVIDs := 0

@ -375,7 +375,7 @@ func TestSVIDRotation(t *testing.T) {
|
|||
|
||||
clk := clock.NewMock(t)
|
||||
|
||||
baseTTL := 3
|
||||
baseTTLSeconds := 3
|
||||
api := newMockAPI(t, &mockAPIConfig{
|
||||
km: km,
|
||||
getAuthorizedEntries: func(*mockAPI, int32, *entryv1.GetAuthorizedEntriesRequest) (*entryv1.GetAuthorizedEntriesResponse, error) {
|
||||
|
@ -384,12 +384,12 @@ func TestSVIDRotation(t *testing.T) {
|
|||
batchNewX509SVIDEntries: func(*mockAPI, int32) []*common.RegistrationEntry {
|
||||
return makeBatchNewX509SVIDEntries("resp1", "resp2")
|
||||
},
|
||||
svidTTL: baseTTL,
|
||||
svidTTL: baseTTLSeconds,
|
||||
clk: clk,
|
||||
})
|
||||
|
||||
baseTTLSeconds := time.Duration(baseTTL) * time.Second
|
||||
baseSVID, baseSVIDKey := api.newSVID(joinTokenID, baseTTLSeconds)
|
||||
baseTTL := time.Duration(baseTTLSeconds) * time.Second
|
||||
baseSVID, baseSVIDKey := api.newSVID(joinTokenID, baseTTL)
|
||||
|
||||
cat := fakeagentcatalog.New()
|
||||
cat.SetKeyManager(km)
|
||||
|
@ -404,7 +404,7 @@ func TestSVIDRotation(t *testing.T) {
|
|||
Storage: openStorage(t, dir),
|
||||
Bundle: api.bundle,
|
||||
Metrics: &telemetry.Blackhole{},
|
||||
RotationInterval: baseTTLSeconds / 2,
|
||||
RotationInterval: baseTTL / 2,
|
||||
SyncInterval: 1 * time.Hour,
|
||||
Clk: clk,
|
||||
WorkloadKeyType: workloadkey.ECP256,
|
||||
|
@ -449,7 +449,7 @@ func TestSVIDRotation(t *testing.T) {
|
|||
// Now advance time enough that the cert is expiring soon enough that the
|
||||
// manager will attempt to rotate, but be unable to since the read lock is
|
||||
// held.
|
||||
clk.Add(baseTTLSeconds)
|
||||
clk.Add(baseTTL)
|
||||
|
||||
closer := runManager(t, m)
|
||||
defer closer()
|
||||
|
@ -467,13 +467,7 @@ func TestSVIDRotation(t *testing.T) {
|
|||
m.GetRotationMtx().RUnlock()
|
||||
|
||||
// Loop until we detect an SVID rotation was called in separate process
|
||||
util.RunWithTimeout(t, time.Minute, func() {
|
||||
for {
|
||||
if wasRotHookCalled() {
|
||||
break
|
||||
}
|
||||
}
|
||||
})
|
||||
require.Eventually(t, wasRotHookCalled, time.Minute, 100*time.Millisecond)
|
||||
|
||||
s := m.GetCurrentCredentials()
|
||||
svid = s.SVID
|
||||
|
@ -1419,7 +1413,7 @@ func TestSurvivesCARotation(t *testing.T) {
|
|||
km := fakeagentkeymanager.New(t, dir)
|
||||
|
||||
clk := clock.NewMock(t)
|
||||
ttl := 3
|
||||
ttlSeconds := 3
|
||||
api := newMockAPI(t, &mockAPIConfig{
|
||||
km: km,
|
||||
getAuthorizedEntries: func(h *mockAPI, count int32, req *entryv1.GetAuthorizedEntriesRequest) (*entryv1.GetAuthorizedEntriesResponse, error) {
|
||||
|
@ -1432,15 +1426,15 @@ func TestSurvivesCARotation(t *testing.T) {
|
|||
clk: clk,
|
||||
// Give a low ttl to get expired entries on each synchronization, forcing
|
||||
// the manager to fetch entries from the server.
|
||||
svidTTL: ttl,
|
||||
svidTTL: ttlSeconds,
|
||||
})
|
||||
|
||||
baseSVID, baseSVIDKey := api.newSVID(joinTokenID, 1*time.Hour)
|
||||
cat := fakeagentcatalog.New()
|
||||
cat.SetKeyManager(km)
|
||||
|
||||
ttlSeconds := time.Duration(ttl) * time.Second
|
||||
syncInterval := ttlSeconds / 2
|
||||
ttl := time.Duration(ttlSeconds) * time.Second
|
||||
syncInterval := ttl / 2
|
||||
c := &Config{
|
||||
ServerAddr: api.addr,
|
||||
SVID: baseSVID,
|
||||
|
|
|
@@ -12,7 +12,6 @@ import (
    "github.com/spiffe/go-spiffe/v2/spiffeid"
    "github.com/spiffe/spire/pkg/agent/manager/cache"
    "github.com/spiffe/spire/pkg/common/telemetry"
    "github.com/spiffe/spire/pkg/common/telemetry/agent"
    telemetry_agent "github.com/spiffe/spire/pkg/common/telemetry/agent"
    "github.com/spiffe/spire/pkg/common/x509util"
    "github.com/spiffe/spire/proto/spire/common"

@@ -229,7 +228,7 @@ func (c *Cache) TaintX509SVIDs(ctx context.Context, taintedX509Authorities []*x5
    c.mtx.Lock()
    defer c.mtx.Unlock()

    counter := telemetry.StartCall(c.c.Metrics, telemetry.CacheManager, agent.CacheTypeSVIDStore, telemetry.ProcessTaintedX509SVIDs)
    counter := telemetry.StartCall(c.c.Metrics, telemetry.CacheManager, telemetry_agent.CacheTypeSVIDStore, telemetry.ProcessTaintedX509SVIDs)
    defer counter.Done(nil)

    taintedSVIDs := 0

@@ -253,7 +252,7 @@ func (c *Cache) TaintX509SVIDs(ctx context.Context, taintedX509Authorities []*x5
        }
    }

    telemetry_agent.AddCacheManagerExpiredSVIDsSample(c.c.Metrics, agent.CacheTypeSVIDStore, float32(taintedSVIDs))
    telemetry_agent.AddCacheManagerExpiredSVIDsSample(c.c.Metrics, telemetry_agent.CacheTypeSVIDStore, float32(taintedSVIDs))
    c.c.Log.WithField(telemetry.TaintedX509SVIDs, taintedSVIDs).Info("Tainted X.509 SVIDs")
}

@@ -16,7 +16,6 @@ import (
    "github.com/spiffe/spire/pkg/agent/workloadkey"
    "github.com/spiffe/spire/pkg/common/bundleutil"
    "github.com/spiffe/spire/pkg/common/telemetry"
    "github.com/spiffe/spire/pkg/common/telemetry/agent"
    telemetry_agent "github.com/spiffe/spire/pkg/common/telemetry/agent"
    "github.com/spiffe/spire/pkg/common/util"
    "github.com/spiffe/spire/pkg/common/x509util"

@@ -111,11 +110,11 @@ func (m *manager) synchronize(ctx context.Context) (err error) {
        return err
    }

    if err := m.updateCache(ctx, cacheUpdate, m.c.Log.WithField(telemetry.CacheType, agent.CacheTypeWorkload), "", m.cache); err != nil {
    if err := m.updateCache(ctx, cacheUpdate, m.c.Log.WithField(telemetry.CacheType, telemetry_agent.CacheTypeWorkload), "", m.cache); err != nil {
        return err
    }

    if err := m.updateCache(ctx, storeUpdate, m.c.Log.WithField(telemetry.CacheType, agent.CacheTypeSVIDStore), agent.CacheTypeSVIDStore, m.svidStoreCache); err != nil {
    if err := m.updateCache(ctx, storeUpdate, m.c.Log.WithField(telemetry.CacheType, telemetry_agent.CacheTypeSVIDStore), telemetry_agent.CacheTypeSVIDStore, m.svidStoreCache); err != nil {
        return err
    }

@@ -15,6 +15,7 @@ import (
    "sync"

    keymanagerv1 "github.com/spiffe/spire-plugin-sdk/proto/spire/plugin/agent/keymanager/v1"
    "github.com/spiffe/spire/pkg/common/util"
    "google.golang.org/grpc/codes"
    "google.golang.org/grpc/status"
    "google.golang.org/protobuf/proto"

@@ -37,10 +38,10 @@ type Config struct {
}

type Generator interface {
    GenerateRSA2048Key() (*rsa.PrivateKey, error)
    GenerateRSA4096Key() (*rsa.PrivateKey, error)
    GenerateEC256Key() (*ecdsa.PrivateKey, error)
    GenerateEC384Key() (*ecdsa.PrivateKey, error)
    GenerateRSA2048Key() (crypto.Signer, error)
    GenerateRSA4096Key() (crypto.Signer, error)
    GenerateEC256Key() (crypto.Signer, error)
    GenerateEC384Key() (crypto.Signer, error)
}

// Base is the base KeyManager implementation

@@ -170,7 +171,7 @@ func (m *Base) signData(req *keymanagerv1.SignDataRequest) (*keymanagerv1.SignDa
    if opts.HashAlgorithm == keymanagerv1.HashAlgorithm_UNSPECIFIED_HASH_ALGORITHM {
        return nil, status.Error(codes.InvalidArgument, "hash algorithm is required")
    }
    signerOpts = crypto.Hash(opts.HashAlgorithm)
    signerOpts = util.MustCast[crypto.Hash](opts.HashAlgorithm)
case *keymanagerv1.SignDataRequest_PssOptions:
    if opts.PssOptions == nil {
        return nil, status.Error(codes.InvalidArgument, "PSS options are nil")

@@ -180,7 +181,7 @@ func (m *Base) signData(req *keymanagerv1.SignDataRequest) (*keymanagerv1.SignDa
    }
    signerOpts = &rsa.PSSOptions{
        SaltLength: int(opts.PssOptions.SaltLength),
        Hash: crypto.Hash(opts.PssOptions.HashAlgorithm),
        Hash: util.MustCast[crypto.Hash](opts.PssOptions.HashAlgorithm),
    }
default:
    return nil, status.Errorf(codes.InvalidArgument, "unsupported signer opts type %T", opts)

@@ -299,19 +300,19 @@ func ecdsaKeyType(privateKey *ecdsa.PrivateKey) (keymanagerv1.KeyType, error) {

type defaultGenerator struct{}

func (defaultGenerator) GenerateRSA2048Key() (*rsa.PrivateKey, error) {
func (defaultGenerator) GenerateRSA2048Key() (crypto.Signer, error) {
    return rsa.GenerateKey(rand.Reader, 2048)
}

func (defaultGenerator) GenerateRSA4096Key() (*rsa.PrivateKey, error) {
func (defaultGenerator) GenerateRSA4096Key() (crypto.Signer, error) {
    return rsa.GenerateKey(rand.Reader, 4096)
}

func (defaultGenerator) GenerateEC256Key() (*ecdsa.PrivateKey, error) {
func (defaultGenerator) GenerateEC256Key() (crypto.Signer, error) {
    return ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
}

func (defaultGenerator) GenerateEC384Key() (*ecdsa.PrivateKey, error) {
func (defaultGenerator) GenerateEC384Key() (crypto.Signer, error) {
    return ecdsa.GenerateKey(elliptic.P384(), rand.Reader)
}

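The Generator interface above now returns crypto.Signer instead of the concrete *rsa.PrivateKey and *ecdsa.PrivateKey types, both of which already satisfy that interface. A minimal sketch of why a single return type works for every supported key kind (illustrative only, not SPIRE code):

```go
package main

import (
	"crypto"
	"crypto/ecdsa"
	"crypto/elliptic"
	"crypto/rand"
	"crypto/rsa"
	"fmt"
)

// Both *rsa.PrivateKey and *ecdsa.PrivateKey implement crypto.Signer,
// so one return type can cover every supported key kind.
func generate(kind string) (crypto.Signer, error) {
	switch kind {
	case "rsa-2048":
		return rsa.GenerateKey(rand.Reader, 2048)
	case "ec-p256":
		return ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
	default:
		return nil, fmt.Errorf("unsupported key kind %q", kind)
	}
}

func main() {
	signer, err := generate("ec-p256")
	if err != nil {
		panic(err)
	}
	fmt.Printf("public key type: %T\n", signer.Public())
}
```
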
@@ -13,7 +13,7 @@ import (
    keymanagerv1 "github.com/spiffe/spire-plugin-sdk/proto/spire/plugin/agent/keymanager/v1"
    configv1 "github.com/spiffe/spire-plugin-sdk/proto/spire/service/common/config/v1"
    keymanagerbase "github.com/spiffe/spire/pkg/agent/plugin/keymanager/base"
    catalog "github.com/spiffe/spire/pkg/common/catalog"
    "github.com/spiffe/spire/pkg/common/catalog"
    "github.com/spiffe/spire/pkg/common/diskutil"
    "google.golang.org/grpc/codes"
    "google.golang.org/grpc/status"

@@ -9,6 +9,7 @@ import (

    keymanagerv1 "github.com/spiffe/spire-plugin-sdk/proto/spire/plugin/agent/keymanager/v1"
    "github.com/spiffe/spire/pkg/common/plugin"
    "github.com/spiffe/spire/pkg/common/util"
    "google.golang.org/grpc/codes"
    "google.golang.org/grpc/status"
)

@@ -118,7 +119,7 @@ func (v1 *V1) convertKeyType(t KeyType) (keymanagerv1.KeyType, error) {

func (v1 *V1) convertHashAlgorithm(h crypto.Hash) keymanagerv1.HashAlgorithm {
    // Hash algorithm constants are aligned.
    return keymanagerv1.HashAlgorithm(h)
    return util.MustCast[keymanagerv1.HashAlgorithm](h)
}

type v1Key struct {

@@ -155,7 +156,7 @@ func (s *v1Key) signContext(ctx context.Context, digest []byte, opts crypto.Sign
    case *rsa.PSSOptions:
        req.SignerOpts = &keymanagerv1.SignDataRequest_PssOptions{
            PssOptions: &keymanagerv1.SignDataRequest_PSSOptions{
                SaltLength: int32(opts.SaltLength),
                SaltLength: util.MustCast[int32](opts.SaltLength),
                HashAlgorithm: s.v1.convertHashAlgorithm(opts.Hash),
            },
        }

@ -22,10 +22,10 @@ import (
|
|||
"github.com/spiffe/spire/pkg/agent/plugin/nodeattestor"
|
||||
nodeattestortest "github.com/spiffe/spire/pkg/agent/plugin/nodeattestor/test"
|
||||
"github.com/spiffe/spire/pkg/common/catalog"
|
||||
"github.com/spiffe/spire/pkg/common/pemutil"
|
||||
"github.com/spiffe/spire/pkg/common/plugin/aws"
|
||||
"github.com/spiffe/spire/test/plugintest"
|
||||
"github.com/spiffe/spire/test/spiretest"
|
||||
"github.com/spiffe/spire/test/testkey"
|
||||
"google.golang.org/grpc/codes"
|
||||
)
|
||||
|
||||
|
@ -38,19 +38,7 @@ const (
|
|||
)
|
||||
|
||||
var (
|
||||
signingKeyPEM = []byte(`-----BEGIN RSA PRIVATE KEY-----
|
||||
MIIBywIBAAJhAOn4rFLlxONpujl+q/h/kTQzZoqn1nQZbCKEyIPBWO6kkcSqIqON
|
||||
aB3i+xyxgZNwkGEkLGRl/Uwasbp7O/sU43wh5ywWp/AG0iFe1RhwMd8LMq5ron6o
|
||||
s2eql71hJKsGEwIDAQABAmEAoDa9YcKe8Q68C5TXE8He33z3Ealea3/hET4VxEsI
|
||||
p9mfS6kpMQ+qpRSB2aMfVKP1mrAQ4/5TarrG1ZG3T/Mt9Oy1QHbzALvz2XObIvcR
|
||||
0cnG353CLQK/nobvWcwAtac5AjEA9k+1a9R6eFaO3grl9yg5XY2+MboV4wjbsDS3
|
||||
s4+MivneTPwvK6eHxtoAlYCNOAslAjEA8yy0PJw3TLBK80DryF3r/Q4wd4uYeFhN
|
||||
G6EBF0LccLB7GbKpcDHgnNjW/wObx+LXAjBeP4/G6+3U4CIYuojWMvEIaDVPp8m6
|
||||
LuiJGxLzxUjc4NF8Gb8e8CLXJxG0IxVmTXUCMQDSPJAG5rgYoUHrVPGEZU8llSLp
|
||||
99J2GUFw5Z3f0nprIukKqqA606RxdjdKeoAwLDkCMCptc0jZR3VM4w1wnwvAe0FL
|
||||
t61Ol/Q+OqWFX74JwsUU56FqPFm3Y9k7HxDILdedoQ==
|
||||
-----END RSA PRIVATE KEY-----`)
|
||||
|
||||
signingKey = testkey.MustRSA2048()
|
||||
streamBuilder = nodeattestortest.ServerStream(aws.PluginName)
|
||||
)
|
||||
|
||||
|
@ -180,15 +168,13 @@ func (s *Suite) buildDefaultIIDDocAndSig() (docBytes []byte, sigBytes []byte, si
|
|||
s.Require().NoError(err)
|
||||
|
||||
rng := rand.Reader
|
||||
key, err := pemutil.ParseRSAPrivateKey(signingKeyPEM)
|
||||
s.Require().NoError(err)
|
||||
|
||||
// doc signature
|
||||
docHash := sha256.Sum256(docBytes)
|
||||
sig, err := rsa.SignPKCS1v15(rng, key, crypto.SHA256, docHash[:])
|
||||
sig, err := rsa.SignPKCS1v15(rng, signingKey, crypto.SHA256, docHash[:])
|
||||
s.Require().NoError(err)
|
||||
|
||||
sigRSA2048 = s.generatePKCS7Signature(docBytes, key)
|
||||
sigRSA2048 = s.generatePKCS7Signature(docBytes, signingKey)
|
||||
|
||||
return docBytes, sig, sigRSA2048
|
||||
}
|
||||
|
|
|
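The AWS IID attestor hunks above drop a hard-coded RSA test key in favor of `testkey.MustRSA2048()` from SPIRE's test helpers. A hedged sketch of what a helper like that generally does; the real helper may reuse pre-generated keys, while this illustrative stand-in simply generates one on demand:

```go
package testkeyexample

import (
	"crypto/rand"
	"crypto/rsa"
)

// mustRSA2048 is an illustrative stand-in for a test-key helper: it returns a
// 2048-bit RSA key and panics on failure, so tests can use it in var blocks.
func mustRSA2048() *rsa.PrivateKey {
	key, err := rsa.GenerateKey(rand.Reader, 2048)
	if err != nil {
		panic(err)
	}
	return key
}

var signingKey = mustRSA2048()
```
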
@@ -64,7 +64,7 @@ func (s *MSIAttestorSuite) TestAidAttestationFailedToObtainToken() {
func (s *MSIAttestorSuite) TestAidAttestationSuccess() {
    s.token = s.makeAccessToken("PRINCIPALID", "TENANTID")

    expectPayload := []byte(fmt.Sprintf(`{"token":%q}`, s.token))
    expectPayload := fmt.Appendf(nil, `{"token":%q}`, s.token)

    attestor := s.loadAttestor(
        plugintest.CoreConfig(catalog.CoreConfig{

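`fmt.Appendf` (Go 1.19+) formats directly into a byte slice, avoiding the intermediate string allocation of `[]byte(fmt.Sprintf(...))`. A small illustrative comparison of the two forms:

```go
package main

import "fmt"

func main() {
	token := "abc123"

	// Old: format to a string, then convert to []byte.
	oldPayload := []byte(fmt.Sprintf(`{"token":%q}`, token))

	// New: append the formatted bytes directly onto a (nil) slice.
	newPayload := fmt.Appendf(nil, `{"token":%q}`, token)

	fmt.Println(string(oldPayload) == string(newPayload)) // true
}
```
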
@ -13,27 +13,15 @@ import (
|
|||
"github.com/spiffe/spire/pkg/agent/plugin/nodeattestor"
|
||||
nodeattestortest "github.com/spiffe/spire/pkg/agent/plugin/nodeattestor/test"
|
||||
"github.com/spiffe/spire/pkg/common/catalog"
|
||||
"github.com/spiffe/spire/pkg/common/pemutil"
|
||||
sat_common "github.com/spiffe/spire/pkg/common/plugin/k8s"
|
||||
"github.com/spiffe/spire/test/plugintest"
|
||||
"github.com/spiffe/spire/test/spiretest"
|
||||
"github.com/spiffe/spire/test/testkey"
|
||||
"google.golang.org/grpc/codes"
|
||||
)
|
||||
|
||||
var sampleKeyPEM = []byte(`-----BEGIN RSA PRIVATE KEY-----
|
||||
MIIBywIBAAJhAMB4gbT09H2RKXaxbu6IV9C3WY+pvkGAbrlQRIHLHwV3Xt1HchjX
|
||||
c08v1VEoTBN2YTjhZJlDb/VUsNMJsmBFBBted5geRcbrDtXFlUJ8tQoQx1dWM4Aa
|
||||
xcdULJ83A9ICKwIDAQABAmBR1asInrIphYQEtHJ/NzdnRd3tqHV9cjch0dAfA5dA
|
||||
Ar4yBYOsrkaX37WqWSDnkYgN4FWYBWn7WxeotCtA5UQ3SM5hLld67rUqAm2dLrs1
|
||||
z8va6SwLzrPTu2+rmRgovFECMQDpbfPBRex7FY/xWu1pYv6X9XZ26SrC2Wc6RIpO
|
||||
38AhKGjTFEMAPJQlud4e2+4I3KkCMQDTFLUvBSXokw2NvcNiM9Kqo5zCnCIkgc+C
|
||||
hM3EzSh2jh4gZvRzPOhXYvNKgLx8+LMCMQDL4meXlpV45Fp3eu4GsJqi65jvP7VD
|
||||
v1P0hs0vGyvbSkpUo0vqNv9G/FNQLNR6FRECMFXEMz5wxA91OOuf8HTFg9Lr+fUl
|
||||
RcY5rJxm48kUZ12Mr3cQ/kCYvftL7HkYR/4rewIxANdritlIPu4VziaEhYZg7dvz
|
||||
pG3eEhiqPxE++QHpwU78O+F1GznOPBvpZOB3GfyjNQ==
|
||||
-----END RSA PRIVATE KEY-----`)
|
||||
|
||||
var (
|
||||
sampleKey = testkey.MustRSA2048()
|
||||
streamBuilder = nodeattestortest.ServerStream(pluginName)
|
||||
)
|
||||
|
||||
|
@ -76,7 +64,7 @@ func (s *AttestorSuite) TestAttestSuccess() {
|
|||
|
||||
na := s.loadPluginWithTokenPath(s.writeValue("token", token))
|
||||
|
||||
err = na.Attest(context.Background(), streamBuilder.ExpectAndBuild([]byte(fmt.Sprintf(`{"cluster":"production","token":"%s"}`, token))))
|
||||
err = na.Attest(context.Background(), streamBuilder.ExpectAndBuild(fmt.Appendf(nil, `{"cluster":"production","token":"%s"}`, token)))
|
||||
s.Require().NoError(err)
|
||||
}
|
||||
|
||||
|
@ -168,11 +156,6 @@ func createPSAT(namespace, podName string) (string, error) {
|
|||
}
|
||||
|
||||
func createSigner() (jose.Signer, error) {
|
||||
sampleKey, err := pemutil.ParseRSAPrivateKey(sampleKeyPEM)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
sampleSigner, err := jose.NewSigner(jose.SigningKey{
|
||||
Algorithm: jose.RS256,
|
||||
Key: sampleKey,
|
||||
|
|
|
@ -1,156 +0,0 @@
|
|||
package k8ssat
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"os"
|
||||
"sync"
|
||||
|
||||
"github.com/hashicorp/go-hclog"
|
||||
"github.com/hashicorp/hcl"
|
||||
nodeattestorv1 "github.com/spiffe/spire-plugin-sdk/proto/spire/plugin/agent/nodeattestor/v1"
|
||||
configv1 "github.com/spiffe/spire-plugin-sdk/proto/spire/service/common/config/v1"
|
||||
"github.com/spiffe/spire/pkg/common/catalog"
|
||||
"github.com/spiffe/spire/pkg/common/plugin/k8s"
|
||||
"github.com/spiffe/spire/pkg/common/pluginconf"
|
||||
"google.golang.org/grpc/codes"
|
||||
"google.golang.org/grpc/status"
|
||||
)
|
||||
|
||||
const (
|
||||
pluginName = "k8s_sat"
|
||||
|
||||
defaultTokenPath = "/var/run/secrets/kubernetes.io/serviceaccount/token" //nolint: gosec // false positive
|
||||
)
|
||||
|
||||
func BuiltIn() catalog.BuiltIn {
|
||||
return builtin(New())
|
||||
}
|
||||
|
||||
func builtin(p *AttestorPlugin) catalog.BuiltIn {
|
||||
return catalog.MakeBuiltIn(pluginName,
|
||||
nodeattestorv1.NodeAttestorPluginServer(p),
|
||||
configv1.ConfigServiceServer(p))
|
||||
}
|
||||
|
||||
type AttestorConfig struct {
|
||||
Cluster string `hcl:"cluster"`
|
||||
TokenPath string `hcl:"token_path"`
|
||||
}
|
||||
|
||||
func buildConfig(coreConfig catalog.CoreConfig, hclText string, status *pluginconf.Status) *attestorConfig {
|
||||
hclConfig := new(AttestorConfig)
|
||||
if err := hcl.Decode(hclConfig, hclText); err != nil {
|
||||
status.ReportErrorf("unable to decode configuration: %v", err)
|
||||
return nil
|
||||
}
|
||||
|
||||
if hclConfig.Cluster == "" {
|
||||
status.ReportError("configuration missing cluster")
|
||||
}
|
||||
|
||||
newConfig := &attestorConfig{
|
||||
cluster: hclConfig.Cluster,
|
||||
tokenPath: hclConfig.TokenPath,
|
||||
}
|
||||
|
||||
if newConfig.tokenPath == "" {
|
||||
newConfig.tokenPath = getDefaultTokenPath()
|
||||
}
|
||||
|
||||
return newConfig
|
||||
}
|
||||
|
||||
type attestorConfig struct {
|
||||
cluster string
|
||||
tokenPath string
|
||||
}
|
||||
|
||||
type AttestorPlugin struct {
|
||||
nodeattestorv1.UnsafeNodeAttestorServer
|
||||
configv1.UnsafeConfigServer
|
||||
log hclog.Logger
|
||||
|
||||
mu sync.RWMutex
|
||||
config *attestorConfig
|
||||
}
|
||||
|
||||
func New() *AttestorPlugin {
|
||||
return &AttestorPlugin{}
|
||||
}
|
||||
|
||||
// SetLogger sets a logger in the plugin.
|
||||
func (p *AttestorPlugin) SetLogger(log hclog.Logger) {
|
||||
p.log = log
|
||||
}
|
||||
|
||||
func (p *AttestorPlugin) AidAttestation(stream nodeattestorv1.NodeAttestor_AidAttestationServer) error {
|
||||
config, err := p.getConfig()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
token, err := loadTokenFromFile(config.tokenPath)
|
||||
if err != nil {
|
||||
return status.Errorf(codes.InvalidArgument, "unable to load token from %s: %v", config.tokenPath, err)
|
||||
}
|
||||
|
||||
payload, err := json.Marshal(k8s.SATAttestationData{
|
||||
Cluster: config.cluster,
|
||||
Token: token,
|
||||
})
|
||||
if err != nil {
|
||||
return status.Errorf(codes.Internal, "unable to marshal SAT token data: %v", err)
|
||||
}
|
||||
|
||||
return stream.Send(&nodeattestorv1.PayloadOrChallengeResponse{
|
||||
Data: &nodeattestorv1.PayloadOrChallengeResponse_Payload{
|
||||
Payload: payload,
|
||||
},
|
||||
})
|
||||
}
|
||||
|
||||
func (p *AttestorPlugin) Configure(_ context.Context, req *configv1.ConfigureRequest) (resp *configv1.ConfigureResponse, err error) {
|
||||
p.log.Warn(fmt.Sprintf("The %q node attestor plugin has been deprecated in favor of the \"k8s_psat\" plugin and will be removed in a future release", pluginName))
|
||||
|
||||
newConfig, _, err := pluginconf.Build(req, buildConfig)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
p.mu.Lock()
|
||||
defer p.mu.Unlock()
|
||||
p.config = newConfig
|
||||
|
||||
return &configv1.ConfigureResponse{}, nil
|
||||
}
|
||||
|
||||
func (p *AttestorPlugin) Validate(_ context.Context, req *configv1.ValidateRequest) (resp *configv1.ValidateResponse, err error) {
|
||||
_, notes, err := pluginconf.Build(req, buildConfig)
|
||||
|
||||
return &configv1.ValidateResponse{
|
||||
Valid: err == nil,
|
||||
Notes: notes,
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (p *AttestorPlugin) getConfig() (*attestorConfig, error) {
|
||||
p.mu.RLock()
|
||||
defer p.mu.RUnlock()
|
||||
if p.config == nil {
|
||||
return nil, status.Error(codes.FailedPrecondition, "not configured")
|
||||
}
|
||||
return p.config, nil
|
||||
}
|
||||
|
||||
func loadTokenFromFile(path string) (string, error) {
|
||||
data, err := os.ReadFile(path)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
if len(data) == 0 {
|
||||
return "", fmt.Errorf("%q is empty", path)
|
||||
}
|
||||
return string(data), nil
|
||||
}
|
|
@ -1,7 +0,0 @@
|
|||
//go:build !windows
|
||||
|
||||
package k8ssat
|
||||
|
||||
func getDefaultTokenPath() string {
|
||||
return defaultTokenPath
|
||||
}
|
|
@ -1,40 +0,0 @@
|
|||
//go:build !windows
|
||||
|
||||
package k8ssat
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/spiffe/go-spiffe/v2/spiffeid"
|
||||
"github.com/spiffe/spire/pkg/agent/plugin/nodeattestor"
|
||||
"github.com/spiffe/spire/pkg/common/catalog"
|
||||
"github.com/spiffe/spire/test/plugintest"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
func TestConfigureDefaultToken(t *testing.T) {
|
||||
p := New()
|
||||
var err error
|
||||
plugintest.Load(t, builtin(p), new(nodeattestor.V1),
|
||||
plugintest.CaptureConfigureError(&err),
|
||||
plugintest.CoreConfig(catalog.CoreConfig{
|
||||
TrustDomain: spiffeid.RequireTrustDomainFromString("example.org"),
|
||||
}),
|
||||
plugintest.Configure(`cluster = "production"`),
|
||||
)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, "/var/run/secrets/kubernetes.io/serviceaccount/token", p.config.tokenPath)
|
||||
|
||||
plugintest.Load(t, builtin(p), new(nodeattestor.V1),
|
||||
plugintest.CaptureConfigureError(&err),
|
||||
plugintest.CoreConfig(catalog.CoreConfig{
|
||||
TrustDomain: spiffeid.RequireTrustDomainFromString("example.org"),
|
||||
}),
|
||||
plugintest.Configure(`
|
||||
cluster = "production"
|
||||
token_path = "/tmp/token"`),
|
||||
)
|
||||
require.NoError(t, err)
|
||||
|
||||
require.Equal(t, "/tmp/token", p.config.tokenPath)
|
||||
}
|
|
@ -1,120 +0,0 @@
|
|||
package k8ssat
|
||||
|
||||
import (
|
||||
"context"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"testing"
|
||||
|
||||
"github.com/spiffe/go-spiffe/v2/spiffeid"
|
||||
"github.com/spiffe/spire/pkg/agent/plugin/nodeattestor"
|
||||
nodeattestortest "github.com/spiffe/spire/pkg/agent/plugin/nodeattestor/test"
|
||||
"github.com/spiffe/spire/pkg/common/catalog"
|
||||
"github.com/spiffe/spire/test/plugintest"
|
||||
"github.com/spiffe/spire/test/spiretest"
|
||||
"google.golang.org/grpc/codes"
|
||||
)
|
||||
|
||||
var (
|
||||
streamBuilder = nodeattestortest.ServerStream(pluginName)
|
||||
)
|
||||
|
||||
func TestAttestorPlugin(t *testing.T) {
|
||||
spiretest.Run(t, new(AttestorSuite))
|
||||
}
|
||||
|
||||
type AttestorSuite struct {
|
||||
spiretest.Suite
|
||||
|
||||
dir string
|
||||
}
|
||||
|
||||
func (s *AttestorSuite) SetupTest() {
|
||||
s.dir = s.TempDir()
|
||||
}
|
||||
|
||||
func (s *AttestorSuite) TestAttestNotConfigured() {
|
||||
na := s.loadPlugin()
|
||||
err := na.Attest(context.Background(), streamBuilder.Build())
|
||||
s.RequireGRPCStatus(err, codes.FailedPrecondition, "nodeattestor(k8s_sat): not configured")
|
||||
}
|
||||
|
||||
func (s *AttestorSuite) TestAttestNoToken() {
|
||||
na := s.loadPluginWithTokenPath("example.org", s.joinPath("token"))
|
||||
err := na.Attest(context.Background(), streamBuilder.Build())
|
||||
s.RequireGRPCStatusContains(err, codes.InvalidArgument, "nodeattestor(k8s_sat): unable to load token from")
|
||||
}
|
||||
|
||||
func (s *AttestorSuite) TestAttestEmptyToken() {
|
||||
na := s.loadPluginWithTokenPath("example.org", s.writeValue("token", ""))
|
||||
err := na.Attest(context.Background(), streamBuilder.Build())
|
||||
s.RequireGRPCStatusContains(err, codes.InvalidArgument, "nodeattestor(k8s_sat): unable to load token from")
|
||||
}
|
||||
|
||||
func (s *AttestorSuite) TestAttestSuccess() {
|
||||
na := s.loadPluginWithTokenPath("example.org", s.writeValue("token", "TOKEN"))
|
||||
|
||||
err := na.Attest(context.Background(), streamBuilder.ExpectAndBuild([]byte(`{"cluster":"production","token":"TOKEN"}`)))
|
||||
s.Require().NoError(err)
|
||||
}
|
||||
|
||||
func (s *AttestorSuite) TestConfigure() {
|
||||
var err error
|
||||
|
||||
// malformed configuration
|
||||
s.loadPlugin(plugintest.CaptureConfigureError(&err),
|
||||
plugintest.CoreConfig(catalog.CoreConfig{
|
||||
TrustDomain: spiffeid.RequireTrustDomainFromString("example.org"),
|
||||
}),
|
||||
plugintest.Configure("malformed"),
|
||||
)
|
||||
s.RequireGRPCStatusContains(err, codes.InvalidArgument, "unable to decode configuration")
|
||||
|
||||
// missing cluster
|
||||
s.loadPlugin(plugintest.CaptureConfigureError(&err),
|
||||
plugintest.CoreConfig(catalog.CoreConfig{
|
||||
TrustDomain: spiffeid.RequireTrustDomainFromString("example.org"),
|
||||
}),
|
||||
plugintest.Configure(""),
|
||||
)
|
||||
s.RequireGRPCStatus(err, codes.InvalidArgument, "configuration missing cluster")
|
||||
|
||||
// success
|
||||
s.loadPlugin(plugintest.CaptureConfigureError(&err),
|
||||
plugintest.CoreConfig(catalog.CoreConfig{
|
||||
TrustDomain: spiffeid.RequireTrustDomainFromString("example.org"),
|
||||
}),
|
||||
plugintest.Configure(`cluster = "production"`),
|
||||
)
|
||||
s.Require().NoError(err)
|
||||
}
|
||||
|
||||
func (s *AttestorSuite) loadPluginWithTokenPath(trustDomain string, tokenPath string) nodeattestor.NodeAttestor {
|
||||
return s.loadPlugin(
|
||||
plugintest.CoreConfig(catalog.CoreConfig{
|
||||
TrustDomain: spiffeid.RequireTrustDomainFromString(trustDomain),
|
||||
}),
|
||||
plugintest.Configuref(`
|
||||
cluster = "production"
|
||||
token_path = %q`, tokenPath),
|
||||
)
|
||||
}
|
||||
|
||||
func (s *AttestorSuite) loadPlugin(options ...plugintest.Option) nodeattestor.NodeAttestor {
|
||||
na := new(nodeattestor.V1)
|
||||
plugintest.Load(s.T(), BuiltIn(), na, options...)
|
||||
return na
|
||||
}
|
||||
|
||||
func (s *AttestorSuite) joinPath(path string) string {
|
||||
return filepath.Join(s.dir, path)
|
||||
}
|
||||
|
||||
func (s *AttestorSuite) writeValue(path, data string) string {
|
||||
valuePath := s.joinPath(path)
|
||||
err := os.MkdirAll(filepath.Dir(valuePath), 0755)
|
||||
s.Require().NoError(err)
|
||||
err = os.WriteFile(valuePath, []byte(data), 0600)
|
||||
s.Require().NoError(err)
|
||||
return valuePath
|
||||
}
|
|
@ -1,20 +0,0 @@
|
|||
//go:build windows
|
||||
|
||||
package k8ssat
|
||||
|
||||
import (
|
||||
"os"
|
||||
"path/filepath"
|
||||
)
|
||||
|
||||
const (
|
||||
containerMountPointEnvVar = "CONTAINER_SANDBOX_MOUNT_POINT"
|
||||
)
|
||||
|
||||
func getDefaultTokenPath() string {
|
||||
mountPoint := os.Getenv(containerMountPointEnvVar)
|
||||
if mountPoint == "" {
|
||||
return filepath.FromSlash(defaultTokenPath)
|
||||
}
|
||||
return filepath.Join(mountPoint, defaultTokenPath)
|
||||
}
|
|
@ -1,65 +0,0 @@
|
|||
//go:build windows
|
||||
|
||||
package k8ssat
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/spiffe/go-spiffe/v2/spiffeid"
|
||||
"github.com/spiffe/spire/pkg/agent/plugin/nodeattestor"
|
||||
"github.com/spiffe/spire/pkg/common/catalog"
|
||||
"github.com/spiffe/spire/test/plugintest"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
func TestConfigureDefaultToken(t *testing.T) {
|
||||
for _, tt := range []struct {
|
||||
name string
|
||||
trustDomain string
|
||||
mountPoint string
|
||||
config string
|
||||
expectTokenPath string
|
||||
}{
|
||||
{
|
||||
name: "mountPoint set",
|
||||
trustDomain: "example.org",
|
||||
mountPoint: "c:\\somepath",
|
||||
config: `cluster = "production"`,
|
||||
expectTokenPath: "c:\\somepath\\var\\run\\secrets\\kubernetes.io\\serviceaccount\\token",
|
||||
},
|
||||
{
|
||||
name: "no mountPoint",
|
||||
trustDomain: "example.org",
|
||||
config: `cluster = "production"`,
|
||||
expectTokenPath: "\\var\\run\\secrets\\kubernetes.io\\serviceaccount\\token",
|
||||
},
|
||||
{
|
||||
name: "token path set on configuration",
|
||||
trustDomain: "example.org",
|
||||
mountPoint: "c:\\somepath",
|
||||
config: `
|
||||
cluster = "production"
|
||||
token_path = "c:\\token"`,
|
||||
expectTokenPath: "c:\\token",
|
||||
},
|
||||
} {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
if tt.mountPoint != "" {
|
||||
t.Setenv(containerMountPointEnvVar, tt.mountPoint)
|
||||
}
|
||||
|
||||
p := New()
|
||||
var err error
|
||||
plugintest.Load(t, builtin(p), new(nodeattestor.V1),
|
||||
plugintest.CaptureConfigureError(&err),
|
||||
plugintest.CoreConfig(catalog.CoreConfig{
|
||||
TrustDomain: spiffeid.RequireTrustDomainFromString(tt.trustDomain),
|
||||
}),
|
||||
plugintest.Configure(tt.config),
|
||||
)
|
||||
require.NoError(t, err)
|
||||
|
||||
require.Equal(t, tt.expectTokenPath, p.config.tokenPath)
|
||||
})
|
||||
}
|
||||
}
|
|
@@ -4,6 +4,7 @@ import (
    "context"
    "errors"
    "fmt"
    "slices"

    "github.com/spiffe/spire/pkg/agent/plugin/nodeattestor"
    "google.golang.org/grpc/codes"

@@ -82,7 +83,7 @@ func (b *ServerStreamBuilder) FailAndBuild(err error) nodeattestor.ServerStream
}

func (b *ServerStreamBuilder) addHandler(handler ServerStreamHandler) *ServerStreamBuilder {
    handlers := append([]ServerStreamHandler(nil), b.handlers...)
    handlers := slices.Clone(b.handlers)
    handlers = append(handlers, handler)
    return &ServerStreamBuilder{
        pluginName: b.pluginName,

@ -7,8 +7,8 @@ import (
|
|||
"strings"
|
||||
"sync"
|
||||
|
||||
"github.com/docker/docker/api/types"
|
||||
"github.com/docker/docker/api/types/container"
|
||||
"github.com/docker/docker/api/types/image"
|
||||
dockerclient "github.com/docker/docker/client"
|
||||
"github.com/hashicorp/go-hclog"
|
||||
"github.com/hashicorp/hcl"
|
||||
|
@ -43,8 +43,8 @@ func builtin(p *Plugin) catalog.BuiltIn {
|
|||
|
||||
// Docker is a subset of the docker client functionality, useful for mocking.
|
||||
type Docker interface {
|
||||
ContainerInspect(ctx context.Context, containerID string) (types.ContainerJSON, error)
|
||||
ImageInspectWithRaw(ctx context.Context, imageID string) (types.ImageInspect, []byte, error)
|
||||
ContainerInspect(ctx context.Context, containerID string) (container.InspectResponse, error)
|
||||
ImageInspectWithRaw(ctx context.Context, imageID string) (image.InspectResponse, []byte, error)
|
||||
}
|
||||
|
||||
type Plugin struct {
|
||||
|
@ -74,7 +74,7 @@ type dockerPluginConfig struct {
|
|||
|
||||
UnusedKeyPositions map[string][]token.Pos `hcl:",unusedKeyPositions"`
|
||||
|
||||
Experimental experimentalConfig `hcl:"experimental,omitempty" json:"experimental,omitempty"`
|
||||
Experimental experimentalConfig `hcl:"experimental,omitempty" json:"experimental"`
|
||||
|
||||
containerHelper *containerHelper
|
||||
dockerOpts []dockerclient.Opt
|
||||
|
@ -140,13 +140,10 @@ func (p *Plugin) Attest(ctx context.Context, req *workloadattestorv1.AttestReque
|
|||
return &workloadattestorv1.AttestResponse{}, nil
|
||||
}
|
||||
|
||||
var container types.ContainerJSON
|
||||
var container container.InspectResponse
|
||||
err = p.retryer.Retry(ctx, func() error {
|
||||
container, err = p.docker.ContainerInspect(ctx, containerID)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
return err
|
||||
})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
|
|
|
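The docker workload attestor hunks above narrow the plugin's Docker dependency to two methods and move it to the newer `container.InspectResponse`/`image.InspectResponse` types. A hedged sketch of a fake that satisfies that interface, assuming the docker client version used in the diff; the fake's names and behavior are illustrative, not SPIRE code:

```go
package dockerfake

import (
	"context"
	"errors"

	"github.com/docker/docker/api/types/container"
	"github.com/docker/docker/api/types/image"
)

// fakeDocker is an illustrative test double for the narrowed Docker interface
// shown in the diff above: it answers inspect calls for one known container.
type fakeDocker struct {
	knownID string
	config  *container.Config
}

func (f fakeDocker) ContainerInspect(_ context.Context, containerID string) (container.InspectResponse, error) {
	if containerID != f.knownID {
		return container.InspectResponse{}, errors.New("unknown container")
	}
	return container.InspectResponse{Config: f.config}, nil
}

func (f fakeDocker) ImageInspectWithRaw(_ context.Context, imageID string) (image.InspectResponse, []byte, error) {
	return image.InspectResponse{ID: imageID}, nil, nil
}
```
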
@@ -92,7 +92,7 @@ func (h *containerHelper) getContainerID(pID int32, log hclog.Logger) (string, e
    }

    extractor := containerinfo.Extractor{RootDir: h.rootDir, VerboseLogging: h.verboseContainerLocatorLogs}
    return extractor.GetContainerID(int(pID), log)
    return extractor.GetContainerID(pID, log)
}

func getDockerHost(c *dockerPluginConfig) string {

@ -8,8 +8,8 @@ import (
|
|||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/docker/docker/api/types"
|
||||
"github.com/docker/docker/api/types/container"
|
||||
"github.com/docker/docker/api/types/image"
|
||||
dockerclient "github.com/docker/docker/client"
|
||||
"github.com/hashicorp/go-hclog"
|
||||
"github.com/spiffe/go-spiffe/v2/spiffeid"
|
||||
|
@ -236,7 +236,6 @@ func TestDockerConfigDefault(t *testing.T) {
|
|||
|
||||
require.NotNil(t, p.docker)
|
||||
require.Equal(t, dockerclient.DefaultDockerHost, p.docker.(*dockerclient.Client).DaemonHost())
|
||||
require.Equal(t, "1.47", p.docker.(*dockerclient.Client).ClientVersion())
|
||||
verifyConfigDefault(t, p.c)
|
||||
}
|
||||
|
||||
|
@ -405,28 +404,28 @@ func newTestPlugin(t *testing.T, opts ...testPluginOpt) *Plugin {
|
|||
|
||||
type dockerError struct{}
|
||||
|
||||
func (dockerError) ContainerInspect(context.Context, string) (types.ContainerJSON, error) {
|
||||
return types.ContainerJSON{}, errors.New("docker error")
|
||||
func (dockerError) ContainerInspect(context.Context, string) (container.InspectResponse, error) {
|
||||
return container.InspectResponse{}, errors.New("docker error")
|
||||
}
|
||||
|
||||
func (dockerError) ImageInspectWithRaw(context.Context, string) (types.ImageInspect, []byte, error) {
|
||||
return types.ImageInspect{}, nil, errors.New("docker error")
|
||||
func (dockerError) ImageInspectWithRaw(context.Context, string) (image.InspectResponse, []byte, error) {
|
||||
return image.InspectResponse{}, nil, errors.New("docker error")
|
||||
}
|
||||
|
||||
type fakeContainer container.Config
|
||||
|
||||
func (f fakeContainer) ContainerInspect(_ context.Context, containerID string) (types.ContainerJSON, error) {
|
||||
func (f fakeContainer) ContainerInspect(_ context.Context, containerID string) (container.InspectResponse, error) {
|
||||
if containerID != testContainerID {
|
||||
return types.ContainerJSON{}, errors.New("expected test container ID")
|
||||
return container.InspectResponse{}, errors.New("expected test container ID")
|
||||
}
|
||||
config := container.Config(f)
|
||||
return types.ContainerJSON{
|
||||
return container.InspectResponse{
|
||||
Config: &config,
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (f fakeContainer) ImageInspectWithRaw(_ context.Context, imageName string) (types.ImageInspect, []byte, error) {
|
||||
return types.ImageInspect{ID: imageName, RepoDigests: []string{testImageID}}, nil, nil
|
||||
func (f fakeContainer) ImageInspectWithRaw(_ context.Context, imageName string) (image.InspectResponse, []byte, error) {
|
||||
return image.InspectResponse{ID: imageName, RepoDigests: []string{testImageID}}, nil, nil
|
||||
}
|
||||
|
||||
type fakeSigstoreVerifier struct {
|
||||
|
|
|
@@ -390,7 +390,7 @@ func (p *Plugin) Attest(ctx context.Context, req *workloadattestorv1.AttestReque
    }

    // wait a bit for containers to initialize before trying again.
    log.Warn("Container id not found", telemetry.RetryInterval, config.PollRetryInterval)
    log.Debug("Container id not found", telemetry.RetryInterval, config.PollRetryInterval)

    select {
    case <-p.clock.After(config.PollRetryInterval):

@@ -651,7 +651,7 @@ func (p *Plugin) getPodList(ctx context.Context, client *kubeletClient, cacheFor
        return result, nil
    }

    podList, err, _ := p.singleflight.Do("podList", func() (interface{}, error) {
    podList, err, _ := p.singleflight.Do("podList", func() (any, error) {
        result := p.getPodListCache()
        if result != nil {
            return result, nil

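The getPodList hunk funnels concurrent pod-list fetches through a singleflight group so that callers sharing a key share one in-flight request. An illustrative sketch of the pattern using golang.org/x/sync/singleflight directly; names are not taken from SPIRE:

```go
package main

import (
	"fmt"
	"sync"

	"golang.org/x/sync/singleflight"
)

func main() {
	var group singleflight.Group
	var mu sync.Mutex
	calls := 0

	fetch := func() (any, error) {
		mu.Lock()
		calls++
		mu.Unlock()
		return "pod-list", nil
	}

	var wg sync.WaitGroup
	for i := 0; i < 10; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			// Concurrent callers with the same key share one fetch.
			_, _, _ = group.Do("podList", fetch)
		}()
	}
	wg.Wait()

	fmt.Println("fetches performed:", calls) // typically far fewer than 10
}
```
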
@@ -65,7 +65,7 @@ func (h *containerHelper) GetPodUIDAndContainerID(pID int32, log hclog.Logger) (
    }

    extractor := containerinfo.Extractor{RootDir: h.rootDir, VerboseLogging: h.verboseContainerLocatorLogs}
    return extractor.GetPodUIDAndContainerID(int(pID), log)
    return extractor.GetPodUIDAndContainerID(pID, log)
}

func getPodUIDAndContainerIDFromCGroups(cgroups []cgroups.Cgroup) (types.UID, string, error) {

@@ -14,6 +14,7 @@ import (
    "net/http/httptest"
    "os"
    "path/filepath"
    "slices"
    "sync"
    "testing"
    "time"

@@ -951,8 +952,8 @@ func (s *Suite) requireAttestFailure(p workloadattestor.WorkloadAttestor, code c

func (s *Suite) requireSelectorsEqual(expected, actual []*common.Selector) {
    // assert the selectors (non-destructively sorting for consistency)
    actual = append([]*common.Selector(nil), actual...)
    expected = append([]*common.Selector(nil), expected...)
    actual = slices.Clone(actual)
    expected = slices.Clone(expected)
    util.SortSelectors(actual)
    util.SortSelectors(expected)
    s.RequireProtoListEqual(expected, actual)

@@ -11,6 +11,7 @@ import (
    "github.com/hashicorp/go-hclog"
    workloadattestorv1 "github.com/spiffe/spire-plugin-sdk/proto/spire/plugin/agent/workloadattestor/v1"
    "github.com/spiffe/spire/pkg/common/catalog"
    "github.com/spiffe/spire/pkg/common/util"
    "google.golang.org/grpc/codes"
    "google.golang.org/grpc/status"
)

@@ -55,7 +56,11 @@ func (p *Plugin) SetLogger(log hclog.Logger) {
}

func (p *Plugin) Attest(ctx context.Context, req *workloadattestorv1.AttestRequest) (*workloadattestorv1.AttestResponse, error) {
    uInfo, err := p.getUnitInfo(ctx, p, uint(req.Pid))
    pid, err := util.CheckedCast[uint](req.Pid)
    if err != nil {
        return nil, fmt.Errorf("invalid value for PID: %w", err)
    }
    uInfo, err := p.getUnitInfo(ctx, p, pid)
    if err != nil {
        return nil, err
    }

@@ -2,9 +2,11 @@ package workloadattestor

import (
    "context"
    "fmt"

    workloadattestorv1 "github.com/spiffe/spire-plugin-sdk/proto/spire/plugin/agent/workloadattestor/v1"
    "github.com/spiffe/spire/pkg/common/plugin"
    "github.com/spiffe/spire/pkg/common/util"
    "github.com/spiffe/spire/proto/spire/common"
)

@@ -14,8 +16,12 @@ type V1 struct {
}

func (v1 *V1) Attest(ctx context.Context, pid int) ([]*common.Selector, error) {
    pidInt32, err := util.CheckedCast[int32](pid)
    if err != nil {
        return nil, v1.WrapErr(fmt.Errorf("invalid value for PID: %w", err))
    }
    resp, err := v1.WorkloadAttestorPluginClient.Attest(ctx, &workloadattestorv1.AttestRequest{
        Pid: int32(pid),
        Pid: pidInt32,
    })
    if err != nil {
        return nil, v1.WrapErr(err)

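These hunks route PID conversions through a checked, generic integer cast instead of bare int32(pid)/uint(pid) conversions, so out-of-range values become errors rather than silently wrapping. A hedged sketch of such a helper; the real util.CheckedCast in SPIRE may differ in detail:

```go
package main

import (
	"fmt"
	"math"
)

type integer interface {
	~int | ~int8 | ~int16 | ~int32 | ~int64 | ~uint | ~uint8 | ~uint16 | ~uint32 | ~uint64 | ~uintptr
}

// checkedCast converts between integer types, returning an error instead of
// silently truncating or wrapping when the value does not fit in the target.
func checkedCast[To, From integer](v From) (To, error) {
	out := To(v)
	if From(out) != v || (out < 0) != (v < 0) {
		var zero To
		return zero, fmt.Errorf("value %d does not fit in the target type", v)
	}
	return out, nil
}

func main() {
	pid, err := checkedCast[int32](int64(12345))
	fmt.Println(pid, err) // 12345 <nil>

	_, err = checkedCast[int32](int64(math.MaxInt64))
	fmt.Println(err) // reports that the value does not fit
}
```
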
@@ -254,7 +254,11 @@ type processQueryer interface {

type processQuery struct{}

func (q *processQuery) OpenProcess(pid int32) (handle windows.Handle, err error) {
    return windows.OpenProcess(windows.PROCESS_QUERY_LIMITED_INFORMATION, false, uint32(pid))
    pidUint32, err := util.CheckedCast[uint32](pid)
    if err != nil {
        return 0, fmt.Errorf("invalid value for PID: %w", err)
    }
    return windows.OpenProcess(windows.PROCESS_QUERY_LIMITED_INFORMATION, false, pidUint32)
}

func (q *processQuery) OpenProcessToken(h windows.Handle, token *windows.Token) (err error) {

@ -0,0 +1,133 @@
|
|||
package trustbundlesources
|
||||
|
||||
import (
|
||||
"context"
|
||||
"crypto/x509"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"net"
|
||||
"net/http"
|
||||
"os"
|
||||
|
||||
"github.com/sirupsen/logrus"
|
||||
"github.com/spiffe/go-spiffe/v2/spiffeid"
|
||||
"github.com/spiffe/spire/pkg/common/bundleutil"
|
||||
"github.com/spiffe/spire/pkg/common/pemutil"
|
||||
)
|
||||
|
||||
type Bundle struct {
|
||||
config *Config
|
||||
log logrus.FieldLogger
|
||||
}
|
||||
|
||||
func New(config *Config, log logrus.FieldLogger) *Bundle {
|
||||
return &Bundle{
|
||||
config: config,
|
||||
log: log,
|
||||
}
|
||||
}
|
||||
|
||||
func (b *Bundle) GetBundle() ([]*x509.Certificate, bool, error) {
|
||||
var bundleBytes []byte
|
||||
var err error
|
||||
|
||||
switch {
|
||||
case b.config.TrustBundleURL != "":
|
||||
bundleBytes, err = downloadTrustBundle(b.config.TrustBundleURL, b.config.TrustBundleUnixSocket)
|
||||
if err != nil {
|
||||
return nil, false, err
|
||||
}
|
||||
case b.config.TrustBundlePath != "":
|
||||
bundleBytes, err = loadTrustBundle(b.config.TrustBundlePath)
|
||||
if err != nil {
|
||||
return nil, false, fmt.Errorf("could not parse trust bundle: %w", err)
|
||||
}
|
||||
default:
|
||||
// If InsecureBootstrap is configured, the bundle is not required
|
||||
if b.config.InsecureBootstrap {
|
||||
return nil, true, nil
|
||||
}
|
||||
}
|
||||
|
||||
bundle, err := parseTrustBundle(bundleBytes, b.config.TrustBundleFormat)
|
||||
if err != nil {
|
||||
return nil, false, err
|
||||
}
|
||||
|
||||
if len(bundle) == 0 {
|
||||
return nil, false, errors.New("no certificates found in trust bundle")
|
||||
}
|
||||
|
||||
return bundle, false, nil
|
||||
}
|
||||
|
||||
func (b *Bundle) GetInsecureBootstrap() bool {
|
||||
return b.config.InsecureBootstrap
|
||||
}
|
||||
|
||||
func parseTrustBundle(bundleBytes []byte, trustBundleContentType string) ([]*x509.Certificate, error) {
|
||||
switch trustBundleContentType {
|
||||
case BundleFormatPEM:
|
||||
bundle, err := pemutil.ParseCertificates(bundleBytes)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return bundle, nil
|
||||
case BundleFormatSPIFFE:
|
||||
bundle, err := bundleutil.Unmarshal(spiffeid.TrustDomain{}, bundleBytes)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("unable to parse SPIFFE trust bundle: %w", err)
|
||||
}
|
||||
return bundle.X509Authorities(), nil
|
||||
}
|
||||
|
||||
return nil, fmt.Errorf("unknown trust bundle format: %s", trustBundleContentType)
|
||||
}
|
||||
|
||||
func downloadTrustBundle(trustBundleURL string, trustBundleUnixSocket string) ([]byte, error) {
|
||||
var req *http.Request
|
||||
client := http.DefaultClient
|
||||
if trustBundleUnixSocket != "" {
|
||||
client = &http.Client{
|
||||
Transport: &http.Transport{
|
||||
DialContext: func(_ context.Context, _, _ string) (net.Conn, error) {
|
||||
return net.Dial("unix", trustBundleUnixSocket)
|
||||
},
|
||||
},
|
||||
}
|
||||
}
|
||||
req, err := http.NewRequest("GET", trustBundleURL, nil)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Download the trust bundle URL from the user specified URL
|
||||
// We use gosec -- the annotation below will disable a security check that URLs are not tainted
|
||||
/* #nosec G107 */
|
||||
resp, err := client.Do(req)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("unable to fetch trust bundle URL %s: %w", trustBundleURL, err)
|
||||
}
|
||||
|
||||
defer resp.Body.Close()
|
||||
|
||||
if resp.StatusCode != http.StatusOK {
|
||||
return nil, fmt.Errorf("error downloading trust bundle: %s", resp.Status)
|
||||
}
|
||||
pemBytes, err := io.ReadAll(resp.Body)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("unable to read from trust bundle URL %s: %w", trustBundleURL, err)
|
||||
}
|
||||
|
||||
return pemBytes, nil
|
||||
}
|
||||
|
||||
func loadTrustBundle(path string) ([]byte, error) {
|
||||
bundleBytes, err := os.ReadFile(path)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return bundleBytes, nil
|
||||
}
|
|
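downloadTrustBundle above optionally dials a Unix domain socket by overriding the HTTP transport's DialContext, so the URL's host is effectively ignored. A self-contained sketch of that technique; the socket path and URL below are placeholders:

```go
package main

import (
	"context"
	"fmt"
	"io"
	"net"
	"net/http"
)

// newUnixSocketClient returns an *http.Client whose connections all go to the
// given Unix socket, regardless of the host in the request URL.
func newUnixSocketClient(socketPath string) *http.Client {
	return &http.Client{
		Transport: &http.Transport{
			DialContext: func(_ context.Context, _, _ string) (net.Conn, error) {
				return net.Dial("unix", socketPath)
			},
		},
	}
}

func main() {
	client := newUnixSocketClient("/tmp/bundle.sock") // placeholder socket path

	// The host part of the URL is ignored by the custom dialer.
	resp, err := client.Get("http://localhost/trustbundle")
	if err != nil {
		fmt.Println("fetch failed:", err)
		return
	}
	defer resp.Body.Close()

	body, _ := io.ReadAll(resp.Body)
	fmt.Printf("got %d bytes\n", len(body))
}
```
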
@ -0,0 +1,262 @@
|
|||
package trustbundlesources
|
||||
|
||||
import (
|
||||
"io"
|
||||
"net"
|
||||
"net/http"
|
||||
"net/http/httptest"
|
||||
"os"
|
||||
"path"
|
||||
"path/filepath"
|
||||
"testing"
|
||||
|
||||
"github.com/sirupsen/logrus/hooks/test"
|
||||
"github.com/spiffe/spire/test/util"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
func TestGetBundle(t *testing.T) {
|
||||
testTrustBundlePath := path.Join(util.ProjectRoot(), "conf/agent/dummy_root_ca.crt")
|
||||
testTBSPIFFE := `{
|
||||
"keys": [
|
||||
{
|
||||
"use": "x509-svid",
|
||||
"kty": "EC",
|
||||
"crv": "P-384",
|
||||
"x": "WjB-nSGSxIYiznb84xu5WGDZj80nL7W1c3zf48Why0ma7Y7mCBKzfQkrgDguI4j0",
|
||||
"y": "Z-0_tDH_r8gtOtLLrIpuMwWHoe4vbVBFte1vj6Xt6WeE8lXwcCvLs_mcmvPqVK9j",
|
||||
"x5c": [
|
||||
"MIIBzDCCAVOgAwIBAgIJAJM4DhRH0vmuMAoGCCqGSM49BAMEMB4xCzAJBgNVBAYTAlVTMQ8wDQYDVQQKDAZTUElGRkUwHhcNMTgwNTEzMTkzMzQ3WhcNMjMwNTEyMTkzMzQ3WjAeMQswCQYDVQQGEwJVUzEPMA0GA1UECgwGU1BJRkZFMHYwEAYHKoZIzj0CAQYFK4EEACIDYgAEWjB+nSGSxIYiznb84xu5WGDZj80nL7W1c3zf48Why0ma7Y7mCBKzfQkrgDguI4j0Z+0/tDH/r8gtOtLLrIpuMwWHoe4vbVBFte1vj6Xt6WeE8lXwcCvLs/mcmvPqVK9jo10wWzAdBgNVHQ4EFgQUh6XzV6LwNazA+GTEVOdu07o5yOgwDwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMCAQYwGQYDVR0RBBIwEIYOc3BpZmZlOi8vbG9jYWwwCgYIKoZIzj0EAwQDZwAwZAIwE4Me13qMC9i6Fkx0h26y09QZIbuRqA9puLg9AeeAAyo5tBzRl1YL0KNEp02VKSYJAjBdeJvqjJ9wW55OGj1JQwDFD7kWeEB6oMlwPbI/5hEY3azJi16I0uN1JSYTSWGSqWc="
|
||||
]
|
||||
}
|
||||
]
|
||||
}`
|
||||
cases := []struct {
|
||||
msg string
|
||||
insecureBootstrap bool
|
||||
error bool
|
||||
trustBundlePath string
|
||||
trustBundleFormat string
|
||||
trustBundleURL bool
|
||||
trustBundleSocket string
|
||||
}{
|
||||
{
|
||||
msg: "insecure mode",
|
||||
insecureBootstrap: true,
|
||||
error: false,
|
||||
},
|
||||
{
|
||||
msg: "from file",
|
||||
insecureBootstrap: false,
|
||||
error: false,
|
||||
trustBundlePath: testTrustBundlePath,
|
||||
trustBundleFormat: BundleFormatPEM,
|
||||
},
|
||||
{
|
||||
msg: "from file wrong format",
|
||||
insecureBootstrap: false,
|
||||
error: true,
|
||||
trustBundlePath: testTrustBundlePath,
|
||||
trustBundleFormat: BundleFormatSPIFFE,
|
||||
},
|
||||
{
|
||||
msg: "from file that doesn't exist",
|
||||
insecureBootstrap: false,
|
||||
error: true,
|
||||
trustBundlePath: "doesnotexist",
|
||||
trustBundleFormat: BundleFormatPEM,
|
||||
},
|
||||
{
|
||||
msg: "from url ok",
|
||||
insecureBootstrap: false,
|
||||
error: false,
|
||||
trustBundleURL: true,
|
||||
trustBundleFormat: BundleFormatSPIFFE,
|
||||
},
|
||||
{
|
||||
msg: "from url socket, fail",
|
||||
insecureBootstrap: false,
|
||||
error: true,
|
||||
trustBundleURL: true,
|
||||
trustBundleFormat: BundleFormatSPIFFE,
|
||||
trustBundleSocket: "doesnotexist",
|
||||
},
|
||||
}
|
||||
for _, testCase := range cases {
|
||||
t.Run(testCase.msg, func(t *testing.T) {
|
||||
var err error
|
||||
c := Config{
|
||||
InsecureBootstrap: testCase.insecureBootstrap,
|
||||
TrustBundlePath: testCase.trustBundlePath,
|
||||
TrustBundleFormat: testCase.trustBundleFormat,
|
||||
TrustBundleUnixSocket: testCase.trustBundleSocket,
|
||||
}
|
||||
testServer := httptest.NewServer(http.HandlerFunc(
|
||||
func(w http.ResponseWriter, r *http.Request) {
|
||||
w.WriteHeader(http.StatusOK)
|
||||
_, _ = io.WriteString(w, testTBSPIFFE)
|
||||
}))
|
||||
if testCase.trustBundleURL {
|
||||
c.TrustBundleURL = testServer.URL
|
||||
}
|
||||
log, _ := test.NewNullLogger()
|
||||
tbs := New(&c, log)
|
||||
require.NoError(t, err)
|
||||
|
||||
trustBundle, insecureBootstrap, err := tbs.GetBundle()
|
||||
if testCase.error {
|
||||
require.Error(t, err)
|
||||
} else {
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, insecureBootstrap, testCase.insecureBootstrap)
|
||||
if testCase.trustBundlePath != "" {
|
||||
require.Equal(t, len(trustBundle), 1)
|
||||
}
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestDownloadTrustBundle(t *testing.T) {
|
||||
testTB, _ := os.ReadFile(path.Join(util.ProjectRoot(), "conf/agent/dummy_root_ca.crt"))
|
||||
testTBSPIFFE := `{
|
||||
"keys": [
|
||||
{
|
||||
"use": "x509-svid",
|
||||
"kty": "EC",
|
||||
"crv": "P-384",
|
||||
"x": "WjB-nSGSxIYiznb84xu5WGDZj80nL7W1c3zf48Why0ma7Y7mCBKzfQkrgDguI4j0",
|
||||
"y": "Z-0_tDH_r8gtOtLLrIpuMwWHoe4vbVBFte1vj6Xt6WeE8lXwcCvLs_mcmvPqVK9j",
|
||||
"x5c": [
|
||||
"MIIBzDCCAVOgAwIBAgIJAJM4DhRH0vmuMAoGCCqGSM49BAMEMB4xCzAJBgNVBAYTAlVTMQ8wDQYDVQQKDAZTUElGRkUwHhcNMTgwNTEzMTkzMzQ3WhcNMjMwNTEyMTkzMzQ3WjAeMQswCQYDVQQGEwJVUzEPMA0GA1UECgwGU1BJRkZFMHYwEAYHKoZIzj0CAQYFK4EEACIDYgAEWjB+nSGSxIYiznb84xu5WGDZj80nL7W1c3zf48Why0ma7Y7mCBKzfQkrgDguI4j0Z+0/tDH/r8gtOtLLrIpuMwWHoe4vbVBFte1vj6Xt6WeE8lXwcCvLs/mcmvPqVK9jo10wWzAdBgNVHQ4EFgQUh6XzV6LwNazA+GTEVOdu07o5yOgwDwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMCAQYwGQYDVR0RBBIwEIYOc3BpZmZlOi8vbG9jYWwwCgYIKoZIzj0EAwQDZwAwZAIwE4Me13qMC9i6Fkx0h26y09QZIbuRqA9puLg9AeeAAyo5tBzRl1YL0KNEp02VKSYJAjBdeJvqjJ9wW55OGj1JQwDFD7kWeEB6oMlwPbI/5hEY3azJi16I0uN1JSYTSWGSqWc="
|
||||
]
|
||||
}
|
||||
]
|
||||
}`
|
||||
|
||||
	cases := []struct {
		msg                 string
		status              int
		fileContents        string
		format              string
		expectDownloadError bool
		expectParseError    bool
		unixSocket          bool
	}{
		{
			msg:                 "if URL is not found, should be an error",
			status:              http.StatusNotFound,
			fileContents:        "",
			format:              BundleFormatPEM,
			expectDownloadError: true,
			expectParseError:    false,
			unixSocket:          false,
		},
		{
			msg:                 "if URL returns error 500, should be an error",
			status:              http.StatusInternalServerError,
			fileContents:        "",
			format:              BundleFormatPEM,
			expectDownloadError: true,
			expectParseError:    false,
			unixSocket:          false,
		},
		{
			msg:                 "if file is not parseable, should be an error",
			status:              http.StatusOK,
			fileContents:        "NON PEM PARSEABLE TEXT HERE",
			format:              BundleFormatPEM,
			expectDownloadError: false,
			expectParseError:    true,
			unixSocket:          false,
		},
		{
			msg:                 "if file is empty, should be an error",
			status:              http.StatusOK,
			fileContents:        "",
			format:              BundleFormatPEM,
			expectDownloadError: false,
			expectParseError:    true,
			unixSocket:          false,
		},
		{
			msg:                 "if file is valid, should not be an error",
			status:              http.StatusOK,
			fileContents:        string(testTB),
			format:              BundleFormatPEM,
			expectDownloadError: false,
			expectParseError:    false,
			unixSocket:          false,
		},
		{
			msg:                 "if file is not parseable, format is SPIFFE, should not be an error",
			status:              http.StatusOK,
			fileContents:        "[}",
			format:              BundleFormatSPIFFE,
			expectDownloadError: false,
			expectParseError:    true,
			unixSocket:          false,
		},
		{
			msg:                 "if file is valid, format is SPIFFE, should not be an error",
			status:              http.StatusOK,
			fileContents:        testTBSPIFFE,
			format:              BundleFormatSPIFFE,
			expectDownloadError: false,
			expectParseError:    false,
			unixSocket:          false,
		},
		{
			msg:                 "if file is valid, format is SPIFFE, unix socket true, should not be an error",
			status:              http.StatusOK,
			fileContents:        testTBSPIFFE,
			format:              BundleFormatSPIFFE,
			expectDownloadError: false,
			expectParseError:    false,
			unixSocket:          true,
		},
	}

	for _, testCase := range cases {
		t.Run(testCase.msg, func(t *testing.T) {
			var unixSocket string
			var err error
			var bundleBytes []byte
			if testCase.unixSocket {
				tempDir, err := os.MkdirTemp("", "my-temp-dir-*")
				require.NoError(t, err)
				defer os.RemoveAll(tempDir)
				unixSocket = filepath.Join(tempDir, "socket")
			}
			testServer := httptest.NewUnstartedServer(http.HandlerFunc(
				func(w http.ResponseWriter, r *http.Request) {
					w.WriteHeader(testCase.status)
					_, _ = io.WriteString(w, testCase.fileContents)
					// if err != nil {
					// 	return
					// }
				}))
			if testCase.unixSocket {
				testServer.Listener, err = net.Listen("unix", unixSocket)
				require.NoError(t, err)
				testServer.Start()
				bundleBytes, err = downloadTrustBundle("http://localhost/trustbundle", unixSocket)
			} else {
				testServer.Start()
				bundleBytes, err = downloadTrustBundle(testServer.URL, "")
			}
			if testCase.expectDownloadError {
				require.Error(t, err)
			} else {
				require.NoError(t, err)

				_, err := parseTrustBundle(bundleBytes, testCase.format)
				if testCase.expectParseError {
					require.Error(t, err)
				} else {
					require.NoError(t, err)
				}
			}
		})
	}
}
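Note: the unix-socket cases above start the httptest server on a unix listener and then fetch "http://localhost/trustbundle" through that socket. A minimal sketch of how an HTTP client can be pointed at a unix socket, for illustration only; this is not necessarily how downloadTrustBundle is implemented:

package main

import (
	"context"
	"net"
	"net/http"
)

// newUnixSocketClient returns an *http.Client whose requests are dialed over
// the given unix socket instead of TCP; the host in the request URL is ignored.
func newUnixSocketClient(socketPath string) *http.Client {
	return &http.Client{
		Transport: &http.Transport{
			DialContext: func(ctx context.Context, _, _ string) (net.Conn, error) {
				return (&net.Dialer{}).DialContext(ctx, "unix", socketPath)
			},
		},
	}
}

With such a client, newUnixSocketClient(unixSocket).Get("http://localhost/trustbundle") would reach the test server regardless of the URL's host portion.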
@@ -0,0 +1,14 @@
package trustbundlesources

const (
	BundleFormatPEM    = "pem"
	BundleFormatSPIFFE = "spiffe"
)

type Config struct {
	InsecureBootstrap     bool
	TrustBundleFormat     string
	TrustBundlePath       string
	TrustBundleURL        string
	TrustBundleUnixSocket string
}
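Note: a minimal sketch of how the new Config might be wired up, based on the New and GetBundle calls seen in the tests above. The import path, logger type, and file path are assumptions for illustration:

package main

import (
	"log"

	"github.com/sirupsen/logrus"
	"github.com/spiffe/spire/pkg/agent/trustbundlesources" // assumed import path for the new package
)

func main() {
	c := &trustbundlesources.Config{
		InsecureBootstrap: false,
		TrustBundleFormat: trustbundlesources.BundleFormatPEM,
		TrustBundlePath:   "/opt/spire/conf/agent/bootstrap.crt", // placeholder path
	}
	source := trustbundlesources.New(c, logrus.New()) // logger type assumed from the tests
	bundle, insecureBootstrap, err := source.GetBundle()
	if err != nil {
		log.Fatal(err)
	}
	_, _ = bundle, insecureBootstrap
}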
@@ -18,11 +18,6 @@ func UntrackedUDSCredentials() credentials.TransportCredentials {
	return untrackedUDSCredentials{}
}

-func IsUntrackedUDSAuth(authInfo credentials.AuthInfo) bool {
-	_, ok := authInfo.(UntrackedUDSAuthInfo)
-	return ok
-}
-
type UntrackedUDSAuthInfo struct{}

func (UntrackedUDSAuthInfo) AuthType() string { return "untracked-uds" }
@@ -31,19 +31,11 @@ func (r *sizeLimitedBackOff) NextBackOff() int {
}

func (r *sizeLimitedBackOff) Success() {
-	newSize := r.currentSize * 2
-	if newSize > r.maxSize {
-		newSize = r.maxSize
-	}
-	r.currentSize = newSize
+	r.currentSize = min(r.currentSize*2, r.maxSize)
}

func (r *sizeLimitedBackOff) Failure() {
-	newSize := r.currentSize / 2
-	if newSize < 1 {
-		newSize = 1
-	}
-	r.currentSize = newSize
+	r.currentSize = max(r.currentSize/2, 1)
}

func (r *sizeLimitedBackOff) Reset() {
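Note: the refactor above leans on Go 1.21's built-in min and max functions in place of the manual clamping; for example:

package main

import "fmt"

func main() {
	currentSize, maxSize := 6, 10
	// Doubling with an upper bound, as in Success():
	fmt.Println(min(currentSize*2, maxSize)) // 10
	// Halving with a lower bound, as in Failure():
	fmt.Println(max(currentSize/2, 1)) // 3
}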
@@ -73,12 +73,11 @@ func TestAppendFlag(t *testing.T) {
				fs.Var(defaultFlagValue, flagName, "")
			}
			err := fs.Parse(c.input)
-			switch {
-			case err == nil:
+			if err == nil {
				if c.expectError {
					t.Fatal("expected an error but got none")
				}
-			default:
+			} else {
				if !c.expectError {
					t.Fatalf("got unexpected error: %v", err)
				}
@@ -28,12 +28,11 @@ func TestStrFormatType(t *testing.T) {
	for _, c := range cases {
		t.Run(c.name, func(t *testing.T) {
			ft, err := strToFormatType(c.input)
-			switch {
-			case err == nil:
+			if err == nil {
				if c.expectError {
					t.Error("expected error but got none")
				}
-			default:
+			} else {
				if !c.expectError {
					t.Errorf("got unexpected error: %v", err)
				}
@@ -12,7 +12,7 @@ func Print(err error, stdout, _ io.Writer) error {
	}

	if err.Error() == "" {
-		err = errors.New("An unknown error occurred")
+		err = errors.New("an unknown error occurred")
	}

	_, e := fmt.Fprintln(stdout, err.Error())
@@ -24,7 +24,7 @@ func TestPrint(t *testing.T) {
		{
			name:   "error_without_string_is_still_an_error",
			err:    errors.New(""),
-			stdout: "An unknown error occurred\n",
+			stdout: "an unknown error occurred\n",
			stderr: "",
		},
		{
@@ -5,11 +5,13 @@ package process
import (
	"errors"
	"fmt"
+	"slices"
	"strings"
	"unsafe"

	"github.com/hashicorp/go-hclog"
	"github.com/spiffe/spire/pkg/common/telemetry"
+	"github.com/spiffe/spire/pkg/common/util"
	"golang.org/x/sys/windows"
)
@@ -46,7 +48,11 @@ func (h *helper) GetContainerIDByProcess(pID int32, log hclog.Logger) (string, e
	currentProcess := h.wapi.CurrentProcess()

	// Duplicate the process handle that we want to validate, with limited permissions.
-	childProcessHandle, err := h.wapi.OpenProcess(windows.PROCESS_QUERY_LIMITED_INFORMATION, false, uint32(pID))
+	pidUint32, err := util.CheckedCast[uint32](pID)
+	if err != nil {
+		return "", fmt.Errorf("invalid value for PID: %w", err)
+	}
+	childProcessHandle, err := h.wapi.OpenProcess(windows.PROCESS_QUERY_LIMITED_INFORMATION, false, pidUint32)
	if err != nil {
		return "", fmt.Errorf("failed to open child process: %w", err)
	}
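Note: util.CheckedCast is SPIRE's own helper here; the point of the change is to reject a PID that cannot be represented as uint32 rather than letting uint32(pID) silently wrap it. A hypothetical stand-in showing the idea, not the actual util implementation:

package main

import "fmt"

// checkedInt32ToUint32 is a hypothetical stand-in for util.CheckedCast[uint32]:
// it rejects negative values instead of truncating or wrapping them.
func checkedInt32ToUint32(v int32) (uint32, error) {
	if v < 0 {
		return 0, fmt.Errorf("value %d does not fit in uint32", v)
	}
	return uint32(v), nil
}

func main() {
	if _, err := checkedInt32ToUint32(-1); err != nil {
		fmt.Println(err) // value -1 does not fit in uint32
	}
}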
@@ -63,12 +69,7 @@ func (h *helper) GetContainerIDByProcess(pID int32, log hclog.Logger) (string, e

	// Verify if process ID is a vmcompute process
	isVmcomputeProcess := func(pID uint32) bool {
-		for _, vmID := range vmComputeProcessIds {
-			if pID == vmID {
-				return true
-			}
-		}
-		return false
+		return slices.Contains(vmComputeProcessIds, pID)
	}

	var jobNames []string
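Note: slices.Contains has been in the Go standard library since 1.21 and replaces the hand-rolled membership loop; for example:

package main

import (
	"fmt"
	"slices"
)

func main() {
	vmComputeProcessIds := []uint32{1234, 5678} // example values only
	fmt.Println(slices.Contains(vmComputeProcessIds, uint32(5678))) // true
	fmt.Println(slices.Contains(vmComputeProcessIds, uint32(42)))   // false
}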
@@ -6,6 +6,7 @@ import (
	"syscall"
	"unsafe"

+	"github.com/spiffe/spire/pkg/common/util"
	"golang.org/x/sys/windows"
)
@@ -90,7 +91,7 @@ func (a *api) GetObjectType(handle windows.Handle) (string, error) {
	length := uint32(0)

	status := ntQueryObject(handle, ObjectTypeInformationClass,
-		&buffer[0], uint32(len(buffer)), &length)
+		&buffer[0], util.MustCast[uint32](len(buffer)), &length)
	if status != windows.STATUS_SUCCESS {
		return "", status
	}
@@ -104,7 +105,7 @@ func (a *api) GetObjectName(handle windows.Handle) (string, error) {
	var length uint32

	status := ntQueryObject(handle, ObjectNameInformationClass,
-		&buffer[0], uint32(len(buffer)), &length)
+		&buffer[0], util.MustCast[uint32](len(buffer)), &length)
	if status != windows.STATUS_SUCCESS {
		return "", status
	}
@@ -121,7 +122,7 @@ func (a *api) QuerySystemExtendedHandleInformation() ([]SystemHandleInformationE
	status = ntQuerySystemInformation(
		windows.SystemExtendedHandleInformation,
		unsafe.Pointer(&buffer[0]),
-		uint32(len(buffer)),
+		util.MustCast[uint32](len(buffer)),
		&retLen,
	)
@@ -40,16 +40,16 @@ type Extractor struct {
	VerboseLogging bool
}

-func (e *Extractor) GetContainerID(pid int, log hclog.Logger) (string, error) {
+func (e *Extractor) GetContainerID(pid int32, log hclog.Logger) (string, error) {
	_, containerID, err := e.extractInfo(pid, log, false)
	return containerID, err
}

-func (e *Extractor) GetPodUIDAndContainerID(pid int, log hclog.Logger) (types.UID, string, error) {
+func (e *Extractor) GetPodUIDAndContainerID(pid int32, log hclog.Logger) (types.UID, string, error) {
	return e.extractInfo(pid, log, true)
}

-func (e *Extractor) extractInfo(pid int, log hclog.Logger, extractPodUID bool) (types.UID, string, error) {
+func (e *Extractor) extractInfo(pid int32, log hclog.Logger, extractPodUID bool) (types.UID, string, error) {
	// Try to get the information from /proc/pid/mountinfo first. Otherwise,
	// fall back to /proc/pid/cgroup. If it isn't in mountinfo, then the
	// workload being attested likely originates in the same Pod as the agent.
@@ -74,7 +74,7 @@ func (e *Extractor) extractInfo(pid int, log hclog.Logger, extractPodUID bool) (
	return podUID, containerID, nil
}

-func (e *Extractor) extractPodUIDAndContainerIDFromMountInfo(pid int, log hclog.Logger, extractPodUID bool) (types.UID, string, error) {
+func (e *Extractor) extractPodUIDAndContainerIDFromMountInfo(pid int32, log hclog.Logger, extractPodUID bool) (types.UID, string, error) {
	mountInfoPath := filepath.Join(e.RootDir, "/proc", fmt.Sprint(pid), "mountinfo")

	mountInfos, err := mount.ParseMountInfo(mountInfoPath)
@@ -122,8 +122,8 @@ func (e *Extractor) extractPodUIDAndContainerIDFromMountInfo(pid int, log hclog.
	return ex.PodUID(), ex.ContainerID(), nil
}

-func (e *Extractor) extractPodUIDAndContainerIDFromCGroups(pid int, log hclog.Logger, extractPodUID bool) (types.UID, string, error) {
-	cgroups, err := cgroups.GetCgroups(int32(pid), dirFS(e.RootDir))
+func (e *Extractor) extractPodUIDAndContainerIDFromCGroups(pid int32, log hclog.Logger, extractPodUID bool) (types.UID, string, error) {
+	cgroups, err := cgroups.GetCgroups(pid, dirFS(e.RootDir))
	if err != nil {
		if errors.Is(err, fs.ErrNotExist) {
			return "", "", nil
@@ -73,12 +73,6 @@ func RequireToPluginFromCertificates(x509Certificates []*x509.Certificate) []*pl
	return pbs
}

-func RequireToPluginFromCertificate(x509Certificate *x509.Certificate) *plugintypes.X509Certificate {
-	pb, err := ToPluginFromCertificate(x509Certificate)
-	panicOnError(err)
-	return pb
-}
-
func panicOnError(err error) {
	if err != nil {
		panic(err)
@@ -101,7 +101,7 @@ func (m *DiskCertManager) syncCertificateFiles() {
		return
	}

-	if certFileInfo.ModTime() != m.certLastModified || keyFileInfo.ModTime() != m.keyLastModified {
+	if !certFileInfo.ModTime().Equal(m.certLastModified) || !keyFileInfo.ModTime().Equal(m.keyLastModified) {
		m.log.Info("File change detected, reloading certificate and key...")

		if err := m.loadCert(); err != nil {