Compare commits
235 Commits
v0.5.1-ci-
...
main
Author | SHA1 | Date |
---|---|---|
|
ce74c12157 | |
|
9fd8effc53 | |
|
d57af95486 | |
|
e2588ff233 | |
|
8afe8efdaa | |
|
a1de8c1ef1 | |
|
a6e10ae792 | |
|
2d61d6e8f9 | |
|
8acef9d08f | |
|
298405f149 | |
|
44464dd1e3 | |
|
81ba92c2c7 | |
|
bf2f8f7e60 | |
|
ab54cae41c | |
|
bbe4c295d2 | |
|
431a598d23 | |
|
9b644f999e | |
|
6e67b814dd | |
|
d1e381df4a | |
|
83ee07fe7d | |
|
ac6b057e65 | |
|
9fa8cd7d55 | |
|
6e179656be | |
|
e67f7750fc | |
|
5b37ff7323 | |
|
36d6e6da78 | |
|
d082fb5c24 | |
|
4622a01a92 | |
|
de1993c5dd | |
|
b11e53f18f | |
|
ee2e37e0a7 | |
|
1187777fb5 | |
|
8733c4174d | |
|
be4bf1c533 | |
|
5a4f7c7cdd | |
|
067191ee67 | |
|
cdfd8fecd7 | |
|
c819235763 | |
|
61f42ba29a | |
|
38b38d1ea0 | |
|
641c44e3b9 | |
|
d9aacff17e | |
|
dcae036452 | |
|
916b06c7b3 | |
|
0b2226dd90 | |
|
087434a725 | |
|
f3b17ec991 | |
|
a4d8e11d8c | |
|
b289ad875e | |
|
16ff0df093 | |
|
f041ffc9b3 | |
|
0fd33ad844 | |
|
b991165485 | |
|
244f9b8523 | |
|
7d3f227466 | |
|
76e56375ce | |
|
99e5f845e8 | |
|
9100769d92 | |
|
0ec9841b4c | |
|
e799d2da83 | |
|
5636995a0d | |
|
2265a0228d | |
|
a720fe8d54 | |
|
96e9001d19 | |
|
c5528e20f5 | |
|
012d661fe0 | |
|
bbe3d7ba18 | |
|
73afb5b464 | |
|
17c97df597 | |
|
d0c43990f8 | |
|
0cd8eab2db | |
|
ff2c7e4970 | |
|
1508b543ca | |
|
69d36edc2c | |
|
a1645e06bd | |
|
81436ee41e | |
|
1471abe0c7 | |
|
68d41794f4 | |
|
3da9b7d7a0 | |
|
6fb399dc06 | |
|
870f7c6cd0 | |
|
be7b1c663b | |
|
ad5cecdb58 | |
|
0ccc6da4d8 | |
|
22287ea6e6 | |
|
c19e245537 | |
|
7668659344 | |
|
818079e6ec | |
|
2c82045d19 | |
|
966d0a7385 | |
|
26c929b0e8 | |
|
323a58f954 | |
|
06ca899515 | |
|
f23cc3979a | |
|
dd48a97988 | |
|
ae09fafd0c | |
|
3833432e56 | |
|
59fe7fc633 | |
|
4dcbce3613 | |
|
21decd1bd6 | |
|
1aedbc71f6 | |
|
0a9a33ebe9 | |
|
34b8bb734a | |
|
b9373365c6 | |
|
b4bf7840b4 | |
|
e882550496 | |
|
e53b1c9d8d | |
|
b3041b7317 | |
|
63774e09a3 | |
|
51bb817328 | |
|
81fdcd7ace | |
|
aa2eb72156 | |
|
371de79569 | |
|
02212f67a7 | |
|
b867a95198 | |
|
c837f51c54 | |
|
0cc765fe6a | |
|
5cfc934f8c | |
|
9061b8c5f0 | |
|
f1d8f82444 | |
|
681b7c3008 | |
|
6e0877cb52 | |
|
915966d479 | |
|
cd051a2d1b | |
|
2ea16ba6ac | |
|
9202924749 | |
|
c7f09fcb2b | |
|
416c8a8893 | |
|
bc9fa39cba | |
|
775e9e1683 | |
|
c5e9d8cf8c | |
|
c302df9ba1 | |
|
046e126b09 | |
|
f587c3553c | |
|
8e5f039f32 | |
|
ad514159c4 | |
|
cbf0470bbe | |
|
677d3c5534 | |
|
fa0792c798 | |
|
995601495c | |
|
53a116054e | |
|
3a98ce47f6 | |
|
a4c6abd9fb | |
|
dec113447c | |
|
7f0442027f | |
|
34f264b379 | |
|
7ec4815bd9 | |
|
16ae2f5262 | |
|
5f7f57ab93 | |
|
c7f3ec0d2b | |
|
3632c7a8b1 | |
|
b41c555e2a | |
|
dd0f63d24d | |
|
4e6f88d9f3 | |
|
c7fb2db0d0 | |
|
91ad46df1a | |
|
3eb578c686 | |
|
1305af3c75 | |
|
95006e6032 | |
|
919c0cfc38 | |
|
860e3747d2 | |
|
ada62c1754 | |
|
8fca278bc9 | |
|
847cb69492 | |
|
1322270f59 | |
|
e2531e47f4 | |
|
37ff6e827f | |
|
dc14d46ddc | |
|
9b9b4e9712 | |
|
08fae4a941 | |
|
09de7aa4be | |
|
f43663bdae | |
|
1eb71d9de2 | |
|
9fb81ae386 | |
|
7e7efeddd4 | |
|
3b4e6fd719 | |
|
4ece4441c9 | |
|
a943243334 | |
|
3315095c52 | |
|
bb6b756977 | |
|
e6467ccace | |
|
745d845fdf | |
|
662ed9f5ed | |
|
79de13dc9b | |
|
adb791097d | |
|
468ee8781c | |
|
8360d8645c | |
|
1570bef651 | |
|
2386ce3245 | |
|
02d4e2c74e | |
|
dd72a0e8f8 | |
|
55b8cfb14b | |
|
f39a0ce365 | |
|
29b87aa982 | |
|
ab2a157f97 | |
|
d0e318e54d | |
|
70a46a0cec | |
|
25a45c3631 | |
|
f5b5aa46d3 | |
|
396259f331 | |
|
45c10121ca | |
|
2f32d39108 | |
|
1570b535f6 | |
|
05ad43ab85 | |
|
be21781aa2 | |
|
e0cfc1300f | |
|
fff8230233 | |
|
f872a7f4c5 | |
|
537ebb1c1e | |
|
4ac94ac3bf | |
|
da78be34cc | |
|
b52c2a1ed3 | |
|
d8c9aff3da | |
|
5ed399f6fc | |
|
800f3b68ab | |
|
844fa1bfde | |
|
6038773b10 | |
|
ff94753fd3 | |
|
f45aced504 | |
|
a0a27a47e2 | |
|
9886f9e325 | |
|
b2ace767c8 | |
|
3b8f3e858a | |
|
60a63ff6aa | |
|
47d399fc7a | |
|
5fce8cdf34 | |
|
45fe536153 | |
|
6ac9b62f0f | |
|
3b0695cb63 | |
|
8db289d088 | |
|
af9e13d100 | |
|
d1eb12949d | |
|
bc248b68e3 | |
|
a9537b557c | |
|
d7913a6789 |
|
@ -26,6 +26,10 @@ updates:
|
|||
interval: "weekly"
|
||||
commit-message:
|
||||
prefix: "chore"
|
||||
groups:
|
||||
github-actions:
|
||||
patterns:
|
||||
- "*"
|
||||
- package-ecosystem: docker
|
||||
directory: /
|
||||
schedule:
|
||||
|
|
|
@ -55,20 +55,20 @@ jobs:
|
|||
|
||||
steps:
|
||||
- name: Harden Runner
|
||||
uses: step-security/harden-runner@17d0e2bd7d51742c71671bd19fa12bdc9d40a3d6 # v2.8.1
|
||||
uses: step-security/harden-runner@6c439dc8bdf85cadbbce9ed30d1c7b959517bc49 # v2.12.2
|
||||
with:
|
||||
egress-policy: audit
|
||||
|
||||
- name: Checkout repository
|
||||
uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7
|
||||
uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
|
||||
|
||||
- uses: actions/setup-go@0a12ed9d6a96ab950c8f026ed9f722fe0da7ef32 # v5.0.2
|
||||
- uses: actions/setup-go@d35c59abb061a4a6fb18e82ac0862c26744d6ab5 # v5.5.0
|
||||
with:
|
||||
go-version-file: "go.mod"
|
||||
|
||||
# Initializes the CodeQL tools for scanning.
|
||||
- name: Initialize CodeQL
|
||||
uses: github/codeql-action/init@4fa2a7953630fd2f3fb380f21be14ede0169dd4f # v3.25.12
|
||||
uses: github/codeql-action/init@181d5eefc20863364f96762470ba6f862bdef56b # v3.29.2
|
||||
with:
|
||||
languages: ${{ matrix.language }}
|
||||
# If you wish to specify custom queries, you can do so here or in a config file.
|
||||
|
@ -78,7 +78,7 @@ jobs:
|
|||
# Autobuild attempts to build any compiled languages (C/C++, C#, or Java).
|
||||
# If this step fails, then you should remove it and run the build manually (see below)
|
||||
- name: Autobuild
|
||||
uses: github/codeql-action/autobuild@4fa2a7953630fd2f3fb380f21be14ede0169dd4f # v3.25.12
|
||||
uses: github/codeql-action/autobuild@181d5eefc20863364f96762470ba6f862bdef56b # v3.29.2
|
||||
|
||||
# ℹ️ Command-line programs to run using the OS shell.
|
||||
# 📚 See https://docs.github.com/en/actions/using-workflows/workflow-syntax-for-github-actions#jobsjob_idstepsrun
|
||||
|
@ -91,6 +91,6 @@ jobs:
|
|||
# ./location_of_script_within_repo/buildscript.sh
|
||||
|
||||
- name: Perform CodeQL Analysis
|
||||
uses: github/codeql-action/analyze@4fa2a7953630fd2f3fb380f21be14ede0169dd4f # v3.25.12
|
||||
uses: github/codeql-action/analyze@181d5eefc20863364f96762470ba6f862bdef56b # v3.29.2
|
||||
with:
|
||||
category: "/language:${{matrix.language}}"
|
||||
|
|
|
@ -21,6 +21,10 @@ on:
|
|||
- master
|
||||
- main
|
||||
pull_request:
|
||||
paths:
|
||||
- 'ent/**'
|
||||
- 'ent.*'
|
||||
- '.github/workflows/db-migrations.yml'
|
||||
workflow_dispatch:
|
||||
|
||||
permissions:
|
||||
|
@ -32,11 +36,11 @@ jobs:
|
|||
name: db-migrations
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7
|
||||
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
|
||||
|
||||
- uses: actions/setup-go@0a12ed9d6a96ab950c8f026ed9f722fe0da7ef32 # v5.0.2
|
||||
- uses: actions/setup-go@d35c59abb061a4a6fb18e82ac0862c26744d6ab5 # v5.5.0
|
||||
with:
|
||||
go-version: '1.21.x'
|
||||
go-version: '1.22.x'
|
||||
|
||||
- name: Check DB Migrations
|
||||
run: |
|
||||
|
|
|
@ -31,11 +31,11 @@ jobs:
|
|||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- name: Harden Runner
|
||||
uses: step-security/harden-runner@17d0e2bd7d51742c71671bd19fa12bdc9d40a3d6 # v2.8.1
|
||||
uses: step-security/harden-runner@6c439dc8bdf85cadbbce9ed30d1c7b959517bc49 # v2.12.2
|
||||
with:
|
||||
egress-policy: audit
|
||||
|
||||
- name: 'Checkout Repository'
|
||||
uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7
|
||||
uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
|
||||
- name: 'Dependency Review'
|
||||
uses: actions/dependency-review-action@5a2ce3f5b92ee19cbb1541a4984c76d921601d7c # v4.3.4
|
||||
uses: actions/dependency-review-action@da24556b548a50705dd671f47852072ea4c105d9 # v4.7.1
|
||||
|
|
|
@ -34,9 +34,9 @@ jobs:
|
|||
steps:
|
||||
- if: ${{ env.FOSSA_API_KEY != '' }}
|
||||
name: "Checkout Code"
|
||||
uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7
|
||||
uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
|
||||
- if: ${{ env.FOSSA_API_KEY != '' }}
|
||||
name: "Run FOSSA Scan"
|
||||
uses: fossas/fossa-action@47ef11b1e1e3812e88dae436ccbd2d0cbd1adab0 # v1.3.3
|
||||
uses: fossas/fossa-action@3ebcea1862c6ffbd5cf1b4d0bd6b3fe7bd6f2cac # v1.7.0
|
||||
with:
|
||||
api-key: ${{ env.FOSSA_API_KEY }}
|
||||
|
|
|
@ -30,18 +30,18 @@ jobs:
|
|||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- name: Harden Runner
|
||||
uses: step-security/harden-runner@17d0e2bd7d51742c71671bd19fa12bdc9d40a3d6 # v2.8.1
|
||||
uses: step-security/harden-runner@6c439dc8bdf85cadbbce9ed30d1c7b959517bc49 # v2.12.2
|
||||
with:
|
||||
egress-policy: audit
|
||||
|
||||
- uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7
|
||||
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
|
||||
|
||||
- uses: actions/setup-go@0a12ed9d6a96ab950c8f026ed9f722fe0da7ef32 # v5.0.2
|
||||
- uses: actions/setup-go@d35c59abb061a4a6fb18e82ac0862c26744d6ab5 # v5.5.0
|
||||
with:
|
||||
go-version-file: "go.mod"
|
||||
|
||||
- name: golangci-lint
|
||||
uses: golangci/golangci-lint-action@a4f60bb28d35aeee14e6880718e0c85ff1882e64 # v6.0.1
|
||||
uses: golangci/golangci-lint-action@4afd733a84b1f43292c63897423277bb7f4313a9 # v8.0.0
|
||||
with:
|
||||
version: latest
|
||||
args: --timeout=3m
|
||||
|
|
|
@ -28,7 +28,7 @@ jobs:
|
|||
issues: write
|
||||
steps:
|
||||
- name: Harden Runner
|
||||
uses: step-security/harden-runner@17d0e2bd7d51742c71671bd19fa12bdc9d40a3d6 # v2.8.1
|
||||
uses: step-security/harden-runner@6c439dc8bdf85cadbbce9ed30d1c7b959517bc49 # v2.12.2
|
||||
with:
|
||||
egress-policy: audit
|
||||
|
||||
|
|
|
@ -70,31 +70,39 @@ jobs:
|
|||
|
||||
steps:
|
||||
- name: Harden Runner
|
||||
uses: step-security/harden-runner@17d0e2bd7d51742c71671bd19fa12bdc9d40a3d6 # v2.8.1
|
||||
uses: step-security/harden-runner@6c439dc8bdf85cadbbce9ed30d1c7b959517bc49 # v2.12.2
|
||||
with:
|
||||
egress-policy: audit
|
||||
|
||||
- uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7
|
||||
- uses: actions/setup-go@0a12ed9d6a96ab950c8f026ed9f722fe0da7ef32 # v5.0.2
|
||||
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
|
||||
- uses: actions/setup-go@d35c59abb061a4a6fb18e82ac0862c26744d6ab5 # v5.5.0
|
||||
with:
|
||||
go-version: 1.21.x
|
||||
|
||||
- name: Login to GitHub Container Registry
|
||||
uses: docker/login-action@0d4c9c5ea7693da7b068278f7b52bda2a190a446 # v3.2.0
|
||||
uses: docker/login-action@74a5d142397b4f367a81961eba4e8cd7edddf772 # v3.4.0
|
||||
with:
|
||||
registry: ghcr.io
|
||||
username: ${{ github.actor }}
|
||||
password: ${{ secrets.GITHUB_TOKEN }}
|
||||
|
||||
- name: Set up QEMU
|
||||
uses: docker/setup-qemu-action@v3
|
||||
|
||||
- name: Set up Docker Buildx
|
||||
uses: docker/setup-buildx-action@v3
|
||||
|
||||
- name: Download GoReleaser
|
||||
run: go install github.com/goreleaser/goreleaser@v1.23.0
|
||||
|
||||
- name: Run GoReleaser
|
||||
uses: testifysec/witness-run-action@85ddab8b46a86b2905a3b547a1806ab264fbb810
|
||||
uses: testifysec/witness-run-action@d5cef0eea8f8b008c91f6b25f84e8c39f454f413
|
||||
env:
|
||||
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
|
||||
GITHUB_REPOSITORY_OWNER: ${{ github.repository_owner }}
|
||||
with:
|
||||
witness-install-dir: /opt/witness
|
||||
version: 0.9.1
|
||||
step: "build"
|
||||
attestations: "github"
|
||||
command: goreleaser release --clean
|
||||
|
|
|
@ -45,17 +45,17 @@ jobs:
|
|||
|
||||
steps:
|
||||
- name: Harden Runner
|
||||
uses: step-security/harden-runner@17d0e2bd7d51742c71671bd19fa12bdc9d40a3d6 # v2.8.1
|
||||
uses: step-security/harden-runner@6c439dc8bdf85cadbbce9ed30d1c7b959517bc49 # v2.12.2
|
||||
with:
|
||||
egress-policy: audit
|
||||
|
||||
- name: "Checkout code"
|
||||
uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7
|
||||
uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
|
||||
with:
|
||||
persist-credentials: false
|
||||
|
||||
- name: "Run analysis"
|
||||
uses: ossf/scorecard-action@dc50aa9510b46c811795eb24b2f1ba02a914e534 # v2.3.3
|
||||
uses: ossf/scorecard-action@05b42c624433fc40578a4040d5cf5e36ddca8cde # v2.4.2
|
||||
with:
|
||||
results_file: results.sarif
|
||||
results_format: sarif
|
||||
|
@ -77,7 +77,7 @@ jobs:
|
|||
# Upload the results as artifacts (optional). Commenting out will disable uploads of run results in SARIF
|
||||
# format to the repository Actions tab.
|
||||
- name: "Upload artifact"
|
||||
uses: actions/upload-artifact@0b2256b8c012f0828dc542b3febcab082c67f72b # v4.3.4
|
||||
uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2
|
||||
with:
|
||||
name: SARIF file
|
||||
path: results.sarif
|
||||
|
@ -85,6 +85,6 @@ jobs:
|
|||
|
||||
# Upload the results to GitHub's code scanning dashboard.
|
||||
- name: "Upload to code-scanning"
|
||||
uses: github/codeql-action/upload-sarif@4fa2a7953630fd2f3fb380f21be14ede0169dd4f # v3.25.12
|
||||
uses: github/codeql-action/upload-sarif@181d5eefc20863364f96762470ba6f862bdef56b # v3.29.2
|
||||
with:
|
||||
sarif_file: results.sarif
|
||||
|
|
|
@ -28,10 +28,11 @@ jobs:
|
|||
update-pre-commit-hooks:
|
||||
runs-on: ubuntu-latest
|
||||
permissions:
|
||||
contents: write
|
||||
pull-requests: write
|
||||
steps:
|
||||
- uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332
|
||||
- uses: actions/setup-python@39cd14951b08e74b54015e9e001cdefcf80e669f
|
||||
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683
|
||||
- uses: actions/setup-python@a26af69be951a213d495a4c3e4e4022e16d87065
|
||||
with:
|
||||
python-version: "3.11"
|
||||
- name: Install prerequisites
|
||||
|
@ -45,7 +46,7 @@ jobs:
|
|||
run: |
|
||||
echo "GIT_DIFF=$(git diff --exit-code 1> /dev/null; echo $?)" >> $GITHUB_OUTPUT
|
||||
- name: Create Pull Request
|
||||
uses: peter-evans/create-pull-request@c5a7806660adbe173f04e3e038b0ccdcd758773c # v6.1.0
|
||||
uses: peter-evans/create-pull-request@271a8d0340265f705b14b6d32b9829c1cb33d45e # v7.0.8
|
||||
with:
|
||||
token: ${{ secrets.GITHUB_TOKEN }}
|
||||
commit-message: "build: Update pre-commit hooks"
|
||||
|
|
|
@ -27,12 +27,12 @@ jobs:
|
|||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- name: Harden Runner
|
||||
uses: step-security/harden-runner@17d0e2bd7d51742c71671bd19fa12bdc9d40a3d6 # v2.8.1
|
||||
uses: step-security/harden-runner@6c439dc8bdf85cadbbce9ed30d1c7b959517bc49 # v2.12.2
|
||||
with:
|
||||
egress-policy: audit
|
||||
|
||||
- uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7
|
||||
- uses: actions/setup-go@0a12ed9d6a96ab950c8f026ed9f722fe0da7ef32 # v5.0.2
|
||||
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
|
||||
- uses: actions/setup-go@d35c59abb061a4a6fb18e82ac0862c26744d6ab5 # v5.5.0
|
||||
with:
|
||||
go-version: '1.19.x'
|
||||
- name: Install addlicense
|
||||
|
|
|
@ -39,18 +39,20 @@ jobs:
|
|||
id-token: write
|
||||
steps:
|
||||
- name: Harden Runner
|
||||
uses: step-security/harden-runner@17d0e2bd7d51742c71671bd19fa12bdc9d40a3d6 # v2.8.1
|
||||
uses: step-security/harden-runner@6c439dc8bdf85cadbbce9ed30d1c7b959517bc49 # v2.12.2
|
||||
with:
|
||||
egress-policy: audit
|
||||
|
||||
- uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7
|
||||
- uses: actions/setup-go@0a12ed9d6a96ab950c8f026ed9f722fe0da7ef32 # v5.0.2
|
||||
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
|
||||
- uses: actions/setup-go@d35c59abb061a4a6fb18e82ac0862c26744d6ab5 # v5.5.0
|
||||
with:
|
||||
go-version: 1.21.x
|
||||
go-version: 1.22.x
|
||||
|
||||
- if: ${{ inputs.pull_request == false }}
|
||||
uses: testifysec/witness-run-action@85ddab8b46a86b2905a3b547a1806ab264fbb810
|
||||
uses: testifysec/witness-run-action@d5cef0eea8f8b008c91f6b25f84e8c39f454f413
|
||||
with:
|
||||
witness-install-dir: /opt/witness
|
||||
version: 0.9.1
|
||||
step: ${{ inputs.step }}
|
||||
attestations: ${{ inputs.attestations }}
|
||||
command: /bin/sh -c "${{ inputs.command }}"
|
||||
|
@ -59,4 +61,4 @@ jobs:
|
|||
run: ${{ inputs.command }}
|
||||
|
||||
- if: ${{ inputs.step == 'tests' }}
|
||||
uses: codecov/codecov-action@e28ff129e5465c2c0dcc6f003fc735cb6ae0c673
|
||||
uses: codecov/codecov-action@18283e04ce6e62d37312384ff67231eb8fd56d24
|
||||
|
|
|
@ -12,14 +12,31 @@
|
|||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
run:
|
||||
timeout: 6m
|
||||
version: "2"
|
||||
linters:
|
||||
enable:
|
||||
- gosec
|
||||
exclusions:
|
||||
generated: lax
|
||||
presets:
|
||||
- comments
|
||||
- common-false-positives
|
||||
- legacy
|
||||
- std-error-handling
|
||||
rules:
|
||||
- linters:
|
||||
- gosec
|
||||
path: _test.go
|
||||
paths:
|
||||
- third_party$
|
||||
- builtin$
|
||||
- examples$
|
||||
issues:
|
||||
max-same-issues: 50
|
||||
exclude-rules:
|
||||
- path: _test.go
|
||||
linters:
|
||||
- gosec
|
||||
formatters:
|
||||
exclusions:
|
||||
generated: lax
|
||||
paths:
|
||||
- third_party$
|
||||
- builtin$
|
||||
- examples$
|
||||
|
|
|
@ -74,18 +74,56 @@ release:
|
|||
prerelease: auto
|
||||
github:
|
||||
owner: "{{ .Env.GITHUB_REPOSITORY_OWNER }}"
|
||||
dockers:
|
||||
- image_templates:
|
||||
- "ghcr.io/in-toto/archivista:{{ .Version }}-amd64"
|
||||
use: buildx
|
||||
build_flag_templates:
|
||||
- "--pull"
|
||||
- "--platform=linux/amd64"
|
||||
extra_files:
|
||||
- "archivista.graphql"
|
||||
- "ent.graphql"
|
||||
- "ent.resolvers.go"
|
||||
- "entrypoint.sh"
|
||||
- "gen.go"
|
||||
- "generated.go"
|
||||
- "go.mod"
|
||||
- "go.sum"
|
||||
- "resolver.go"
|
||||
- "docs"
|
||||
- "ent"
|
||||
- "cmd"
|
||||
- "ent"
|
||||
- "pkg"
|
||||
- image_templates:
|
||||
- "ghcr.io/in-toto/archivista:{{ .Version }}-arm64"
|
||||
use: buildx
|
||||
build_flag_templates:
|
||||
- "--pull"
|
||||
- "--platform=linux/arm64"
|
||||
extra_files:
|
||||
- "archivista.graphql"
|
||||
- "ent.graphql"
|
||||
- "ent.resolvers.go"
|
||||
- "entrypoint.sh"
|
||||
- "gen.go"
|
||||
- "generated.go"
|
||||
- "go.mod"
|
||||
- "go.sum"
|
||||
- "resolver.go"
|
||||
- "docs"
|
||||
- "ent"
|
||||
- "cmd"
|
||||
- "ent"
|
||||
- "pkg"
|
||||
goarch: arm64
|
||||
docker_manifests:
|
||||
- name_template: "ghcr.io/in-toto/archivista:{{ .Version }}"
|
||||
image_templates:
|
||||
- "ghcr.io/in-toto/archivista:{{ .Version }}-amd64"
|
||||
- "ghcr.io/in-toto/archivista:{{ .Version }}-arm64"
|
||||
kos:
|
||||
- repository: ghcr.io/in-toto/archivista
|
||||
id: archivista
|
||||
build: archivista
|
||||
tags:
|
||||
- '{{.Version}}'
|
||||
bare: true
|
||||
preserve_import_paths: false
|
||||
creation_time: '{{.CommitTimestamp}}'
|
||||
platforms:
|
||||
- linux/amd64
|
||||
- linux/arm64
|
||||
- repository: ghcr.io/in-toto/archivistactl
|
||||
id: archivistactl
|
||||
build: archivistactl
|
||||
|
|
|
@ -14,11 +14,11 @@
|
|||
|
||||
repos:
|
||||
- repo: https://github.com/gitleaks/gitleaks
|
||||
rev: v8.18.2
|
||||
rev: v8.27.2
|
||||
hooks:
|
||||
- id: gitleaks
|
||||
- repo: https://github.com/golangci/golangci-lint
|
||||
rev: v1.56.2
|
||||
rev: v2.2.2
|
||||
hooks:
|
||||
- id: golangci-lint
|
||||
- repo: https://github.com/jumanjihouse/pre-commit-hooks
|
||||
|
@ -26,7 +26,7 @@ repos:
|
|||
hooks:
|
||||
- id: shellcheck
|
||||
- repo: https://github.com/pre-commit/pre-commit-hooks
|
||||
rev: v4.5.0
|
||||
rev: v5.0.0
|
||||
hooks:
|
||||
- id: end-of-file-fixer
|
||||
- id: trailing-whitespace
|
||||
|
|
|
@ -81,7 +81,7 @@ you stay up-to-date with our repository:
|
|||
### Running Archivista Development Environment
|
||||
|
||||
*Please note that the following `make` commands make use of both the `docker` and
|
||||
`docker-compose` commands, so you may need to modify this locally if using tools
|
||||
`docker compose` commands, so you may need to modify this locally if using tools
|
||||
such as [nerdctl](https://github.com/containerd/nerdctl) or [podman](https://github.com/containers/podman).*
|
||||
|
||||
To start the Archivista development environment, simply execute the command:
|
||||
|
|
|
@ -12,7 +12,7 @@
|
|||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
FROM golang:1.22.5-alpine@sha256:8c9183f715b0b4eca05b8b3dbf59766aaedb41ec07477b132ee2891ac0110a07 AS build
|
||||
FROM golang:1.24.5-alpine@sha256:ddf52008bce1be455fe2b22d780b6693259aaf97b16383b6372f4b22dd33ad66 AS build
|
||||
WORKDIR /src
|
||||
RUN apk update && apk add --no-cache file git curl
|
||||
RUN curl -sSf https://atlasgo.sh | sh
|
||||
|
@ -21,7 +21,7 @@ RUN --mount=target=. --mount=target=/root/.cache,type=cache \
|
|||
CGO_ENABLED=0 go build -o /out/archivista -ldflags '-s -d -w' ./cmd/archivista; \
|
||||
file /out/archivista | grep "statically linked"
|
||||
|
||||
FROM alpine:3.20.1@sha256:b89d9c93e9ed3597455c90a0b88a8bbb5cb7188438f70953fede212a0c4394e0
|
||||
FROM alpine:3.22.0@sha256:8a1f59ffb675680d47db6337b49d22281a139e9d709335b492be023728e11715
|
||||
COPY --from=build /out/archivista /bin/archivista
|
||||
COPY --from=build /usr/local/bin/atlas /bin/atlas
|
||||
ADD entrypoint.sh /bin/entrypoint.sh
|
||||
|
|
|
@ -12,7 +12,7 @@
|
|||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
FROM golang:1.22.5-alpine@sha256:8c9183f715b0b4eca05b8b3dbf59766aaedb41ec07477b132ee2891ac0110a07 AS build
|
||||
FROM golang:1.24.5-alpine@sha256:ddf52008bce1be455fe2b22d780b6693259aaf97b16383b6372f4b22dd33ad66 AS build
|
||||
WORKDIR /src
|
||||
RUN apk update && apk add --no-cache file git curl
|
||||
RUN curl -sSf https://atlasgo.sh | sh
|
||||
|
|
|
@ -0,0 +1,8 @@
|
|||
As a sub-project of in-toto, this repository is subject to the governance by the in-toto steering committee.
|
||||
|
||||
This repository is also subject to the in-toto and CNCF code of conduct.
|
||||
|
||||
For more details, please reference the in-toto community repository:
|
||||
|
||||
- [GOVERNANCE.md](https://github.com/in-toto/community/blob/main/GOVERNANCE.md)
|
||||
- [CODE_OF_CONDUCT.md](https://github.com/in-toto/community/blob/main/CODE-OF-CONDUCT.md)
|
|
@ -1,8 +1,8 @@
|
|||
# Maintainers
|
||||
|
||||
| Name | GitHub |
|
||||
|----------------------------|-----------------|
|
||||
| Kairo de Araujo | [@kairoaraujo](https://github.com/kairoaraujo) |
|
||||
| Cole Kennedy (TestifySec) | [@colek42](https://github.com/colek42) |
|
||||
| John Kjell (TestifySec) | [@jkjell](https://github.com/jkjell) |
|
||||
| Mikhail Swift (TestifySec) | [@mikhailswift](https://github.com/mikhailswift) |
|
||||
| Name | GitHub |
|
||||
|-------------------------------|-----------------|
|
||||
| Kairo de Araujo (Independent) | [@kairoaraujo](https://github.com/kairoaraujo) |
|
||||
| Cole Kennedy (TestifySec) | [@colek42](https://github.com/colek42) |
|
||||
| John Kjell (ControlPlane) | [@jkjell](https://github.com/jkjell) |
|
||||
| Mikhail Swift (TestifySec) | [@mikhailswift](https://github.com/mikhailswift) |
|
||||
|
|
2
Makefile
2
Makefile
|
@ -23,7 +23,7 @@ run-dev: ## Run the dev server
|
|||
|
||||
.PHONY: stop
|
||||
stop: ## Stop the dev server
|
||||
@docker-compose -f compose-dev.yml down -v
|
||||
@docker compose -f compose-dev.yml down -v
|
||||
|
||||
|
||||
.PHONY: clean
|
||||
|
|
164
README.md
164
README.md
|
@ -8,21 +8,28 @@
|
|||
|
||||
# Archivista
|
||||
|
||||
Archivista is a graph and storage service for [in-toto](https://in-toto.io) attestations. Archivista enables the discovery
|
||||
and retrieval of attestations for software artifacts.
|
||||
Archivista is a graph and storage service for [in-toto](https://in-toto.io)
|
||||
attestations. Archivista enables the discovery and retrieval of attestations for
|
||||
software artifacts.
|
||||
|
||||
## Archivista enables you to:
|
||||
## Archivista enables you to
|
||||
|
||||
- Store and retrieve in-toto attestations
|
||||
- Query for relationships between attestations via a GraphQL API
|
||||
- Validate Witness policy without the need to manually list expected attestations
|
||||
- Validate Witness policy without the need to manually list expected
|
||||
attestations
|
||||
|
||||
## Archivista is a trusted store for supply chain metadata
|
||||
|
||||
- It creates a graph of supply chain metadata while storing attestations that can be later used for policy validation and flexible querying.
|
||||
- It is designed to be horizontally scaleable, supporting storing a large number of attestations.
|
||||
- It supports deployment on major cloud service and infrastructure providers, making it a versatile and flexible solution for securing software supply chains.
|
||||
- It only stores signed attestations to further enhance security and and increase trust.
|
||||
- It creates a graph of supply chain metadata while storing attestations that
|
||||
can be later used for policy validation and flexible querying.
|
||||
- It is designed to be horizontally scaleable, supporting storing a large number
|
||||
of attestations.
|
||||
- It supports deployment on major cloud service and infrastructure providers,
|
||||
making it a versatile and flexible solution for securing software supply
|
||||
chains.
|
||||
- It only stores signed attestations to further enhance security and and
|
||||
increase trust.
|
||||
|
||||
## Key Features
|
||||
|
||||
|
@ -36,88 +43,116 @@ and retrieval of attestations for software artifacts.
|
|||
|
||||
## How Archivista Works
|
||||
|
||||
When an attestation is uploaded to Archivista it will store the entire attestation in a configured object store as well
|
||||
as scrape some data from the attestation and store it in a queryable metadata store. This metadata is exposed through a
|
||||
GraphQL API. This enables queries such as finding all attestations related to an artifact with a specified hash or
|
||||
finding all attestations that recorded the use of a specific dependency.
|
||||
When an attestation is uploaded to Archivista it will store the entire
|
||||
attestation in a configured object store as well as scrape some data from the
|
||||
attestation and store it in a queryable metadata store. This metadata is exposed
|
||||
through a GraphQL API. This enables queries such as finding all attestations
|
||||
related to an artifact with a specified hash or finding all attestations that
|
||||
recorded the use of a specific dependency.
|
||||
|
||||
Archivista uses Subjects on the [in-toto
|
||||
Statement](https://github.com/in-toto/attestation/blob/main/spec/README.md#statement) as edges on this graph. Producers
|
||||
of attestations (such as [Witness](https://github.com/in-toto/witness) can use these subjects as a way to expose
|
||||
relationships between attestations.
|
||||
Archivista uses Subjects on the
|
||||
[in-toto Statement](https://github.com/in-toto/attestation/blob/main/spec/README.md#statement)
|
||||
as edges on this graph. Producers of attestations (such as
|
||||
[Witness](https://github.com/in-toto/witness) can use these subjects as a way to
|
||||
expose relationships between attestations.
|
||||
|
||||
For example when attesting that an artifact was compiled the compiled artifact may be a subject, as well as the git
|
||||
commit hash the artifact was built from. This would allow traversing the graph by the commit hash to find other relevant
|
||||
attestations such as those describing code reviews, testing, and scanning that happened on that git commit.
|
||||
For example when attesting that an artifact was compiled the compiled artifact
|
||||
may be a subject, as well as the git commit hash the artifact was built from.
|
||||
This would allow traversing the graph by the commit hash to find other relevant
|
||||
attestations such as those describing code reviews, testing, and scanning that
|
||||
happened on that git commit.
|
||||
|
||||
## Running Archivista
|
||||
|
||||
A public instance of Archivista is running [here](https://archivista.testifysec.io) for testing purposes. The data in this
|
||||
instance is open to the world and there are currently no SLAs defined for this instance.
|
||||
A public instance of Archivista is running
|
||||
[here](https://archivista.testifysec.io) for testing purposes. The data in this
|
||||
instance is open to the world and there are currently no SLAs defined for this
|
||||
instance.
|
||||
|
||||
Archivista requires a MySQL database as well as a compatible file store. Compatible file stores include a local directory
|
||||
or any S3 compatible store.
|
||||
Archivista requires a MySQL database as well as a compatible file store.
|
||||
Compatible file stores include a local directory or any S3 compatible store.
|
||||
|
||||
A docker compose file is included in the repository that will run a local instance of Archivista along with the necessary
|
||||
services for it to operate. These include Minio and MySQL. Simply cloning the repo and running
|
||||
A docker compose file is included in the repository that will run a local
|
||||
instance of Archivista along with the necessary services for it to operate.
|
||||
These include Minio and MySQL. Simply cloning the repo and running
|
||||
|
||||
```
|
||||
```bash
|
||||
docker compose up --build -d
|
||||
```
|
||||
|
||||
is enough to get a local instance of Archivista up and running. Archivista will be listening at `http://localhost:8082` by
|
||||
default with this docker compose file.
|
||||
is enough to get a local instance of Archivista up and running. Archivista will
|
||||
be listening at `http://localhost:8082` by default with this docker compose
|
||||
file.
|
||||
|
||||
### Configuration
|
||||
|
||||
Archivista is configured through environment variables currently.
|
||||
|
||||
| Variable | Default Value | Description |
|
||||
|--------------------------------------------|------------------------------|-----------------------------------------------------------------------------------------------|
|
||||
| ARCHIVISTA_LISTEN_ON | tcp://127.0.0.1:8082 | URL endpoint for Archivista to listen on |
|
||||
| ARCHIVISTA_LOG_LEVEL | INFO | Log level. Options are DEBUG, INFO, WARN, ERROR |
|
||||
| ARCHIVISTA_CORS_ALLOW_ORIGINS | | Comma separated list of origins to allow CORS requests from |
|
||||
| ARCHIVISTA_SQL_STORE_CONNECTION_STRING | root:example@tcp(db)/testify | SQL store connection string |
|
||||
| ARCHIVISTA_STORAGE_BACKEND | | Backend to use for attestation storage. Options are FILE, BLOB, or empty string for disabled. |
|
||||
| ARCHIVISTA_FILE_SERVE_ON | | What address to serve files on. Only valid when using FILE storage backend. |
|
||||
| ARCHIVISTA_FILE_DIR | /tmp/archivista/ | Directory to store and serve files. Only valid when using FILE storage backend. |
|
||||
| ARCHIVISTA_BLOB_STORE_ENDPOINT | 127.0.0.1:9000 | URL endpoint for blob storage. Only valid when using BLOB storage backend. |
|
||||
| ARCHIVISTA_BLOB_STORE_CREDENTIAL_TYPE | | Blob store credential type. Options are IAM or ACCESS_KEY. |
|
||||
| ARCHIVISTA_BLOB_STORE_ACCESS_KEY_ID | | Blob store access key id. Only valid when using BLOB storage backend. |
|
||||
| ARCHIVISTA_BLOB_STORE_SECRET_ACCESS_KEY_ID | | Blob store secret access key id. Only valid when using BLOB storage backend. |
|
||||
| ARCHIVISTA_BLOB_STORE_USE_TLS | TRUE | Use TLS for BLOB storage backend. Only valid when using BLOB storage backend. |
|
||||
| ARCHIVISTA_BLOB_STORE_BUCKET_NAME | | Bucket to use for storage. Only valid when using BLOB storage backend. |
|
||||
| ARCHIVISTA_ENABLE_GRAPHQL | TRUE | Enable GraphQL Endpoint |
|
||||
| ARCHIVISTA_GRAPHQL_WEB_CLIENT_ENABLE | TRUE | Enable GraphiQL, the GraphQL web client |
|
||||
| ARCHIVISTA_ENABLE_ARTIFACT_STORE | FALSE | Enable Artifact Store Endpoints |
|
||||
| ARCHIVISTA_ARTIFACT_STORE_CONFIG | /tmp/artifacts/config.yaml | Location of the config describing available artifacts |
|
||||
**Note**: If `ARCHIVISTA_ENABLE_SQL_STORE` is set to false no metadata about store attestations will be collected. Archivista will only store and retrieve attestations by Gitoid from it's storage. Archivista servers with GraphQL or SQL store disabled cannot be used to verify Witness policies.
|
||||
|
||||
| Variable | Default Value | Description |
|
||||
| ------------------------------------------ | ----------------------------------------- | ----------------------------------------------------------------------------------------------------------- |
|
||||
| ARCHIVISTA_LISTEN_ON | tcp://127.0.0.1:8082 | URL endpoint for Archivista to listen on |
|
||||
| ARCHIVISTA_READ_TIMEOUT | 120 | HTTP server read timeout |
|
||||
| ARCHIVISTA_WRITE_TIMEOUT | 120 | HTTP server write timeout |
|
||||
| ARCHIVISTA_LOG_LEVEL | INFO | Log level. Options are DEBUG, INFO, WARN, ERROR |
|
||||
| ARCHIVISTA_CORS_ALLOW_ORIGINS | | Comma separated list of origins to allow CORS requests from |
|
||||
| ARCHIVISTA_ENABLE_SQL_STORE | TRUE | Enable SQL Metadata store. If disabled, GraphQL will also be disabled |
|
||||
| ARCHIVISTA_SQL_STORE_BACKEND | | Backend to use for SQL. Options are MYSQL or PSQL |
|
||||
| ARCHIVISTA_SQL_STORE_CONNECTION_STRING | postgresql://root:example@tcp(db)/testify | SQL store connection string |
|
||||
| ARCHIVISTA_STORAGE_BACKEND | | Backend to use for attestation storage. Options are FILE, BLOB, or empty string for disabled. |
|
||||
| ARCHIVISTA_FILE_SERVE_ON | | What address to serve files on. Only valid when using FILE storage backend (e.g. `:8081`). |
|
||||
| ARCHIVISTA_FILE_DIR | /tmp/archivista/ | Directory to store and serve files. Only valid when using FILE storage backend. |
|
||||
| ARCHIVISTA_BLOB_STORE_ENDPOINT | 127.0.0.1:9000 | URL endpoint for blob storage. Only valid when using BLOB storage backend. |
|
||||
| ARCHIVISTA_BLOB_STORE_CREDENTIAL_TYPE | | Blob store credential type. Options are IAM or ACCESS_KEY. |
|
||||
| ARCHIVISTA_BLOB_STORE_ACCESS_KEY_ID | | Blob store access key id. Only valid when using BLOB storage backend. |
|
||||
| ARCHIVISTA_BLOB_STORE_SECRET_ACCESS_KEY_ID | | Blob store secret access key id. Only valid when using BLOB storage backend. |
|
||||
| ARCHIVISTA_BLOB_STORE_USE_TLS | TRUE | Use TLS for BLOB storage backend. Only valid when using BLOB storage backend. |
|
||||
| ARCHIVISTA_BLOB_STORE_BUCKET_NAME | | Bucket to use for storage. Only valid when using BLOB storage backend. |
|
||||
| ARCHIVISTA_ENABLE_GRAPHQL | TRUE | Enable GraphQL Endpoint. Archivista servers with GraphQL disabled cannot be used to verify Witness policies |
|
||||
| ARCHIVISTA_GRAPHQL_WEB_CLIENT_ENABLE | TRUE | Enable GraphiQL, the GraphQL web client |
|
||||
| ARCHIVISTA_ENABLE_ARTIFACT_STORE | FALSE | Enable Artifact Store Endpoints |
|
||||
| ARCHIVISTA_ARTIFACT_STORE_CONFIG | /tmp/artifacts/config.yaml | Location of the config describing available artifacts |
|
||||
| ARCHIVISTA_PUBLISHER | "" | Publisher to use. Options are DAPR, RSTUF. Supports multiple, Comma-separated list of String |
|
||||
| ARCHIVISTA_PUBLISHER_DAPR_HOST | localhost | Dapr host |
|
||||
| ARCHIVISTA_PUBLISHER_DAPR_PORT | 3500 | Dapr port |
|
||||
| ARCHIVISTA_PUBLISHER_DAPR_COMPONENT_NAME | "archivista" | Dapr pubsub component name |
|
||||
| ARCHIVISTA_PUBLISHER_DAPR_TOPIC | "attestations" | Dapr pubsub topic |
|
||||
| ARCHIVISTA_PUBLISHER_DAPR_URL | | Dapr full URL |
|
||||
| ARCHIVISTA_PUBLISHER_RSTUF_HOST | | RSTUF URL |
|
||||
|
||||
## Using Archivista
|
||||
|
||||
Archivista exposes two HTTP endpoints to upload or download attestations:
|
||||
|
||||
```
|
||||
```http
|
||||
POST /upload - Uploads an attestation to Archivista. The attestation is to be in the request's body
|
||||
```
|
||||
|
||||
```
|
||||
```http
|
||||
GET /download/:gitoid: - Downloads an attestation with provided gitoid from Archivista
|
||||
```
|
||||
|
||||
Additionally Archivista exposes a GraphQL API. By default the GraphQL playground is enabled and available at root.
|
||||
Additionally Archivista exposes a GraphQL API. By default the GraphQL playground
|
||||
is enabled and available at root.
|
||||
|
||||
`archivistactl` is a CLI tool in this repository that is available to interact with an Archivista instance. `archivistactl`
|
||||
is capable of uploading and downloading attestations as well as doing some basic queries such as finding all
|
||||
attestations with a specified subject and retrieving all subjects for a specified attestation.
|
||||
`archivistactl` is a CLI tool in this repository that is available to interact
|
||||
with an Archivista instance. `archivistactl` is capable of uploading and
|
||||
downloading attestations as well as doing some basic queries such as finding all
|
||||
attestations with a specified subject and retrieving all subjects for a
|
||||
specified attestation.
|
||||
|
||||
## Navigating the Graph
|
||||
|
||||
As previously mentioned, Archivista offers a GraphQL API that enables users to discover attestations. When Archivista ingests
|
||||
an attestation some metadata will be stored into the SQL metadata store. This metadata is exposed through the GraphQL API.
|
||||
Archivista uses [Relay connections](https://relay.dev/graphql/connections.htm) for querying and pagination.
|
||||
As previously mentioned, Archivista offers a GraphQL API that enables users to
|
||||
discover attestations. When Archivista ingests an attestation some metadata will
|
||||
be stored into the SQL metadata store. This metadata is exposed through the
|
||||
GraphQL API. Archivista uses
|
||||
[Relay connections](https://relay.dev/graphql/connections.htm) for querying and
|
||||
pagination.
|
||||
|
||||
Here is an entity relationship diagram of the metadata that is currently available.
|
||||
Here is an entity relationship diagram of the metadata that is currently
|
||||
available.
|
||||
|
||||
```mermaid
|
||||
erDiagram
|
||||
|
@ -174,16 +209,17 @@ timestamp {
|
|||
|
||||
## Deployment
|
||||
|
||||
Archivista can be easily deployed thru the provided helm chart into your kubernetes
|
||||
cluster. See the [README](chart/README.md) for more details.
|
||||
Archivista can be easily deployed thru the provided helm chart into your
|
||||
kubernetes cluster. See the [README](https://github.com/in-toto/helm-charts/blob/main/charts/archivista/README.md) for more details.
|
||||
|
||||
## What's Next
|
||||
|
||||
We would like to expand the types of data Archivista can ingest as well as expand the metadata Archivista collected about
|
||||
ingested data. If you have ideas or use cases for Archivista, feel free to [contact us](mailto:info@testifysec.io) or
|
||||
create an issue!
|
||||
|
||||
We would like to expand the types of data Archivista can ingest as well as
|
||||
expand the metadata Archivista collected about ingested data. If you have ideas
|
||||
or use cases for Archivista, feel free to
|
||||
[contact us](mailto:info@testifysec.io) or create an issue!
|
||||
|
||||
## Contributing
|
||||
|
||||
See [CONTRIBUTING.md](CONTRIBUTING.md) for information on how to contribute to Archivista.
|
||||
See [CONTRIBUTING.md](CONTRIBUTING.md) for information on how to contribute to
|
||||
Archivista.
|
||||
|
|
|
@ -1,23 +0,0 @@
|
|||
# Patterns to ignore when building packages.
|
||||
# This supports shell glob matching, relative path matching, and
|
||||
# negation (prefixed with !). Only one pattern per line.
|
||||
.DS_Store
|
||||
# Common VCS dirs
|
||||
.git/
|
||||
.gitignore
|
||||
.bzr/
|
||||
.bzrignore
|
||||
.hg/
|
||||
.hgignore
|
||||
.svn/
|
||||
# Common backup files
|
||||
*.swp
|
||||
*.bak
|
||||
*.tmp
|
||||
*.orig
|
||||
*~
|
||||
# Various IDEs
|
||||
.project
|
||||
.idea/
|
||||
*.tmproj
|
||||
.vscode/
|
|
@ -1,34 +0,0 @@
|
|||
# Copyright 2023 The Archivista Contributors
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
apiVersion: v2
|
||||
name: archivista
|
||||
description: A Helm chart for Archivista
|
||||
|
||||
type: application
|
||||
version: 0.4.0
|
||||
|
||||
keywords:
|
||||
- attestation
|
||||
|
||||
home: https://github.com/in-toto/
|
||||
|
||||
sources:
|
||||
- https://github.com/in-toto/archivista/chart
|
||||
- https://github.com/in-toto/archivista
|
||||
|
||||
maintainers:
|
||||
- name: in-toto
|
||||
|
||||
appVersion: "0.4.0"
|
201
chart/LICENSE
201
chart/LICENSE
|
@ -1,201 +0,0 @@
|
|||
Apache License
|
||||
Version 2.0, January 2004
|
||||
http://www.apache.org/licenses/
|
||||
|
||||
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
|
||||
|
||||
1. Definitions.
|
||||
|
||||
"License" shall mean the terms and conditions for use, reproduction,
|
||||
and distribution as defined by Sections 1 through 9 of this document.
|
||||
|
||||
"Licensor" shall mean the copyright owner or entity authorized by
|
||||
the copyright owner that is granting the License.
|
||||
|
||||
"Legal Entity" shall mean the union of the acting entity and all
|
||||
other entities that control, are controlled by, or are under common
|
||||
control with that entity. For the purposes of this definition,
|
||||
"control" means (i) the power, direct or indirect, to cause the
|
||||
direction or management of such entity, whether by contract or
|
||||
otherwise, or (ii) ownership of fifty percent (50%) or more of the
|
||||
outstanding shares, or (iii) beneficial ownership of such entity.
|
||||
|
||||
"You" (or "Your") shall mean an individual or Legal Entity
|
||||
exercising permissions granted by this License.
|
||||
|
||||
"Source" form shall mean the preferred form for making modifications,
|
||||
including but not limited to software source code, documentation
|
||||
source, and configuration files.
|
||||
|
||||
"Object" form shall mean any form resulting from mechanical
|
||||
transformation or translation of a Source form, including but
|
||||
not limited to compiled object code, generated documentation,
|
||||
and conversions to other media types.
|
||||
|
||||
"Work" shall mean the work of authorship, whether in Source or
|
||||
Object form, made available under the License, as indicated by a
|
||||
copyright notice that is included in or attached to the work
|
||||
(an example is provided in the Appendix below).
|
||||
|
||||
"Derivative Works" shall mean any work, whether in Source or Object
|
||||
form, that is based on (or derived from) the Work and for which the
|
||||
editorial revisions, annotations, elaborations, or other modifications
|
||||
represent, as a whole, an original work of authorship. For the purposes
|
||||
of this License, Derivative Works shall not include works that remain
|
||||
separable from, or merely link (or bind by name) to the interfaces of,
|
||||
the Work and Derivative Works thereof.
|
||||
|
||||
"Contribution" shall mean any work of authorship, including
|
||||
the original version of the Work and any modifications or additions
|
||||
to that Work or Derivative Works thereof, that is intentionally
|
||||
submitted to Licensor for inclusion in the Work by the copyright owner
|
||||
or by an individual or Legal Entity authorized to submit on behalf of
|
||||
the copyright owner. For the purposes of this definition, "submitted"
|
||||
means any form of electronic, verbal, or written communication sent
|
||||
to the Licensor or its representatives, including but not limited to
|
||||
communication on electronic mailing lists, source code control systems,
|
||||
and issue tracking systems that are managed by, or on behalf of, the
|
||||
Licensor for the purpose of discussing and improving the Work, but
|
||||
excluding communication that is conspicuously marked or otherwise
|
||||
designated in writing by the copyright owner as "Not a Contribution."
|
||||
|
||||
"Contributor" shall mean Licensor and any individual or Legal Entity
|
||||
on behalf of whom a Contribution has been received by Licensor and
|
||||
subsequently incorporated within the Work.
|
||||
|
||||
2. Grant of Copyright License. Subject to the terms and conditions of
|
||||
this License, each Contributor hereby grants to You a perpetual,
|
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||
copyright license to reproduce, prepare Derivative Works of,
|
||||
publicly display, publicly perform, sublicense, and distribute the
|
||||
Work and such Derivative Works in Source or Object form.
|
||||
|
||||
3. Grant of Patent License. Subject to the terms and conditions of
|
||||
this License, each Contributor hereby grants to You a perpetual,
|
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||
(except as stated in this section) patent license to make, have made,
|
||||
use, offer to sell, sell, import, and otherwise transfer the Work,
|
||||
where such license applies only to those patent claims licensable
|
||||
by such Contributor that are necessarily infringed by their
|
||||
Contribution(s) alone or by combination of their Contribution(s)
|
||||
with the Work to which such Contribution(s) was submitted. If You
|
||||
institute patent litigation against any entity (including a
|
||||
cross-claim or counterclaim in a lawsuit) alleging that the Work
|
||||
or a Contribution incorporated within the Work constitutes direct
|
||||
or contributory patent infringement, then any patent licenses
|
||||
granted to You under this License for that Work shall terminate
|
||||
as of the date such litigation is filed.
|
||||
|
||||
4. Redistribution. You may reproduce and distribute copies of the
|
||||
Work or Derivative Works thereof in any medium, with or without
|
||||
modifications, and in Source or Object form, provided that You
|
||||
meet the following conditions:
|
||||
|
||||
(a) You must give any other recipients of the Work or
|
||||
Derivative Works a copy of this License; and
|
||||
|
||||
(b) You must cause any modified files to carry prominent notices
|
||||
stating that You changed the files; and
|
||||
|
||||
(c) You must retain, in the Source form of any Derivative Works
|
||||
that You distribute, all copyright, patent, trademark, and
|
||||
attribution notices from the Source form of the Work,
|
||||
excluding those notices that do not pertain to any part of
|
||||
the Derivative Works; and
|
||||
|
||||
(d) If the Work includes a "NOTICE" text file as part of its
|
||||
distribution, then any Derivative Works that You distribute must
|
||||
include a readable copy of the attribution notices contained
|
||||
within such NOTICE file, excluding those notices that do not
|
||||
pertain to any part of the Derivative Works, in at least one
|
||||
of the following places: within a NOTICE text file distributed
|
||||
as part of the Derivative Works; within the Source form or
|
||||
documentation, if provided along with the Derivative Works; or,
|
||||
within a display generated by the Derivative Works, if and
|
||||
wherever such third-party notices normally appear. The contents
|
||||
of the NOTICE file are for informational purposes only and
|
||||
do not modify the License. You may add Your own attribution
|
||||
notices within Derivative Works that You distribute, alongside
|
||||
or as an addendum to the NOTICE text from the Work, provided
|
||||
that such additional attribution notices cannot be construed
|
||||
as modifying the License.
|
||||
|
||||
You may add Your own copyright statement to Your modifications and
|
||||
may provide additional or different license terms and conditions
|
||||
for use, reproduction, or distribution of Your modifications, or
|
||||
for any such Derivative Works as a whole, provided Your use,
|
||||
reproduction, and distribution of the Work otherwise complies with
|
||||
the conditions stated in this License.
|
||||
|
||||
5. Submission of Contributions. Unless You explicitly state otherwise,
|
||||
any Contribution intentionally submitted for inclusion in the Work
|
||||
by You to the Licensor shall be under the terms and conditions of
|
||||
this License, without any additional terms or conditions.
|
||||
Notwithstanding the above, nothing herein shall supersede or modify
|
||||
the terms of any separate license agreement you may have executed
|
||||
with Licensor regarding such Contributions.
|
||||
|
||||
6. Trademarks. This License does not grant permission to use the trade
|
||||
names, trademarks, service marks, or product names of the Licensor,
|
||||
except as required for reasonable and customary use in describing the
|
||||
origin of the Work and reproducing the content of the NOTICE file.
|
||||
|
||||
7. Disclaimer of Warranty. Unless required by applicable law or
|
||||
agreed to in writing, Licensor provides the Work (and each
|
||||
Contributor provides its Contributions) on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||
implied, including, without limitation, any warranties or conditions
|
||||
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
|
||||
PARTICULAR PURPOSE. You are solely responsible for determining the
|
||||
appropriateness of using or redistributing the Work and assume any
|
||||
risks associated with Your exercise of permissions under this License.
|
||||
|
||||
8. Limitation of Liability. In no event and under no legal theory,
|
||||
whether in tort (including negligence), contract, or otherwise,
|
||||
unless required by applicable law (such as deliberate and grossly
|
||||
negligent acts) or agreed to in writing, shall any Contributor be
|
||||
liable to You for damages, including any direct, indirect, special,
|
||||
incidental, or consequential damages of any character arising as a
|
||||
result of this License or out of the use or inability to use the
|
||||
Work (including but not limited to damages for loss of goodwill,
|
||||
work stoppage, computer failure or malfunction, or any and all
|
||||
other commercial damages or losses), even if such Contributor
|
||||
has been advised of the possibility of such damages.
|
||||
|
||||
9. Accepting Warranty or Additional Liability. While redistributing
|
||||
the Work or Derivative Works thereof, You may choose to offer,
|
||||
and charge a fee for, acceptance of support, warranty, indemnity,
|
||||
or other liability obligations and/or rights consistent with this
|
||||
License. However, in accepting such obligations, You may act only
|
||||
on Your own behalf and on Your sole responsibility, not on behalf
|
||||
of any other Contributor, and only if You agree to indemnify,
|
||||
defend, and hold each Contributor harmless for any liability
|
||||
incurred by, or claims asserted against, such Contributor by reason
|
||||
of your accepting any such warranty or additional liability.
|
||||
|
||||
END OF TERMS AND CONDITIONS
|
||||
|
||||
APPENDIX: How to apply the Apache License to your work.
|
||||
|
||||
To apply the Apache License to your work, attach the following
|
||||
boilerplate notice, with the fields enclosed by brackets "[]"
|
||||
replaced with your own identifying information. (Don't include
|
||||
the brackets!) The text should be enclosed in the appropriate
|
||||
comment syntax for the file format. We also recommend that a
|
||||
file or class name and description of purpose be included on the
|
||||
same "printed page" as the copyright notice for easier
|
||||
identification within third-party archives.
|
||||
|
||||
Copyright [yyyy] [name of copyright owner]
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
|
@ -1,72 +0,0 @@
|
|||
# Archivista
|
||||
|
||||

|
||||
|
||||
Helm chart for Archivista - a graph and storage service for in-toto attestations.
|
||||
|
||||
## Requirements
|
||||
|
||||
A MySQL database and S3 compatible store are needed to successfully install this Helm chart.
|
||||
See [Archivista configuration](https://github.com/in-toto/archivista#configuration) for environment variables needed
|
||||
to establish connections to each datastore. These environment variables can be added to this Helm chart using the value `deployment.env[]`.
|
||||
|
||||
## Quick Installation
|
||||
|
||||
To install the helm chart with default values run following command.
|
||||
The [Values](#Values) section describes the configuration options for this chart.
|
||||
|
||||
```shell
|
||||
helm install archivista .
|
||||
```
|
||||
|
||||
## Uninstallation
|
||||
|
||||
To uninstall the Helm chart run following command.
|
||||
|
||||
```shell
|
||||
helm uninstall archivista
|
||||
```
|
||||
|
||||
## Maintainers
|
||||
|
||||
| Name | Email | Url |
|
||||
| --- | --- | --- |
|
||||
| in-toto project | | <https://github.com/in-toto/> |
|
||||
|
||||
## Source Code
|
||||
|
||||
* Helm chart: <https://github.com/in-toto/archivista/chart>
|
||||
* Archivista: <https://github.com/in-toto/archivista>
|
||||
|
||||
## Values
|
||||
|
||||
| Key | Type | Default |
|
||||
|--- |--- |--- |
|
||||
| affinity | object | `{}` |
|
||||
| autoscaling.enabled | bool | `false` |
|
||||
| autoscaling.maxReplicas | int | `10` |
|
||||
| autoscaling.minReplicas | int | `1` |
|
||||
| autoscaling.targetCPUUtilizationPercentage | int | `80` |
|
||||
| deployment.env | list | `[]` |
|
||||
| fullnameOverride | string | `""` |
|
||||
| image.pullPolicy | string | `"IfNotPresent"` |
|
||||
| image.repository | string | `"ghcr.io/testifysec/archivista"` |
|
||||
| image.tag | string | `"0.1.1"` |
|
||||
| ingress.annotations | object | `{}` |
|
||||
| ingress.className | string | `""` |
|
||||
| ingress.enabled | bool | `true` |
|
||||
| ingress.hosts[0].host | string | `"archivista.localhost"` |
|
||||
| ingress.hosts[0].path | string | `"/"` |
|
||||
| ingress.tls | list | `[]` |
|
||||
| nameOverride | string | `""` |
|
||||
| nodeSelector | object | `{}` |
|
||||
| podAnnotations | object | `{}` |
|
||||
| podSecurityContext | object | `{}` |
|
||||
| replicaCount | int | `1` |
|
||||
| resources | object | `{}` |
|
||||
| serviceAccount.annotations | object | `{}` |
|
||||
| serviceAccount.create | bool | `false` |
|
||||
| serviceAccount.name | string | `""` |
|
||||
| service.port | int | `8082` |
|
||||
| service.type | string | `"ClusterIP"` |
|
||||
| tolerations | list | `[]` |
|
|
@ -1,22 +0,0 @@
|
|||
1. If GraphiQL is enabled, you can visit the URL by running these commands:
|
||||
{{- if .Values.ingress.enabled }}
|
||||
{{- range $host := .Values.ingress.hosts }}
|
||||
{{- range .paths }}
|
||||
http{{ if $.Values.ingress.tls }}s{{ end }}://{{ $host.host }}{{ .path }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
{{- else if contains "NodePort" .Values.service.type }}
|
||||
export NODE_PORT=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ include "archivista.fullname" . }})
|
||||
export NODE_IP=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[0].status.addresses[0].address}")
|
||||
echo http://$NODE_IP:$NODE_PORT
|
||||
{{- else if contains "LoadBalancer" .Values.service.type }}
|
||||
NOTE: It may take a few minutes for the LoadBalancer IP to be available.
|
||||
You can watch the status of by running 'kubectl get --namespace {{ .Release.Namespace }} svc -w {{ include "archivista.fullname" . }}'
|
||||
export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ include "archivista.fullname" . }} --template "{{"{{ range (index .status.loadBalancer.ingress 0) }}{{.}}{{ end }}"}}")
|
||||
echo http://$SERVICE_IP:{{ .Values.service.port }}
|
||||
{{- else if contains "ClusterIP" .Values.service.type }}
|
||||
export POD_NAME=$(kubectl get pods --namespace {{ .Release.Namespace }} -l "app.kubernetes.io/name={{ include "archivista.name" . }},app.kubernetes.io/instance={{ .Release.Name }}" -o jsonpath="{.items[0].metadata.name}")
|
||||
export CONTAINER_PORT=$(kubectl get pod --namespace {{ .Release.Namespace }} $POD_NAME -o jsonpath="{.spec.containers[0].ports[0].containerPort}")
|
||||
echo "Visit http://127.0.0.1:8080 to use your application"
|
||||
kubectl --namespace {{ .Release.Namespace }} port-forward $POD_NAME 8080:$CONTAINER_PORT
|
||||
{{- end }}
|
|
@ -1,62 +0,0 @@
|
|||
{{/*
|
||||
Expand the name of the chart.
|
||||
*/}}
|
||||
{{- define "archivista.name" -}}
|
||||
{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" }}
|
||||
{{- end }}
|
||||
|
||||
{{/*
|
||||
Create a default fully qualified app name.
|
||||
We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
|
||||
If release name contains chart name it will be used as a full name.
|
||||
*/}}
|
||||
{{- define "archivista.fullname" -}}
|
||||
{{- if .Values.fullnameOverride }}
|
||||
{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" }}
|
||||
{{- else }}
|
||||
{{- $name := default .Chart.Name .Values.nameOverride }}
|
||||
{{- if contains $name .Release.Name }}
|
||||
{{- .Release.Name | trunc 63 | trimSuffix "-" }}
|
||||
{{- else }}
|
||||
{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
|
||||
{{/*
|
||||
Create chart name and version as used by the chart label.
|
||||
*/}}
|
||||
{{- define "archivista.chart" -}}
|
||||
{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }}
|
||||
{{- end }}
|
||||
|
||||
{{/*
|
||||
Common labels
|
||||
*/}}
|
||||
{{- define "archivista.labels" -}}
|
||||
helm.sh/chart: {{ include "archivista.chart" . }}
|
||||
{{ include "archivista.selectorLabels" . }}
|
||||
{{- if .Chart.AppVersion }}
|
||||
app.kubernetes.io/version: {{ .Chart.AppVersion | quote }}
|
||||
{{- end }}
|
||||
app.kubernetes.io/managed-by: {{ .Release.Service }}
|
||||
{{- end }}
|
||||
|
||||
{{/*
|
||||
Selector labels
|
||||
*/}}
|
||||
{{- define "archivista.selectorLabels" -}}
|
||||
app.kubernetes.io/name: {{ include "archivista.name" . }}
|
||||
app.kubernetes.io/instance: {{ .Release.Name }}
|
||||
{{- end }}
|
||||
|
||||
{{/*
|
||||
Create the name of the service account to use
|
||||
*/}}
|
||||
{{- define "archivista.serviceAccountName" -}}
|
||||
{{- if .Values.serviceAccount.create }}
|
||||
{{- default (include "archivista.fullname" .) .Values.serviceAccount.name }}
|
||||
{{- else }}
|
||||
{{- default "default" .Values.serviceAccount.name }}
|
||||
{{- end }}
|
||||
{{- end }}
|
|
@ -1,54 +0,0 @@
|
|||
apiVersion: apps/v1
|
||||
kind: Deployment
|
||||
metadata:
|
||||
name: {{ include "archivista.fullname" . }}
|
||||
labels:
|
||||
{{- include "archivista.labels" . | nindent 4 }}
|
||||
spec:
|
||||
{{- if not .Values.autoscaling.enabled }}
|
||||
replicas: {{ .Values.replicaCount }}
|
||||
{{- end }}
|
||||
selector:
|
||||
matchLabels:
|
||||
{{- include "archivista.selectorLabels" . | nindent 6 }}
|
||||
template:
|
||||
metadata:
|
||||
{{- with .Values.podAnnotations }}
|
||||
annotations:
|
||||
{{- toYaml . | nindent 8 }}
|
||||
{{- end }}
|
||||
labels:
|
||||
{{- include "archivista.selectorLabels" . | nindent 8 }}
|
||||
spec:
|
||||
securityContext:
|
||||
{{- toYaml .Values.podSecurityContext | nindent 8 }}
|
||||
containers:
|
||||
- name: {{ .Chart.Name }}
|
||||
securityContext:
|
||||
{{- toYaml .Values.securityContext | nindent 12 }}
|
||||
image: "{{ .Values.image.repository }}:{{ .Values.image.tag | default .Chart.AppVersion }}"
|
||||
imagePullPolicy: {{ .Values.image.pullPolicy }}
|
||||
env:
|
||||
{{- toYaml .Values.deployment.env | nindent 12 }}
|
||||
{{- with .Values.deployment.envFrom }}
|
||||
envFrom:
|
||||
{{- toYaml . | nindent 12 }}
|
||||
{{- end }}
|
||||
ports:
|
||||
- name: graphql
|
||||
containerPort: 8082
|
||||
protocol: TCP
|
||||
resources:
|
||||
{{- toYaml .Values.resources | nindent 12 }}
|
||||
{{- with .Values.nodeSelector }}
|
||||
nodeSelector:
|
||||
{{- toYaml . | nindent 8 }}
|
||||
{{- end }}
|
||||
{{- with .Values.affinity }}
|
||||
affinity:
|
||||
{{- toYaml . | nindent 8 }}
|
||||
{{- end }}
|
||||
{{- with .Values.tolerations }}
|
||||
tolerations:
|
||||
{{- toYaml . | nindent 8 }}
|
||||
{{- end }}
|
|
@ -1,61 +0,0 @@
|
|||
{{- if .Values.ingress.enabled -}}
|
||||
{{- $fullName := include "archivista.fullname" . -}}
|
||||
{{- $svcPort := .Values.service.port -}}
|
||||
{{- if and .Values.ingress.className (not (semverCompare ">=1.18-0" .Capabilities.KubeVersion.GitVersion)) }}
|
||||
{{- if not (hasKey .Values.ingress.annotations "kubernetes.io/ingress.class") }}
|
||||
{{- $_ := set .Values.ingress.annotations "kubernetes.io/ingress.class" .Values.ingress.className}}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
{{- if semverCompare ">=1.19-0" .Capabilities.KubeVersion.GitVersion -}}
|
||||
apiVersion: networking.k8s.io/v1
|
||||
{{- else if semverCompare ">=1.14-0" .Capabilities.KubeVersion.GitVersion -}}
|
||||
apiVersion: networking.k8s.io/v1beta1
|
||||
{{- else -}}
|
||||
apiVersion: extensions/v1beta1
|
||||
{{- end }}
|
||||
kind: Ingress
|
||||
metadata:
|
||||
name: {{ $fullName }}
|
||||
labels:
|
||||
{{- include "archivista.labels" . | nindent 4 }}
|
||||
{{- with .Values.ingress.annotations }}
|
||||
annotations:
|
||||
{{- toYaml . | nindent 4 }}
|
||||
{{- end }}
|
||||
spec:
|
||||
{{- if and .Values.ingress.className (semverCompare ">=1.18-0" .Capabilities.KubeVersion.GitVersion) }}
|
||||
ingressClassName: {{ .Values.ingress.className }}
|
||||
{{- end }}
|
||||
{{- if .Values.ingress.tls }}
|
||||
tls:
|
||||
{{- range .Values.ingress.tls }}
|
||||
- hosts:
|
||||
{{- range .hosts }}
|
||||
- {{ . | quote }}
|
||||
{{- end }}
|
||||
secretName: {{ .secretName }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
rules:
|
||||
{{- range .Values.ingress.hosts }}
|
||||
- host: {{ .host | quote }}
|
||||
http:
|
||||
paths:
|
||||
{{- range .paths }}
|
||||
- path: {{ .path }}
|
||||
{{- if and .pathType (semverCompare ">=1.18-0" $.Capabilities.KubeVersion.GitVersion) }}
|
||||
pathType: {{ .pathType }}
|
||||
{{- end }}
|
||||
backend:
|
||||
{{- if semverCompare ">=1.19-0" $.Capabilities.KubeVersion.GitVersion }}
|
||||
service:
|
||||
name: {{ $fullName }}
|
||||
port:
|
||||
number: {{ $svcPort }}
|
||||
{{- else }}
|
||||
serviceName: {{ $fullName }}
|
||||
servicePort: {{ $svcPort }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
{{- end }}
|
|
@ -1,15 +0,0 @@
|
|||
apiVersion: v1
|
||||
kind: Service
|
||||
metadata:
|
||||
name: {{ include "archivista.fullname" . }}
|
||||
labels:
|
||||
{{- include "archivista.labels" . | nindent 4 }}
|
||||
spec:
|
||||
type: {{ .Values.service.type }}
|
||||
ports:
|
||||
- port: {{ .Values.service.port }}
|
||||
targetPort: graphql
|
||||
protocol: TCP
|
||||
name: graphql
|
||||
selector:
|
||||
{{- include "archivista.selectorLabels" . | nindent 4 }}
|
|
@ -1,12 +0,0 @@
|
|||
{{- if .Values.serviceAccount.create -}}
|
||||
apiVersion: v1
|
||||
kind: ServiceAccount
|
||||
metadata:
|
||||
name: {{ include "archivista.serviceAccountName" . }}
|
||||
labels:
|
||||
{{- include "archivista.labels" . | nindent 4 }}
|
||||
{{- with .Values.serviceAccount.annotations }}
|
||||
annotations:
|
||||
{{- toYaml . | nindent 4 }}
|
||||
{{- end }}
|
||||
{{- end }}
|
|
@ -1,102 +0,0 @@
|
|||
# Copyright 2023 The Archivista Contributors
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
replicaCount: 1
|
||||
|
||||
image:
|
||||
repository: ghcr.io/in-toto/archivista
|
||||
pullPolicy: IfNotPresent
|
||||
# Overrides the image tag whose default is the chart appVersion.
|
||||
tag: "0.4.0"
|
||||
|
||||
nameOverride: ""
|
||||
fullnameOverride: ""
|
||||
|
||||
serviceAccount:
|
||||
create: false
|
||||
annotations: {}
|
||||
# The name of the service account to use.
|
||||
# If not set and create is true, a name is generated using the fullname template
|
||||
name: ""
|
||||
|
||||
podAnnotations: {}
|
||||
|
||||
podSecurityContext: {}
|
||||
# fsGroup: 2000
|
||||
|
||||
securityContext: {}
|
||||
# capabilities:
|
||||
# drop:
|
||||
# - ALL
|
||||
# readOnlyRootFilesystem: true
|
||||
# runAsNonRoot: true
|
||||
# runAsUser: 1000
|
||||
|
||||
deployment:
|
||||
env: []
|
||||
# - name: ARCHIVISTA_SQL_STORE_CONNECTION_STRING
|
||||
# value: "user:pass@tcp(localhost:3306)/testify"
|
||||
# - name: ARCHIVISTA_STORAGE_BACKEND
|
||||
# value: "BLOB"
|
||||
# - name: ARCHIVISTA_BLOB_STORE_ENDPOINT
|
||||
# value: localhost:9000
|
||||
# - name: ARCHIVISTA_BLOB_STORE_ACCESS_KEY_ID
|
||||
# value: testifytestifytestify
|
||||
# - name: ARCHIVISTA_BLOB_STORE_SECRET_ACCESS_KEY_ID
|
||||
# value: exampleexampleexample
|
||||
# - name: ARCHIVISTA_BLOB_STORE_USE_TLS
|
||||
# value: "FALSE"
|
||||
# - name: ARCHIVISTA_BLOB_STORE_BUCKET_NAME
|
||||
# value: attestations
|
||||
|
||||
## Allows the specification of a configmap or secret to set all key-value pairs as environment variables for Archivista
|
||||
envFrom: []
|
||||
|
||||
service:
|
||||
type: ClusterIP
|
||||
port: 8082
|
||||
|
||||
ingress:
|
||||
enabled: true
|
||||
className: ""
|
||||
annotations: {}
|
||||
hosts:
|
||||
- host: archivista.localhost
|
||||
paths:
|
||||
- path: /
|
||||
pathType: ImplementationSpecific
|
||||
tls: []
|
||||
# - secretName: chart-example-tls
|
||||
# hosts:
|
||||
# - archivista.localhost
|
||||
|
||||
resources: {}
|
||||
# limits:
|
||||
# cpu: 100m
|
||||
# memory: 128Mi
|
||||
# requests:
|
||||
# cpu: 100m
|
||||
# memory: 128Mi
|
||||
|
||||
autoscaling:
|
||||
enabled: false
|
||||
minReplicas: 1
|
||||
maxReplicas: 10
|
||||
targetCPUUtilizationPercentage: 80
|
||||
# targetMemoryUtilizationPercentage: 80
|
||||
|
||||
nodeSelector: {}
|
||||
|
||||
tolerations: []
|
||||
|
||||
affinity: {}
|
|
@ -83,12 +83,19 @@ func main() {
|
|||
handlers.AllowedMethods([]string{"GET", "POST", "OPTIONS"}),
|
||||
handlers.AllowedHeaders([]string{"Accept", "Content-Type", "Content-Length", "Accept-Encoding", "X-CSRF-Token", "Authorization"}),
|
||||
)(server.Router()),
|
||||
ReadTimeout: 5 * time.Second,
|
||||
WriteTimeout: 10 * time.Second,
|
||||
ReadTimeout: time.Duration(archivistaService.Cfg.ReadTimeout) * time.Second,
|
||||
WriteTimeout: time.Duration(archivistaService.Cfg.WriteTimeout) * time.Second,
|
||||
}
|
||||
|
||||
go func() {
|
||||
if err := srv.Serve(listener); err != nil {
|
||||
logrus.Fatalf("unable to start http server: %+v", err)
|
||||
if archivistaService.Cfg.EnableTLS {
|
||||
if err := srv.ServeTLS(listener, archivistaService.Cfg.TLSCert, archivistaService.Cfg.TLSKey); err != nil {
|
||||
logrus.Fatalf("unable to start http server: %+v", err)
|
||||
}
|
||||
} else {
|
||||
if err := srv.Serve(listener); err != nil {
|
||||
logrus.Fatalf("unable to start http server: %+v", err)
|
||||
}
|
||||
}
|
||||
}()
|
||||
|
||||
|
|
|
@ -50,7 +50,7 @@ var (
|
|||
out = file
|
||||
}
|
||||
|
||||
return api.DownloadWithWriter(cmd.Context(), archivistaUrl, args[0], out)
|
||||
return api.DownloadWithWriter(cmd.Context(), archivistaUrl, args[0], out, requestOptions()...)
|
||||
},
|
||||
}
|
||||
|
||||
|
@ -60,7 +60,7 @@ var (
|
|||
SilenceUsage: true,
|
||||
Args: cobra.ExactArgs(1),
|
||||
RunE: func(cmd *cobra.Command, args []string) error {
|
||||
results, err := api.GraphQlQuery[retrieveSubjectResults](cmd.Context(), archivistaUrl, retrieveSubjectsQuery, retrieveSubjectVars{Gitoid: args[0]})
|
||||
results, err := api.GraphQlQuery[retrieveSubjectResults](cmd.Context(), archivistaUrl, retrieveSubjectsQuery, retrieveSubjectVars{Gitoid: args[0]}, requestOptions()...)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
|
|
@ -62,7 +62,7 @@ func (ut *UTRetrieveSuite) Test_RetrieveEnvelope_NoDB() {
|
|||
rootCmd.SetArgs([]string{"retrieve", "envelope", "test"})
|
||||
err := rootCmd.Execute()
|
||||
if err != nil {
|
||||
ut.ErrorContains(err, "connection refused")
|
||||
ut.ErrorContains(err, "connection re")
|
||||
} else {
|
||||
ut.FailNow("Expected: error")
|
||||
}
|
||||
|
@ -88,7 +88,7 @@ func (ut *UTRetrieveSuite) Test_RetrieveSubjectsNoDB() {
|
|||
rootCmd.SetArgs([]string{"retrieve", "subjects", "test"})
|
||||
err := rootCmd.Execute()
|
||||
if err != nil {
|
||||
ut.ErrorContains(err, "connection refused")
|
||||
ut.ErrorContains(err, "connection re")
|
||||
} else {
|
||||
ut.FailNow("Expected: error")
|
||||
}
|
||||
|
|
|
@ -15,11 +15,17 @@
|
|||
package cmd
|
||||
|
||||
import (
|
||||
"log"
|
||||
"net/http"
|
||||
"strings"
|
||||
|
||||
"github.com/in-toto/archivista/pkg/api"
|
||||
"github.com/spf13/cobra"
|
||||
)
|
||||
|
||||
var (
|
||||
archivistaUrl string
|
||||
archivistaUrl string
|
||||
requestHeaders []string
|
||||
|
||||
rootCmd = &cobra.Command{
|
||||
Use: "archivistactl",
|
||||
|
@ -29,8 +35,28 @@ var (
|
|||
|
||||
func init() {
|
||||
rootCmd.PersistentFlags().StringVarP(&archivistaUrl, "archivistaurl", "u", "http://localhost:8082", "url of the archivista instance")
|
||||
rootCmd.PersistentFlags().StringArrayVarP(&requestHeaders, "headers", "H", []string{}, "headers to use when making requests to archivista")
|
||||
}
|
||||
|
||||
func Execute() error {
|
||||
return rootCmd.Execute()
|
||||
}
|
||||
|
||||
func requestOptions() []api.RequestOption {
|
||||
opts := []api.RequestOption{}
|
||||
headers := http.Header{}
|
||||
for _, header := range requestHeaders {
|
||||
headerParts := strings.SplitN(header, ":", 2)
|
||||
if len(headerParts) != 2 {
|
||||
log.Fatalf("invalid header: %v", header)
|
||||
}
|
||||
|
||||
headers.Set(headerParts[0], headerParts[1])
|
||||
}
|
||||
|
||||
if len(headers) > 0 {
|
||||
opts = append(opts, api.WithHeaders(headers))
|
||||
}
|
||||
|
||||
return opts
|
||||
}
|
||||
|
|
|
@ -22,42 +22,40 @@ import (
|
|||
"github.com/spf13/cobra"
|
||||
)
|
||||
|
||||
var (
|
||||
searchCmd = &cobra.Command{
|
||||
Use: "search",
|
||||
Short: "Searches the archivista instance for an attestation matching a query",
|
||||
SilenceUsage: true,
|
||||
Long: `Searches the archivista instance for an envelope with a specified subject digest.
|
||||
var searchCmd = &cobra.Command{
|
||||
Use: "search",
|
||||
Short: "Searches the archivista instance for an attestation matching a query",
|
||||
SilenceUsage: true,
|
||||
Long: `Searches the archivista instance for an envelope with a specified subject digest.
|
||||
Optionally a collection name can be provided to further constrain results.
|
||||
|
||||
Digests are expected to be in the form algorithm:digest, for instance: sha256:456c0c9a7c05e2a7f84c139bbacedbe3e8e88f9c`,
|
||||
Args: func(cmd *cobra.Command, args []string) error {
|
||||
if len(args) != 1 {
|
||||
return errors.New("expected exactly 1 argument")
|
||||
}
|
||||
Args: func(cmd *cobra.Command, args []string) error {
|
||||
if len(args) != 1 {
|
||||
return errors.New("expected exactly 1 argument")
|
||||
}
|
||||
|
||||
if _, _, err := validateDigestString(args[0]); err != nil {
|
||||
return err
|
||||
}
|
||||
if _, _, err := validateDigestString(args[0]); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
},
|
||||
RunE: func(cmd *cobra.Command, args []string) error {
|
||||
algo, digest, err := validateDigestString(args[0])
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
},
|
||||
RunE: func(cmd *cobra.Command, args []string) error {
|
||||
algo, digest, err := validateDigestString(args[0])
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
results, err := api.GraphQlQuery[searchResults](cmd.Context(), archivistaUrl, searchQuery, searchVars{Algorithm: algo, Digest: digest})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
results, err := api.GraphQlQuery[searchResults](cmd.Context(), archivistaUrl, searchQuery, searchVars{Algorithm: algo, Digest: digest}, requestOptions()...)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
printResults(results)
|
||||
return nil
|
||||
},
|
||||
}
|
||||
)
|
||||
printResults(results)
|
||||
return nil
|
||||
},
|
||||
}
|
||||
|
||||
func init() {
|
||||
rootCmd.AddCommand(searchCmd)
|
||||
|
|
|
@ -63,7 +63,7 @@ func (ut *UTSearchSuite) Test_NoDB() {
|
|||
rootCmd.SetArgs([]string{"search", "sha256:test"})
|
||||
err := rootCmd.Execute()
|
||||
if err != nil {
|
||||
ut.ErrorContains(err, "connection refused")
|
||||
ut.ErrorContains(err, "connection re")
|
||||
} else {
|
||||
ut.FailNow("Expected: error")
|
||||
}
|
||||
|
|
|
@ -1,4 +1,4 @@
|
|||
// Copyright 2022 The Archivista Contributors
|
||||
// Copyright 2022-2024 The Archivista Contributors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
|
@ -23,25 +23,23 @@ import (
|
|||
"github.com/spf13/cobra"
|
||||
)
|
||||
|
||||
var (
|
||||
storeCmd = &cobra.Command{
|
||||
Use: "store",
|
||||
Short: "stores an attestation on the archivista server",
|
||||
SilenceUsage: true,
|
||||
Args: cobra.MinimumNArgs(1),
|
||||
RunE: func(cmd *cobra.Command, args []string) error {
|
||||
for _, filePath := range args {
|
||||
if gitoid, err := storeAttestationByPath(cmd.Context(), archivistaUrl, filePath); err != nil {
|
||||
return fmt.Errorf("failed to store %s: %w", filePath, err)
|
||||
} else {
|
||||
rootCmd.Printf("%s stored with gitoid %s\n", filePath, gitoid)
|
||||
}
|
||||
var storeCmd = &cobra.Command{
|
||||
Use: "store",
|
||||
Short: "stores an attestation on the archivista server",
|
||||
SilenceUsage: true,
|
||||
Args: cobra.MinimumNArgs(1),
|
||||
RunE: func(cmd *cobra.Command, args []string) error {
|
||||
for _, filePath := range args {
|
||||
if gitoid, err := storeAttestationByPath(cmd.Context(), archivistaUrl, filePath); err != nil {
|
||||
return fmt.Errorf("failed to store %s: %w", filePath, err)
|
||||
} else {
|
||||
rootCmd.Printf("%s stored with gitoid %s\n", filePath, gitoid)
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
},
|
||||
}
|
||||
)
|
||||
return nil
|
||||
},
|
||||
}
|
||||
|
||||
func init() {
|
||||
rootCmd.AddCommand(storeCmd)
|
||||
|
@ -54,7 +52,7 @@ func storeAttestationByPath(ctx context.Context, baseUrl, path string) (string,
|
|||
}
|
||||
|
||||
defer file.Close()
|
||||
resp, err := api.UploadWithReader(ctx, baseUrl, file)
|
||||
resp, err := api.StoreWithReader(ctx, baseUrl, file, requestOptions()...)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
|
|
@ -2,7 +2,7 @@ package archivista
|
|||
|
||||
// This file will be automatically regenerated based on the schema, any resolver implementations
|
||||
// will be copied through when generating and any unknown code will be moved to the end.
|
||||
// Code generated by github.com/99designs/gqlgen version v0.17.47
|
||||
// Code generated by github.com/99designs/gqlgen version v0.17.73
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
|
|
@ -103,7 +103,7 @@ func (ac *AttestationCreate) check() error {
|
|||
return &ValidationError{Name: "type", err: fmt.Errorf(`ent: validator failed for field "Attestation.type": %w`, err)}
|
||||
}
|
||||
}
|
||||
if _, ok := ac.mutation.AttestationCollectionID(); !ok {
|
||||
if len(ac.mutation.AttestationCollectionIDs()) == 0 {
|
||||
return &ValidationError{Name: "attestation_collection", err: errors.New(`ent: missing required edge "Attestation.attestation_collection"`)}
|
||||
}
|
||||
return nil
|
||||
|
|
|
@ -7,6 +7,7 @@ import (
|
|||
"fmt"
|
||||
"math"
|
||||
|
||||
"entgo.io/ent"
|
||||
"entgo.io/ent/dialect/sql"
|
||||
"entgo.io/ent/dialect/sql/sqlgraph"
|
||||
"entgo.io/ent/schema/field"
|
||||
|
@ -88,7 +89,7 @@ func (aq *AttestationQuery) QueryAttestationCollection() *AttestationCollectionQ
|
|||
// First returns the first Attestation entity from the query.
|
||||
// Returns a *NotFoundError when no Attestation was found.
|
||||
func (aq *AttestationQuery) First(ctx context.Context) (*Attestation, error) {
|
||||
nodes, err := aq.Limit(1).All(setContextOp(ctx, aq.ctx, "First"))
|
||||
nodes, err := aq.Limit(1).All(setContextOp(ctx, aq.ctx, ent.OpQueryFirst))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
@ -111,7 +112,7 @@ func (aq *AttestationQuery) FirstX(ctx context.Context) *Attestation {
|
|||
// Returns a *NotFoundError when no Attestation ID was found.
|
||||
func (aq *AttestationQuery) FirstID(ctx context.Context) (id uuid.UUID, err error) {
|
||||
var ids []uuid.UUID
|
||||
if ids, err = aq.Limit(1).IDs(setContextOp(ctx, aq.ctx, "FirstID")); err != nil {
|
||||
if ids, err = aq.Limit(1).IDs(setContextOp(ctx, aq.ctx, ent.OpQueryFirstID)); err != nil {
|
||||
return
|
||||
}
|
||||
if len(ids) == 0 {
|
||||
|
@ -134,7 +135,7 @@ func (aq *AttestationQuery) FirstIDX(ctx context.Context) uuid.UUID {
|
|||
// Returns a *NotSingularError when more than one Attestation entity is found.
|
||||
// Returns a *NotFoundError when no Attestation entities are found.
|
||||
func (aq *AttestationQuery) Only(ctx context.Context) (*Attestation, error) {
|
||||
nodes, err := aq.Limit(2).All(setContextOp(ctx, aq.ctx, "Only"))
|
||||
nodes, err := aq.Limit(2).All(setContextOp(ctx, aq.ctx, ent.OpQueryOnly))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
@ -162,7 +163,7 @@ func (aq *AttestationQuery) OnlyX(ctx context.Context) *Attestation {
|
|||
// Returns a *NotFoundError when no entities are found.
|
||||
func (aq *AttestationQuery) OnlyID(ctx context.Context) (id uuid.UUID, err error) {
|
||||
var ids []uuid.UUID
|
||||
if ids, err = aq.Limit(2).IDs(setContextOp(ctx, aq.ctx, "OnlyID")); err != nil {
|
||||
if ids, err = aq.Limit(2).IDs(setContextOp(ctx, aq.ctx, ent.OpQueryOnlyID)); err != nil {
|
||||
return
|
||||
}
|
||||
switch len(ids) {
|
||||
|
@ -187,7 +188,7 @@ func (aq *AttestationQuery) OnlyIDX(ctx context.Context) uuid.UUID {
|
|||
|
||||
// All executes the query and returns a list of Attestations.
|
||||
func (aq *AttestationQuery) All(ctx context.Context) ([]*Attestation, error) {
|
||||
ctx = setContextOp(ctx, aq.ctx, "All")
|
||||
ctx = setContextOp(ctx, aq.ctx, ent.OpQueryAll)
|
||||
if err := aq.prepareQuery(ctx); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
@ -209,7 +210,7 @@ func (aq *AttestationQuery) IDs(ctx context.Context) (ids []uuid.UUID, err error
|
|||
if aq.ctx.Unique == nil && aq.path != nil {
|
||||
aq.Unique(true)
|
||||
}
|
||||
ctx = setContextOp(ctx, aq.ctx, "IDs")
|
||||
ctx = setContextOp(ctx, aq.ctx, ent.OpQueryIDs)
|
||||
if err = aq.Select(attestation.FieldID).Scan(ctx, &ids); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
@ -227,7 +228,7 @@ func (aq *AttestationQuery) IDsX(ctx context.Context) []uuid.UUID {
|
|||
|
||||
// Count returns the count of the given query.
|
||||
func (aq *AttestationQuery) Count(ctx context.Context) (int, error) {
|
||||
ctx = setContextOp(ctx, aq.ctx, "Count")
|
||||
ctx = setContextOp(ctx, aq.ctx, ent.OpQueryCount)
|
||||
if err := aq.prepareQuery(ctx); err != nil {
|
||||
return 0, err
|
||||
}
|
||||
|
@ -245,7 +246,7 @@ func (aq *AttestationQuery) CountX(ctx context.Context) int {
|
|||
|
||||
// Exist returns true if the query has elements in the graph.
|
||||
func (aq *AttestationQuery) Exist(ctx context.Context) (bool, error) {
|
||||
ctx = setContextOp(ctx, aq.ctx, "Exist")
|
||||
ctx = setContextOp(ctx, aq.ctx, ent.OpQueryExist)
|
||||
switch _, err := aq.FirstID(ctx); {
|
||||
case IsNotFound(err):
|
||||
return false, nil
|
||||
|
@ -550,7 +551,7 @@ func (agb *AttestationGroupBy) Aggregate(fns ...AggregateFunc) *AttestationGroup
|
|||
|
||||
// Scan applies the selector query and scans the result into the given value.
|
||||
func (agb *AttestationGroupBy) Scan(ctx context.Context, v any) error {
|
||||
ctx = setContextOp(ctx, agb.build.ctx, "GroupBy")
|
||||
ctx = setContextOp(ctx, agb.build.ctx, ent.OpQueryGroupBy)
|
||||
if err := agb.build.prepareQuery(ctx); err != nil {
|
||||
return err
|
||||
}
|
||||
|
@ -598,7 +599,7 @@ func (as *AttestationSelect) Aggregate(fns ...AggregateFunc) *AttestationSelect
|
|||
|
||||
// Scan applies the selector query and scans the result into the given value.
|
||||
func (as *AttestationSelect) Scan(ctx context.Context, v any) error {
|
||||
ctx = setContextOp(ctx, as.ctx, "Select")
|
||||
ctx = setContextOp(ctx, as.ctx, ent.OpQuerySelect)
|
||||
if err := as.prepareQuery(ctx); err != nil {
|
||||
return err
|
||||
}
|
||||
|
|
|
@ -99,7 +99,7 @@ func (au *AttestationUpdate) check() error {
|
|||
return &ValidationError{Name: "type", err: fmt.Errorf(`ent: validator failed for field "Attestation.type": %w`, err)}
|
||||
}
|
||||
}
|
||||
if _, ok := au.mutation.AttestationCollectionID(); au.mutation.AttestationCollectionCleared() && !ok {
|
||||
if au.mutation.AttestationCollectionCleared() && len(au.mutation.AttestationCollectionIDs()) > 0 {
|
||||
return errors.New(`ent: clearing a required unique edge "Attestation.attestation_collection"`)
|
||||
}
|
||||
return nil
|
||||
|
@ -252,7 +252,7 @@ func (auo *AttestationUpdateOne) check() error {
|
|||
return &ValidationError{Name: "type", err: fmt.Errorf(`ent: validator failed for field "Attestation.type": %w`, err)}
|
||||
}
|
||||
}
|
||||
if _, ok := auo.mutation.AttestationCollectionID(); auo.mutation.AttestationCollectionCleared() && !ok {
|
||||
if auo.mutation.AttestationCollectionCleared() && len(auo.mutation.AttestationCollectionIDs()) > 0 {
|
||||
return errors.New(`ent: clearing a required unique edge "Attestation.attestation_collection"`)
|
||||
}
|
||||
return nil
|
||||
|
|
|
@ -119,7 +119,7 @@ func (acc *AttestationCollectionCreate) check() error {
|
|||
return &ValidationError{Name: "name", err: fmt.Errorf(`ent: validator failed for field "AttestationCollection.name": %w`, err)}
|
||||
}
|
||||
}
|
||||
if _, ok := acc.mutation.StatementID(); !ok {
|
||||
if len(acc.mutation.StatementIDs()) == 0 {
|
||||
return &ValidationError{Name: "statement", err: errors.New(`ent: missing required edge "AttestationCollection.statement"`)}
|
||||
}
|
||||
return nil
|
||||
|
|
|
@ -8,6 +8,7 @@ import (
|
|||
"fmt"
|
||||
"math"
|
||||
|
||||
"entgo.io/ent"
|
||||
"entgo.io/ent/dialect/sql"
|
||||
"entgo.io/ent/dialect/sql/sqlgraph"
|
||||
"entgo.io/ent/schema/field"
|
||||
|
@ -114,7 +115,7 @@ func (acq *AttestationCollectionQuery) QueryStatement() *StatementQuery {
|
|||
// First returns the first AttestationCollection entity from the query.
|
||||
// Returns a *NotFoundError when no AttestationCollection was found.
|
||||
func (acq *AttestationCollectionQuery) First(ctx context.Context) (*AttestationCollection, error) {
|
||||
nodes, err := acq.Limit(1).All(setContextOp(ctx, acq.ctx, "First"))
|
||||
nodes, err := acq.Limit(1).All(setContextOp(ctx, acq.ctx, ent.OpQueryFirst))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
@ -137,7 +138,7 @@ func (acq *AttestationCollectionQuery) FirstX(ctx context.Context) *AttestationC
|
|||
// Returns a *NotFoundError when no AttestationCollection ID was found.
|
||||
func (acq *AttestationCollectionQuery) FirstID(ctx context.Context) (id uuid.UUID, err error) {
|
||||
var ids []uuid.UUID
|
||||
if ids, err = acq.Limit(1).IDs(setContextOp(ctx, acq.ctx, "FirstID")); err != nil {
|
||||
if ids, err = acq.Limit(1).IDs(setContextOp(ctx, acq.ctx, ent.OpQueryFirstID)); err != nil {
|
||||
return
|
||||
}
|
||||
if len(ids) == 0 {
|
||||
|
@ -160,7 +161,7 @@ func (acq *AttestationCollectionQuery) FirstIDX(ctx context.Context) uuid.UUID {
|
|||
// Returns a *NotSingularError when more than one AttestationCollection entity is found.
|
||||
// Returns a *NotFoundError when no AttestationCollection entities are found.
|
||||
func (acq *AttestationCollectionQuery) Only(ctx context.Context) (*AttestationCollection, error) {
|
||||
nodes, err := acq.Limit(2).All(setContextOp(ctx, acq.ctx, "Only"))
|
||||
nodes, err := acq.Limit(2).All(setContextOp(ctx, acq.ctx, ent.OpQueryOnly))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
@ -188,7 +189,7 @@ func (acq *AttestationCollectionQuery) OnlyX(ctx context.Context) *AttestationCo
|
|||
// Returns a *NotFoundError when no entities are found.
|
||||
func (acq *AttestationCollectionQuery) OnlyID(ctx context.Context) (id uuid.UUID, err error) {
|
||||
var ids []uuid.UUID
|
||||
if ids, err = acq.Limit(2).IDs(setContextOp(ctx, acq.ctx, "OnlyID")); err != nil {
|
||||
if ids, err = acq.Limit(2).IDs(setContextOp(ctx, acq.ctx, ent.OpQueryOnlyID)); err != nil {
|
||||
return
|
||||
}
|
||||
switch len(ids) {
|
||||
|
@ -213,7 +214,7 @@ func (acq *AttestationCollectionQuery) OnlyIDX(ctx context.Context) uuid.UUID {
|
|||
|
||||
// All executes the query and returns a list of AttestationCollections.
|
||||
func (acq *AttestationCollectionQuery) All(ctx context.Context) ([]*AttestationCollection, error) {
|
||||
ctx = setContextOp(ctx, acq.ctx, "All")
|
||||
ctx = setContextOp(ctx, acq.ctx, ent.OpQueryAll)
|
||||
if err := acq.prepareQuery(ctx); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
@ -235,7 +236,7 @@ func (acq *AttestationCollectionQuery) IDs(ctx context.Context) (ids []uuid.UUID
|
|||
if acq.ctx.Unique == nil && acq.path != nil {
|
||||
acq.Unique(true)
|
||||
}
|
||||
ctx = setContextOp(ctx, acq.ctx, "IDs")
|
||||
ctx = setContextOp(ctx, acq.ctx, ent.OpQueryIDs)
|
||||
if err = acq.Select(attestationcollection.FieldID).Scan(ctx, &ids); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
@ -253,7 +254,7 @@ func (acq *AttestationCollectionQuery) IDsX(ctx context.Context) []uuid.UUID {
|
|||
|
||||
// Count returns the count of the given query.
|
||||
func (acq *AttestationCollectionQuery) Count(ctx context.Context) (int, error) {
|
||||
ctx = setContextOp(ctx, acq.ctx, "Count")
|
||||
ctx = setContextOp(ctx, acq.ctx, ent.OpQueryCount)
|
||||
if err := acq.prepareQuery(ctx); err != nil {
|
||||
return 0, err
|
||||
}
|
||||
|
@ -271,7 +272,7 @@ func (acq *AttestationCollectionQuery) CountX(ctx context.Context) int {
|
|||
|
||||
// Exist returns true if the query has elements in the graph.
|
||||
func (acq *AttestationCollectionQuery) Exist(ctx context.Context) (bool, error) {
|
||||
ctx = setContextOp(ctx, acq.ctx, "Exist")
|
||||
ctx = setContextOp(ctx, acq.ctx, ent.OpQueryExist)
|
||||
switch _, err := acq.FirstID(ctx); {
|
||||
case IsNotFound(err):
|
||||
return false, nil
|
||||
|
@ -648,7 +649,7 @@ func (acgb *AttestationCollectionGroupBy) Aggregate(fns ...AggregateFunc) *Attes
|
|||
|
||||
// Scan applies the selector query and scans the result into the given value.
|
||||
func (acgb *AttestationCollectionGroupBy) Scan(ctx context.Context, v any) error {
|
||||
ctx = setContextOp(ctx, acgb.build.ctx, "GroupBy")
|
||||
ctx = setContextOp(ctx, acgb.build.ctx, ent.OpQueryGroupBy)
|
||||
if err := acgb.build.prepareQuery(ctx); err != nil {
|
||||
return err
|
||||
}
|
||||
|
@ -696,7 +697,7 @@ func (acs *AttestationCollectionSelect) Aggregate(fns ...AggregateFunc) *Attesta
|
|||
|
||||
// Scan applies the selector query and scans the result into the given value.
|
||||
func (acs *AttestationCollectionSelect) Scan(ctx context.Context, v any) error {
|
||||
ctx = setContextOp(ctx, acs.ctx, "Select")
|
||||
ctx = setContextOp(ctx, acs.ctx, ent.OpQuerySelect)
|
||||
if err := acs.prepareQuery(ctx); err != nil {
|
||||
return err
|
||||
}
|
||||
|
|
|
@ -136,7 +136,7 @@ func (acu *AttestationCollectionUpdate) check() error {
|
|||
return &ValidationError{Name: "name", err: fmt.Errorf(`ent: validator failed for field "AttestationCollection.name": %w`, err)}
|
||||
}
|
||||
}
|
||||
if _, ok := acu.mutation.StatementID(); acu.mutation.StatementCleared() && !ok {
|
||||
if acu.mutation.StatementCleared() && len(acu.mutation.StatementIDs()) > 0 {
|
||||
return errors.New(`ent: clearing a required unique edge "AttestationCollection.statement"`)
|
||||
}
|
||||
return nil
|
||||
|
@ -370,7 +370,7 @@ func (acuo *AttestationCollectionUpdateOne) check() error {
|
|||
return &ValidationError{Name: "name", err: fmt.Errorf(`ent: validator failed for field "AttestationCollection.name": %w`, err)}
|
||||
}
|
||||
}
|
||||
if _, ok := acuo.mutation.StatementID(); acuo.mutation.StatementCleared() && !ok {
|
||||
if acuo.mutation.StatementCleared() && len(acuo.mutation.StatementIDs()) > 0 {
|
||||
return errors.New(`ent: clearing a required unique edge "AttestationCollection.statement"`)
|
||||
}
|
||||
return nil
|
||||
|
|
|
@ -7,6 +7,7 @@ import (
|
|||
"fmt"
|
||||
"math"
|
||||
|
||||
"entgo.io/ent"
|
||||
"entgo.io/ent/dialect/sql"
|
||||
"entgo.io/ent/dialect/sql/sqlgraph"
|
||||
"entgo.io/ent/schema/field"
|
||||
|
@ -88,7 +89,7 @@ func (apq *AttestationPolicyQuery) QueryStatement() *StatementQuery {
|
|||
// First returns the first AttestationPolicy entity from the query.
|
||||
// Returns a *NotFoundError when no AttestationPolicy was found.
|
||||
func (apq *AttestationPolicyQuery) First(ctx context.Context) (*AttestationPolicy, error) {
|
||||
nodes, err := apq.Limit(1).All(setContextOp(ctx, apq.ctx, "First"))
|
||||
nodes, err := apq.Limit(1).All(setContextOp(ctx, apq.ctx, ent.OpQueryFirst))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
@ -111,7 +112,7 @@ func (apq *AttestationPolicyQuery) FirstX(ctx context.Context) *AttestationPolic
|
|||
// Returns a *NotFoundError when no AttestationPolicy ID was found.
|
||||
func (apq *AttestationPolicyQuery) FirstID(ctx context.Context) (id uuid.UUID, err error) {
|
||||
var ids []uuid.UUID
|
||||
if ids, err = apq.Limit(1).IDs(setContextOp(ctx, apq.ctx, "FirstID")); err != nil {
|
||||
if ids, err = apq.Limit(1).IDs(setContextOp(ctx, apq.ctx, ent.OpQueryFirstID)); err != nil {
|
||||
return
|
||||
}
|
||||
if len(ids) == 0 {
|
||||
|
@ -134,7 +135,7 @@ func (apq *AttestationPolicyQuery) FirstIDX(ctx context.Context) uuid.UUID {
|
|||
// Returns a *NotSingularError when more than one AttestationPolicy entity is found.
|
||||
// Returns a *NotFoundError when no AttestationPolicy entities are found.
|
||||
func (apq *AttestationPolicyQuery) Only(ctx context.Context) (*AttestationPolicy, error) {
|
||||
nodes, err := apq.Limit(2).All(setContextOp(ctx, apq.ctx, "Only"))
|
||||
nodes, err := apq.Limit(2).All(setContextOp(ctx, apq.ctx, ent.OpQueryOnly))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
@ -162,7 +163,7 @@ func (apq *AttestationPolicyQuery) OnlyX(ctx context.Context) *AttestationPolicy
|
|||
// Returns a *NotFoundError when no entities are found.
|
||||
func (apq *AttestationPolicyQuery) OnlyID(ctx context.Context) (id uuid.UUID, err error) {
|
||||
var ids []uuid.UUID
|
||||
if ids, err = apq.Limit(2).IDs(setContextOp(ctx, apq.ctx, "OnlyID")); err != nil {
|
||||
if ids, err = apq.Limit(2).IDs(setContextOp(ctx, apq.ctx, ent.OpQueryOnlyID)); err != nil {
|
||||
return
|
||||
}
|
||||
switch len(ids) {
|
||||
|
@ -187,7 +188,7 @@ func (apq *AttestationPolicyQuery) OnlyIDX(ctx context.Context) uuid.UUID {
|
|||
|
||||
// All executes the query and returns a list of AttestationPolicies.
|
||||
func (apq *AttestationPolicyQuery) All(ctx context.Context) ([]*AttestationPolicy, error) {
|
||||
ctx = setContextOp(ctx, apq.ctx, "All")
|
||||
ctx = setContextOp(ctx, apq.ctx, ent.OpQueryAll)
|
||||
if err := apq.prepareQuery(ctx); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
@ -209,7 +210,7 @@ func (apq *AttestationPolicyQuery) IDs(ctx context.Context) (ids []uuid.UUID, er
|
|||
if apq.ctx.Unique == nil && apq.path != nil {
|
||||
apq.Unique(true)
|
||||
}
|
||||
ctx = setContextOp(ctx, apq.ctx, "IDs")
|
||||
ctx = setContextOp(ctx, apq.ctx, ent.OpQueryIDs)
|
||||
if err = apq.Select(attestationpolicy.FieldID).Scan(ctx, &ids); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
@ -227,7 +228,7 @@ func (apq *AttestationPolicyQuery) IDsX(ctx context.Context) []uuid.UUID {
|
|||
|
||||
// Count returns the count of the given query.
|
||||
func (apq *AttestationPolicyQuery) Count(ctx context.Context) (int, error) {
|
||||
ctx = setContextOp(ctx, apq.ctx, "Count")
|
||||
ctx = setContextOp(ctx, apq.ctx, ent.OpQueryCount)
|
||||
if err := apq.prepareQuery(ctx); err != nil {
|
||||
return 0, err
|
||||
}
|
||||
|
@ -245,7 +246,7 @@ func (apq *AttestationPolicyQuery) CountX(ctx context.Context) int {
|
|||
|
||||
// Exist returns true if the query has elements in the graph.
|
||||
func (apq *AttestationPolicyQuery) Exist(ctx context.Context) (bool, error) {
|
||||
ctx = setContextOp(ctx, apq.ctx, "Exist")
|
||||
ctx = setContextOp(ctx, apq.ctx, ent.OpQueryExist)
|
||||
switch _, err := apq.FirstID(ctx); {
|
||||
case IsNotFound(err):
|
||||
return false, nil
|
||||
|
@ -550,7 +551,7 @@ func (apgb *AttestationPolicyGroupBy) Aggregate(fns ...AggregateFunc) *Attestati
|
|||
|
||||
// Scan applies the selector query and scans the result into the given value.
|
||||
func (apgb *AttestationPolicyGroupBy) Scan(ctx context.Context, v any) error {
|
||||
ctx = setContextOp(ctx, apgb.build.ctx, "GroupBy")
|
||||
ctx = setContextOp(ctx, apgb.build.ctx, ent.OpQueryGroupBy)
|
||||
if err := apgb.build.prepareQuery(ctx); err != nil {
|
||||
return err
|
||||
}
|
||||
|
@ -598,7 +599,7 @@ func (aps *AttestationPolicySelect) Aggregate(fns ...AggregateFunc) *Attestation
|
|||
|
||||
// Scan applies the selector query and scans the result into the given value.
|
||||
func (aps *AttestationPolicySelect) Scan(ctx context.Context, v any) error {
|
||||
ctx = setContextOp(ctx, aps.ctx, "Select")
|
||||
ctx = setContextOp(ctx, aps.ctx, ent.OpQuerySelect)
|
||||
if err := aps.prepareQuery(ctx); err != nil {
|
||||
return err
|
||||
}
|
||||
|
|
|
@ -8,6 +8,7 @@ import (
|
|||
"fmt"
|
||||
"math"
|
||||
|
||||
"entgo.io/ent"
|
||||
"entgo.io/ent/dialect/sql"
|
||||
"entgo.io/ent/dialect/sql/sqlgraph"
|
||||
"entgo.io/ent/schema/field"
|
||||
|
@ -139,7 +140,7 @@ func (dq *DsseQuery) QueryPayloadDigests() *PayloadDigestQuery {
|
|||
// First returns the first Dsse entity from the query.
|
||||
// Returns a *NotFoundError when no Dsse was found.
|
||||
func (dq *DsseQuery) First(ctx context.Context) (*Dsse, error) {
|
||||
nodes, err := dq.Limit(1).All(setContextOp(ctx, dq.ctx, "First"))
|
||||
nodes, err := dq.Limit(1).All(setContextOp(ctx, dq.ctx, ent.OpQueryFirst))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
@ -162,7 +163,7 @@ func (dq *DsseQuery) FirstX(ctx context.Context) *Dsse {
|
|||
// Returns a *NotFoundError when no Dsse ID was found.
|
||||
func (dq *DsseQuery) FirstID(ctx context.Context) (id uuid.UUID, err error) {
|
||||
var ids []uuid.UUID
|
||||
if ids, err = dq.Limit(1).IDs(setContextOp(ctx, dq.ctx, "FirstID")); err != nil {
|
||||
if ids, err = dq.Limit(1).IDs(setContextOp(ctx, dq.ctx, ent.OpQueryFirstID)); err != nil {
|
||||
return
|
||||
}
|
||||
if len(ids) == 0 {
|
||||
|
@ -185,7 +186,7 @@ func (dq *DsseQuery) FirstIDX(ctx context.Context) uuid.UUID {
|
|||
// Returns a *NotSingularError when more than one Dsse entity is found.
|
||||
// Returns a *NotFoundError when no Dsse entities are found.
|
||||
func (dq *DsseQuery) Only(ctx context.Context) (*Dsse, error) {
|
||||
nodes, err := dq.Limit(2).All(setContextOp(ctx, dq.ctx, "Only"))
|
||||
nodes, err := dq.Limit(2).All(setContextOp(ctx, dq.ctx, ent.OpQueryOnly))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
@ -213,7 +214,7 @@ func (dq *DsseQuery) OnlyX(ctx context.Context) *Dsse {
|
|||
// Returns a *NotFoundError when no entities are found.
|
||||
func (dq *DsseQuery) OnlyID(ctx context.Context) (id uuid.UUID, err error) {
|
||||
var ids []uuid.UUID
|
||||
if ids, err = dq.Limit(2).IDs(setContextOp(ctx, dq.ctx, "OnlyID")); err != nil {
|
||||
if ids, err = dq.Limit(2).IDs(setContextOp(ctx, dq.ctx, ent.OpQueryOnlyID)); err != nil {
|
||||
return
|
||||
}
|
||||
switch len(ids) {
|
||||
|
@ -238,7 +239,7 @@ func (dq *DsseQuery) OnlyIDX(ctx context.Context) uuid.UUID {
|
|||
|
||||
// All executes the query and returns a list of Dsses.
|
||||
func (dq *DsseQuery) All(ctx context.Context) ([]*Dsse, error) {
|
||||
ctx = setContextOp(ctx, dq.ctx, "All")
|
||||
ctx = setContextOp(ctx, dq.ctx, ent.OpQueryAll)
|
||||
if err := dq.prepareQuery(ctx); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
@ -260,7 +261,7 @@ func (dq *DsseQuery) IDs(ctx context.Context) (ids []uuid.UUID, err error) {
|
|||
if dq.ctx.Unique == nil && dq.path != nil {
|
||||
dq.Unique(true)
|
||||
}
|
||||
ctx = setContextOp(ctx, dq.ctx, "IDs")
|
||||
ctx = setContextOp(ctx, dq.ctx, ent.OpQueryIDs)
|
||||
if err = dq.Select(dsse.FieldID).Scan(ctx, &ids); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
@ -278,7 +279,7 @@ func (dq *DsseQuery) IDsX(ctx context.Context) []uuid.UUID {
|
|||
|
||||
// Count returns the count of the given query.
|
||||
func (dq *DsseQuery) Count(ctx context.Context) (int, error) {
|
||||
ctx = setContextOp(ctx, dq.ctx, "Count")
|
||||
ctx = setContextOp(ctx, dq.ctx, ent.OpQueryCount)
|
||||
if err := dq.prepareQuery(ctx); err != nil {
|
||||
return 0, err
|
||||
}
|
||||
|
@ -296,7 +297,7 @@ func (dq *DsseQuery) CountX(ctx context.Context) int {
|
|||
|
||||
// Exist returns true if the query has elements in the graph.
|
||||
func (dq *DsseQuery) Exist(ctx context.Context) (bool, error) {
|
||||
ctx = setContextOp(ctx, dq.ctx, "Exist")
|
||||
ctx = setContextOp(ctx, dq.ctx, ent.OpQueryExist)
|
||||
switch _, err := dq.FirstID(ctx); {
|
||||
case IsNotFound(err):
|
||||
return false, nil
|
||||
|
@ -745,7 +746,7 @@ func (dgb *DsseGroupBy) Aggregate(fns ...AggregateFunc) *DsseGroupBy {
|
|||
|
||||
// Scan applies the selector query and scans the result into the given value.
|
||||
func (dgb *DsseGroupBy) Scan(ctx context.Context, v any) error {
|
||||
ctx = setContextOp(ctx, dgb.build.ctx, "GroupBy")
|
||||
ctx = setContextOp(ctx, dgb.build.ctx, ent.OpQueryGroupBy)
|
||||
if err := dgb.build.prepareQuery(ctx); err != nil {
|
||||
return err
|
||||
}
|
||||
|
@ -793,7 +794,7 @@ func (ds *DsseSelect) Aggregate(fns ...AggregateFunc) *DsseSelect {
|
|||
|
||||
// Scan applies the selector query and scans the result into the given value.
|
||||
func (ds *DsseSelect) Scan(ctx context.Context, v any) error {
|
||||
ctx = setContextOp(ctx, ds.ctx, "Select")
|
||||
ctx = setContextOp(ctx, ds.ctx, ent.OpQuerySelect)
|
||||
if err := ds.prepareQuery(ctx); err != nil {
|
||||
return err
|
||||
}
|
||||
|
|
|
@ -78,7 +78,7 @@ var (
|
|||
columnCheck sql.ColumnCheck
|
||||
)
|
||||
|
||||
// columnChecker checks if the column exists in the given table.
|
||||
// checkColumn checks if the column exists in the given table.
|
||||
func checkColumn(table, column string) error {
|
||||
initCheck.Do(func() {
|
||||
columnCheck = sql.NewColumnCheck(map[string]func(string) bool{
|
||||
|
|
|
@ -7,6 +7,7 @@ import (
|
|||
"fmt"
|
||||
"math"
|
||||
|
||||
"entgo.io/ent"
|
||||
"entgo.io/ent/dialect/sql"
|
||||
"entgo.io/ent/dialect/sql/sqlgraph"
|
||||
"entgo.io/ent/schema/field"
|
||||
|
@ -88,7 +89,7 @@ func (pdq *PayloadDigestQuery) QueryDsse() *DsseQuery {
|
|||
// First returns the first PayloadDigest entity from the query.
|
||||
// Returns a *NotFoundError when no PayloadDigest was found.
|
||||
func (pdq *PayloadDigestQuery) First(ctx context.Context) (*PayloadDigest, error) {
|
||||
nodes, err := pdq.Limit(1).All(setContextOp(ctx, pdq.ctx, "First"))
|
||||
nodes, err := pdq.Limit(1).All(setContextOp(ctx, pdq.ctx, ent.OpQueryFirst))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
@ -111,7 +112,7 @@ func (pdq *PayloadDigestQuery) FirstX(ctx context.Context) *PayloadDigest {
|
|||
// Returns a *NotFoundError when no PayloadDigest ID was found.
|
||||
func (pdq *PayloadDigestQuery) FirstID(ctx context.Context) (id uuid.UUID, err error) {
|
||||
var ids []uuid.UUID
|
||||
if ids, err = pdq.Limit(1).IDs(setContextOp(ctx, pdq.ctx, "FirstID")); err != nil {
|
||||
if ids, err = pdq.Limit(1).IDs(setContextOp(ctx, pdq.ctx, ent.OpQueryFirstID)); err != nil {
|
||||
return
|
||||
}
|
||||
if len(ids) == 0 {
|
||||
|
@ -134,7 +135,7 @@ func (pdq *PayloadDigestQuery) FirstIDX(ctx context.Context) uuid.UUID {
|
|||
// Returns a *NotSingularError when more than one PayloadDigest entity is found.
|
||||
// Returns a *NotFoundError when no PayloadDigest entities are found.
|
||||
func (pdq *PayloadDigestQuery) Only(ctx context.Context) (*PayloadDigest, error) {
|
||||
nodes, err := pdq.Limit(2).All(setContextOp(ctx, pdq.ctx, "Only"))
|
||||
nodes, err := pdq.Limit(2).All(setContextOp(ctx, pdq.ctx, ent.OpQueryOnly))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
@ -162,7 +163,7 @@ func (pdq *PayloadDigestQuery) OnlyX(ctx context.Context) *PayloadDigest {
|
|||
// Returns a *NotFoundError when no entities are found.
|
||||
func (pdq *PayloadDigestQuery) OnlyID(ctx context.Context) (id uuid.UUID, err error) {
|
||||
var ids []uuid.UUID
|
||||
if ids, err = pdq.Limit(2).IDs(setContextOp(ctx, pdq.ctx, "OnlyID")); err != nil {
|
||||
if ids, err = pdq.Limit(2).IDs(setContextOp(ctx, pdq.ctx, ent.OpQueryOnlyID)); err != nil {
|
||||
return
|
||||
}
|
||||
switch len(ids) {
|
||||
|
@ -187,7 +188,7 @@ func (pdq *PayloadDigestQuery) OnlyIDX(ctx context.Context) uuid.UUID {
|
|||
|
||||
// All executes the query and returns a list of PayloadDigests.
|
||||
func (pdq *PayloadDigestQuery) All(ctx context.Context) ([]*PayloadDigest, error) {
|
||||
ctx = setContextOp(ctx, pdq.ctx, "All")
|
||||
ctx = setContextOp(ctx, pdq.ctx, ent.OpQueryAll)
|
||||
if err := pdq.prepareQuery(ctx); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
@ -209,7 +210,7 @@ func (pdq *PayloadDigestQuery) IDs(ctx context.Context) (ids []uuid.UUID, err er
|
|||
if pdq.ctx.Unique == nil && pdq.path != nil {
|
||||
pdq.Unique(true)
|
||||
}
|
||||
ctx = setContextOp(ctx, pdq.ctx, "IDs")
|
||||
ctx = setContextOp(ctx, pdq.ctx, ent.OpQueryIDs)
|
||||
if err = pdq.Select(payloaddigest.FieldID).Scan(ctx, &ids); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
@ -227,7 +228,7 @@ func (pdq *PayloadDigestQuery) IDsX(ctx context.Context) []uuid.UUID {
|
|||
|
||||
// Count returns the count of the given query.
|
||||
func (pdq *PayloadDigestQuery) Count(ctx context.Context) (int, error) {
|
||||
ctx = setContextOp(ctx, pdq.ctx, "Count")
|
||||
ctx = setContextOp(ctx, pdq.ctx, ent.OpQueryCount)
|
||||
if err := pdq.prepareQuery(ctx); err != nil {
|
||||
return 0, err
|
||||
}
|
||||
|
@ -245,7 +246,7 @@ func (pdq *PayloadDigestQuery) CountX(ctx context.Context) int {
|
|||
|
||||
// Exist returns true if the query has elements in the graph.
|
||||
func (pdq *PayloadDigestQuery) Exist(ctx context.Context) (bool, error) {
|
||||
ctx = setContextOp(ctx, pdq.ctx, "Exist")
|
||||
ctx = setContextOp(ctx, pdq.ctx, ent.OpQueryExist)
|
||||
switch _, err := pdq.FirstID(ctx); {
|
||||
case IsNotFound(err):
|
||||
return false, nil
|
||||
|
@ -550,7 +551,7 @@ func (pdgb *PayloadDigestGroupBy) Aggregate(fns ...AggregateFunc) *PayloadDigest
|
|||
|
||||
// Scan applies the selector query and scans the result into the given value.
|
||||
func (pdgb *PayloadDigestGroupBy) Scan(ctx context.Context, v any) error {
|
||||
ctx = setContextOp(ctx, pdgb.build.ctx, "GroupBy")
|
||||
ctx = setContextOp(ctx, pdgb.build.ctx, ent.OpQueryGroupBy)
|
||||
if err := pdgb.build.prepareQuery(ctx); err != nil {
|
||||
return err
|
||||
}
|
||||
|
@ -598,7 +599,7 @@ func (pds *PayloadDigestSelect) Aggregate(fns ...AggregateFunc) *PayloadDigestSe
|
|||
|
||||
// Scan applies the selector query and scans the result into the given value.
|
||||
func (pds *PayloadDigestSelect) Scan(ctx context.Context, v any) error {
|
||||
ctx = setContextOp(ctx, pds.ctx, "Select")
|
||||
ctx = setContextOp(ctx, pds.ctx, ent.OpQuerySelect)
|
||||
if err := pds.prepareQuery(ctx); err != nil {
|
||||
return err
|
||||
}
|
||||
|
|
|
@ -81,10 +81,6 @@ func init() {
|
|||
payloaddigest.DefaultID = payloaddigestDescID.Default.(func() uuid.UUID)
|
||||
signatureFields := schema.Signature{}.Fields()
|
||||
_ = signatureFields
|
||||
// signatureDescKeyID is the schema descriptor for key_id field.
|
||||
signatureDescKeyID := signatureFields[1].Descriptor()
|
||||
// signature.KeyIDValidator is a validator for the "key_id" field. It is called by the builders before save.
|
||||
signature.KeyIDValidator = signatureDescKeyID.Validators[0].(func(string) error)
|
||||
// signatureDescSignature is the schema descriptor for signature field.
|
||||
signatureDescSignature := signatureFields[2].Descriptor()
|
||||
// signature.SignatureValidator is a validator for the "signature" field. It is called by the builders before save.
|
||||
|
|
|
@ -5,6 +5,6 @@ package runtime
|
|||
// The schema-stitching logic is generated in github.com/in-toto/archivista/ent/runtime.go
|
||||
|
||||
const (
|
||||
Version = "v0.13.1" // Version of ent codegen.
|
||||
Sum = "h1:uD8QwN1h6SNphdCCzmkMN3feSUzNnVvV/WIkHKMbzOE=" // Sum of ent codegen.
|
||||
Version = "v0.14.4" // Version of ent codegen.
|
||||
Sum = "h1:/DhDraSLXIkBhyiVoJeSshr4ZYi7femzhj6/TckzZuI=" // Sum of ent codegen.
|
||||
)
|
||||
|
|
|
@ -32,7 +32,7 @@ type Signature struct {
|
|||
func (Signature) Fields() []ent.Field {
|
||||
return []ent.Field{
|
||||
field.UUID("id", uuid.UUID{}).Default(uuid.New).Immutable().Unique(),
|
||||
field.String("key_id").NotEmpty(),
|
||||
field.String("key_id"),
|
||||
field.String("signature").NotEmpty().SchemaType(map[string]string{dialect.MySQL: "text"}),
|
||||
}
|
||||
}
|
||||
|
|
|
@ -68,8 +68,6 @@ func ValidColumn(column string) bool {
|
|||
}
|
||||
|
||||
var (
|
||||
// KeyIDValidator is a validator for the "key_id" field. It is called by the builders before save.
|
||||
KeyIDValidator func(string) error
|
||||
// SignatureValidator is a validator for the "signature" field. It is called by the builders before save.
|
||||
SignatureValidator func(string) error
|
||||
// DefaultID holds the default value on creation for the "id" field.
|
||||
|
|
|
@ -128,11 +128,6 @@ func (sc *SignatureCreate) check() error {
|
|||
if _, ok := sc.mutation.KeyID(); !ok {
|
||||
return &ValidationError{Name: "key_id", err: errors.New(`ent: missing required field "Signature.key_id"`)}
|
||||
}
|
||||
if v, ok := sc.mutation.KeyID(); ok {
|
||||
if err := signature.KeyIDValidator(v); err != nil {
|
||||
return &ValidationError{Name: "key_id", err: fmt.Errorf(`ent: validator failed for field "Signature.key_id": %w`, err)}
|
||||
}
|
||||
}
|
||||
if _, ok := sc.mutation.Signature(); !ok {
|
||||
return &ValidationError{Name: "signature", err: errors.New(`ent: missing required field "Signature.signature"`)}
|
||||
}
|
||||
|
|
|
@ -8,6 +8,7 @@ import (
|
|||
"fmt"
|
||||
"math"
|
||||
|
||||
"entgo.io/ent"
|
||||
"entgo.io/ent/dialect/sql"
|
||||
"entgo.io/ent/dialect/sql/sqlgraph"
|
||||
"entgo.io/ent/schema/field"
|
||||
|
@ -114,7 +115,7 @@ func (sq *SignatureQuery) QueryTimestamps() *TimestampQuery {
|
|||
// First returns the first Signature entity from the query.
|
||||
// Returns a *NotFoundError when no Signature was found.
|
||||
func (sq *SignatureQuery) First(ctx context.Context) (*Signature, error) {
|
||||
nodes, err := sq.Limit(1).All(setContextOp(ctx, sq.ctx, "First"))
|
||||
nodes, err := sq.Limit(1).All(setContextOp(ctx, sq.ctx, ent.OpQueryFirst))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
@ -137,7 +138,7 @@ func (sq *SignatureQuery) FirstX(ctx context.Context) *Signature {
|
|||
// Returns a *NotFoundError when no Signature ID was found.
|
||||
func (sq *SignatureQuery) FirstID(ctx context.Context) (id uuid.UUID, err error) {
|
||||
var ids []uuid.UUID
|
||||
if ids, err = sq.Limit(1).IDs(setContextOp(ctx, sq.ctx, "FirstID")); err != nil {
|
||||
if ids, err = sq.Limit(1).IDs(setContextOp(ctx, sq.ctx, ent.OpQueryFirstID)); err != nil {
|
||||
return
|
||||
}
|
||||
if len(ids) == 0 {
|
||||
|
@ -160,7 +161,7 @@ func (sq *SignatureQuery) FirstIDX(ctx context.Context) uuid.UUID {
|
|||
// Returns a *NotSingularError when more than one Signature entity is found.
|
||||
// Returns a *NotFoundError when no Signature entities are found.
|
||||
func (sq *SignatureQuery) Only(ctx context.Context) (*Signature, error) {
|
||||
nodes, err := sq.Limit(2).All(setContextOp(ctx, sq.ctx, "Only"))
|
||||
nodes, err := sq.Limit(2).All(setContextOp(ctx, sq.ctx, ent.OpQueryOnly))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
@ -188,7 +189,7 @@ func (sq *SignatureQuery) OnlyX(ctx context.Context) *Signature {
|
|||
// Returns a *NotFoundError when no entities are found.
|
||||
func (sq *SignatureQuery) OnlyID(ctx context.Context) (id uuid.UUID, err error) {
|
||||
var ids []uuid.UUID
|
||||
if ids, err = sq.Limit(2).IDs(setContextOp(ctx, sq.ctx, "OnlyID")); err != nil {
|
||||
if ids, err = sq.Limit(2).IDs(setContextOp(ctx, sq.ctx, ent.OpQueryOnlyID)); err != nil {
|
||||
return
|
||||
}
|
||||
switch len(ids) {
|
||||
|
@ -213,7 +214,7 @@ func (sq *SignatureQuery) OnlyIDX(ctx context.Context) uuid.UUID {
|
|||
|
||||
// All executes the query and returns a list of Signatures.
|
||||
func (sq *SignatureQuery) All(ctx context.Context) ([]*Signature, error) {
|
||||
ctx = setContextOp(ctx, sq.ctx, "All")
|
||||
ctx = setContextOp(ctx, sq.ctx, ent.OpQueryAll)
|
||||
if err := sq.prepareQuery(ctx); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
@ -235,7 +236,7 @@ func (sq *SignatureQuery) IDs(ctx context.Context) (ids []uuid.UUID, err error)
|
|||
if sq.ctx.Unique == nil && sq.path != nil {
|
||||
sq.Unique(true)
|
||||
}
|
||||
ctx = setContextOp(ctx, sq.ctx, "IDs")
|
||||
ctx = setContextOp(ctx, sq.ctx, ent.OpQueryIDs)
|
||||
if err = sq.Select(signature.FieldID).Scan(ctx, &ids); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
@ -253,7 +254,7 @@ func (sq *SignatureQuery) IDsX(ctx context.Context) []uuid.UUID {
|
|||
|
||||
// Count returns the count of the given query.
|
||||
func (sq *SignatureQuery) Count(ctx context.Context) (int, error) {
|
||||
ctx = setContextOp(ctx, sq.ctx, "Count")
|
||||
ctx = setContextOp(ctx, sq.ctx, ent.OpQueryCount)
|
||||
if err := sq.prepareQuery(ctx); err != nil {
|
||||
return 0, err
|
||||
}
|
||||
|
@ -271,7 +272,7 @@ func (sq *SignatureQuery) CountX(ctx context.Context) int {
|
|||
|
||||
// Exist returns true if the query has elements in the graph.
|
||||
func (sq *SignatureQuery) Exist(ctx context.Context) (bool, error) {
|
||||
ctx = setContextOp(ctx, sq.ctx, "Exist")
|
||||
ctx = setContextOp(ctx, sq.ctx, ent.OpQueryExist)
|
||||
switch _, err := sq.FirstID(ctx); {
|
||||
case IsNotFound(err):
|
||||
return false, nil
|
||||
|
@ -648,7 +649,7 @@ func (sgb *SignatureGroupBy) Aggregate(fns ...AggregateFunc) *SignatureGroupBy {
|
|||
|
||||
// Scan applies the selector query and scans the result into the given value.
|
||||
func (sgb *SignatureGroupBy) Scan(ctx context.Context, v any) error {
|
||||
ctx = setContextOp(ctx, sgb.build.ctx, "GroupBy")
|
||||
ctx = setContextOp(ctx, sgb.build.ctx, ent.OpQueryGroupBy)
|
||||
if err := sgb.build.prepareQuery(ctx); err != nil {
|
||||
return err
|
||||
}
|
||||
|
@ -696,7 +697,7 @@ func (ss *SignatureSelect) Aggregate(fns ...AggregateFunc) *SignatureSelect {
|
|||
|
||||
// Scan applies the selector query and scans the result into the given value.
|
||||
func (ss *SignatureSelect) Scan(ctx context.Context, v any) error {
|
||||
ctx = setContextOp(ctx, ss.ctx, "Select")
|
||||
ctx = setContextOp(ctx, ss.ctx, ent.OpQuerySelect)
|
||||
if err := ss.prepareQuery(ctx); err != nil {
|
||||
return err
|
||||
}
|
||||
|
|
|
@ -153,11 +153,6 @@ func (su *SignatureUpdate) ExecX(ctx context.Context) {
|
|||
|
||||
// check runs all checks and user-defined validators on the builder.
|
||||
func (su *SignatureUpdate) check() error {
|
||||
if v, ok := su.mutation.KeyID(); ok {
|
||||
if err := signature.KeyIDValidator(v); err != nil {
|
||||
return &ValidationError{Name: "key_id", err: fmt.Errorf(`ent: validator failed for field "Signature.key_id": %w`, err)}
|
||||
}
|
||||
}
|
||||
if v, ok := su.mutation.Signature(); ok {
|
||||
if err := signature.SignatureValidator(v); err != nil {
|
||||
return &ValidationError{Name: "signature", err: fmt.Errorf(`ent: validator failed for field "Signature.signature": %w`, err)}
|
||||
|
@ -414,11 +409,6 @@ func (suo *SignatureUpdateOne) ExecX(ctx context.Context) {
|
|||
|
||||
// check runs all checks and user-defined validators on the builder.
|
||||
func (suo *SignatureUpdateOne) check() error {
|
||||
if v, ok := suo.mutation.KeyID(); ok {
|
||||
if err := signature.KeyIDValidator(v); err != nil {
|
||||
return &ValidationError{Name: "key_id", err: fmt.Errorf(`ent: validator failed for field "Signature.key_id": %w`, err)}
|
||||
}
|
||||
}
|
||||
if v, ok := suo.mutation.Signature(); ok {
|
||||
if err := signature.SignatureValidator(v); err != nil {
|
||||
return &ValidationError{Name: "signature", err: fmt.Errorf(`ent: validator failed for field "Signature.signature": %w`, err)}
|
||||
|
|
|
@ -8,6 +8,7 @@ import (
|
|||
"fmt"
|
||||
"math"
|
||||
|
||||
"entgo.io/ent"
|
||||
"entgo.io/ent/dialect/sql"
|
||||
"entgo.io/ent/dialect/sql/sqlgraph"
|
||||
"entgo.io/ent/schema/field"
|
||||
|
@ -162,7 +163,7 @@ func (sq *StatementQuery) QueryDsse() *DsseQuery {
|
|||
// First returns the first Statement entity from the query.
|
||||
// Returns a *NotFoundError when no Statement was found.
|
||||
func (sq *StatementQuery) First(ctx context.Context) (*Statement, error) {
|
||||
nodes, err := sq.Limit(1).All(setContextOp(ctx, sq.ctx, "First"))
|
||||
nodes, err := sq.Limit(1).All(setContextOp(ctx, sq.ctx, ent.OpQueryFirst))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
@ -185,7 +186,7 @@ func (sq *StatementQuery) FirstX(ctx context.Context) *Statement {
|
|||
// Returns a *NotFoundError when no Statement ID was found.
|
||||
func (sq *StatementQuery) FirstID(ctx context.Context) (id uuid.UUID, err error) {
|
||||
var ids []uuid.UUID
|
||||
if ids, err = sq.Limit(1).IDs(setContextOp(ctx, sq.ctx, "FirstID")); err != nil {
|
||||
if ids, err = sq.Limit(1).IDs(setContextOp(ctx, sq.ctx, ent.OpQueryFirstID)); err != nil {
|
||||
return
|
||||
}
|
||||
if len(ids) == 0 {
|
||||
|
@ -208,7 +209,7 @@ func (sq *StatementQuery) FirstIDX(ctx context.Context) uuid.UUID {
|
|||
// Returns a *NotSingularError when more than one Statement entity is found.
|
||||
// Returns a *NotFoundError when no Statement entities are found.
|
||||
func (sq *StatementQuery) Only(ctx context.Context) (*Statement, error) {
|
||||
nodes, err := sq.Limit(2).All(setContextOp(ctx, sq.ctx, "Only"))
|
||||
nodes, err := sq.Limit(2).All(setContextOp(ctx, sq.ctx, ent.OpQueryOnly))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
@ -236,7 +237,7 @@ func (sq *StatementQuery) OnlyX(ctx context.Context) *Statement {
|
|||
// Returns a *NotFoundError when no entities are found.
|
||||
func (sq *StatementQuery) OnlyID(ctx context.Context) (id uuid.UUID, err error) {
|
||||
var ids []uuid.UUID
|
||||
if ids, err = sq.Limit(2).IDs(setContextOp(ctx, sq.ctx, "OnlyID")); err != nil {
|
||||
if ids, err = sq.Limit(2).IDs(setContextOp(ctx, sq.ctx, ent.OpQueryOnlyID)); err != nil {
|
||||
return
|
||||
}
|
||||
switch len(ids) {
|
||||
|
@ -261,7 +262,7 @@ func (sq *StatementQuery) OnlyIDX(ctx context.Context) uuid.UUID {
|
|||
|
||||
// All executes the query and returns a list of Statements.
|
||||
func (sq *StatementQuery) All(ctx context.Context) ([]*Statement, error) {
|
||||
ctx = setContextOp(ctx, sq.ctx, "All")
|
||||
ctx = setContextOp(ctx, sq.ctx, ent.OpQueryAll)
|
||||
if err := sq.prepareQuery(ctx); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
@ -283,7 +284,7 @@ func (sq *StatementQuery) IDs(ctx context.Context) (ids []uuid.UUID, err error)
|
|||
if sq.ctx.Unique == nil && sq.path != nil {
|
||||
sq.Unique(true)
|
||||
}
|
||||
ctx = setContextOp(ctx, sq.ctx, "IDs")
|
||||
ctx = setContextOp(ctx, sq.ctx, ent.OpQueryIDs)
|
||||
if err = sq.Select(statement.FieldID).Scan(ctx, &ids); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
@ -301,7 +302,7 @@ func (sq *StatementQuery) IDsX(ctx context.Context) []uuid.UUID {
|
|||
|
||||
// Count returns the count of the given query.
|
||||
func (sq *StatementQuery) Count(ctx context.Context) (int, error) {
|
||||
ctx = setContextOp(ctx, sq.ctx, "Count")
|
||||
ctx = setContextOp(ctx, sq.ctx, ent.OpQueryCount)
|
||||
if err := sq.prepareQuery(ctx); err != nil {
|
||||
return 0, err
|
||||
}
|
||||
|
@ -319,7 +320,7 @@ func (sq *StatementQuery) CountX(ctx context.Context) int {
|
|||
|
||||
// Exist returns true if the query has elements in the graph.
|
||||
func (sq *StatementQuery) Exist(ctx context.Context) (bool, error) {
|
||||
ctx = setContextOp(ctx, sq.ctx, "Exist")
|
||||
ctx = setContextOp(ctx, sq.ctx, ent.OpQueryExist)
|
||||
switch _, err := sq.FirstID(ctx); {
|
||||
case IsNotFound(err):
|
||||
return false, nil
|
||||
|
@ -804,7 +805,7 @@ func (sgb *StatementGroupBy) Aggregate(fns ...AggregateFunc) *StatementGroupBy {
|
|||
|
||||
// Scan applies the selector query and scans the result into the given value.
|
||||
func (sgb *StatementGroupBy) Scan(ctx context.Context, v any) error {
|
||||
ctx = setContextOp(ctx, sgb.build.ctx, "GroupBy")
|
||||
ctx = setContextOp(ctx, sgb.build.ctx, ent.OpQueryGroupBy)
|
||||
if err := sgb.build.prepareQuery(ctx); err != nil {
|
||||
return err
|
||||
}
|
||||
|
@ -852,7 +853,7 @@ func (ss *StatementSelect) Aggregate(fns ...AggregateFunc) *StatementSelect {
|
|||
|
||||
// Scan applies the selector query and scans the result into the given value.
|
||||
func (ss *StatementSelect) Scan(ctx context.Context, v any) error {
|
||||
ctx = setContextOp(ctx, ss.ctx, "Select")
|
||||
ctx = setContextOp(ctx, ss.ctx, ent.OpQuerySelect)
|
||||
if err := ss.prepareQuery(ctx); err != nil {
|
||||
return err
|
||||
}
|
||||
|
|
|
@ -8,6 +8,7 @@ import (
|
|||
"fmt"
|
||||
"math"
|
||||
|
||||
"entgo.io/ent"
|
||||
"entgo.io/ent/dialect/sql"
|
||||
"entgo.io/ent/dialect/sql/sqlgraph"
|
||||
"entgo.io/ent/schema/field"
|
||||
|
@ -114,7 +115,7 @@ func (sq *SubjectQuery) QueryStatement() *StatementQuery {
|
|||
// First returns the first Subject entity from the query.
|
||||
// Returns a *NotFoundError when no Subject was found.
|
||||
func (sq *SubjectQuery) First(ctx context.Context) (*Subject, error) {
|
||||
nodes, err := sq.Limit(1).All(setContextOp(ctx, sq.ctx, "First"))
|
||||
nodes, err := sq.Limit(1).All(setContextOp(ctx, sq.ctx, ent.OpQueryFirst))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
@ -137,7 +138,7 @@ func (sq *SubjectQuery) FirstX(ctx context.Context) *Subject {
|
|||
// Returns a *NotFoundError when no Subject ID was found.
|
||||
func (sq *SubjectQuery) FirstID(ctx context.Context) (id uuid.UUID, err error) {
|
||||
var ids []uuid.UUID
|
||||
if ids, err = sq.Limit(1).IDs(setContextOp(ctx, sq.ctx, "FirstID")); err != nil {
|
||||
if ids, err = sq.Limit(1).IDs(setContextOp(ctx, sq.ctx, ent.OpQueryFirstID)); err != nil {
|
||||
return
|
||||
}
|
||||
if len(ids) == 0 {
|
||||
|
@ -160,7 +161,7 @@ func (sq *SubjectQuery) FirstIDX(ctx context.Context) uuid.UUID {
|
|||
// Returns a *NotSingularError when more than one Subject entity is found.
|
||||
// Returns a *NotFoundError when no Subject entities are found.
|
||||
func (sq *SubjectQuery) Only(ctx context.Context) (*Subject, error) {
|
||||
nodes, err := sq.Limit(2).All(setContextOp(ctx, sq.ctx, "Only"))
|
||||
nodes, err := sq.Limit(2).All(setContextOp(ctx, sq.ctx, ent.OpQueryOnly))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
@ -188,7 +189,7 @@ func (sq *SubjectQuery) OnlyX(ctx context.Context) *Subject {
|
|||
// Returns a *NotFoundError when no entities are found.
|
||||
func (sq *SubjectQuery) OnlyID(ctx context.Context) (id uuid.UUID, err error) {
|
||||
var ids []uuid.UUID
|
||||
if ids, err = sq.Limit(2).IDs(setContextOp(ctx, sq.ctx, "OnlyID")); err != nil {
|
||||
if ids, err = sq.Limit(2).IDs(setContextOp(ctx, sq.ctx, ent.OpQueryOnlyID)); err != nil {
|
||||
return
|
||||
}
|
||||
switch len(ids) {
|
||||
|
@ -213,7 +214,7 @@ func (sq *SubjectQuery) OnlyIDX(ctx context.Context) uuid.UUID {
|
|||
|
||||
// All executes the query and returns a list of Subjects.
|
||||
func (sq *SubjectQuery) All(ctx context.Context) ([]*Subject, error) {
|
||||
ctx = setContextOp(ctx, sq.ctx, "All")
|
||||
ctx = setContextOp(ctx, sq.ctx, ent.OpQueryAll)
|
||||
if err := sq.prepareQuery(ctx); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
@ -235,7 +236,7 @@ func (sq *SubjectQuery) IDs(ctx context.Context) (ids []uuid.UUID, err error) {
|
|||
if sq.ctx.Unique == nil && sq.path != nil {
|
||||
sq.Unique(true)
|
||||
}
|
||||
ctx = setContextOp(ctx, sq.ctx, "IDs")
|
||||
ctx = setContextOp(ctx, sq.ctx, ent.OpQueryIDs)
|
||||
if err = sq.Select(subject.FieldID).Scan(ctx, &ids); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
@ -253,7 +254,7 @@ func (sq *SubjectQuery) IDsX(ctx context.Context) []uuid.UUID {
|
|||
|
||||
// Count returns the count of the given query.
|
||||
func (sq *SubjectQuery) Count(ctx context.Context) (int, error) {
|
||||
ctx = setContextOp(ctx, sq.ctx, "Count")
|
||||
ctx = setContextOp(ctx, sq.ctx, ent.OpQueryCount)
|
||||
if err := sq.prepareQuery(ctx); err != nil {
|
||||
return 0, err
|
||||
}
|
||||
|
@ -271,7 +272,7 @@ func (sq *SubjectQuery) CountX(ctx context.Context) int {
|
|||
|
||||
// Exist returns true if the query has elements in the graph.
|
||||
func (sq *SubjectQuery) Exist(ctx context.Context) (bool, error) {
|
||||
ctx = setContextOp(ctx, sq.ctx, "Exist")
|
||||
ctx = setContextOp(ctx, sq.ctx, ent.OpQueryExist)
|
||||
switch _, err := sq.FirstID(ctx); {
|
||||
case IsNotFound(err):
|
||||
return false, nil
|
||||
|
@ -648,7 +649,7 @@ func (sgb *SubjectGroupBy) Aggregate(fns ...AggregateFunc) *SubjectGroupBy {
|
|||
|
||||
// Scan applies the selector query and scans the result into the given value.
|
||||
func (sgb *SubjectGroupBy) Scan(ctx context.Context, v any) error {
|
||||
ctx = setContextOp(ctx, sgb.build.ctx, "GroupBy")
|
||||
ctx = setContextOp(ctx, sgb.build.ctx, ent.OpQueryGroupBy)
|
||||
if err := sgb.build.prepareQuery(ctx); err != nil {
|
||||
return err
|
||||
}
|
||||
|
@ -696,7 +697,7 @@ func (ss *SubjectSelect) Aggregate(fns ...AggregateFunc) *SubjectSelect {
|
|||
|
||||
// Scan applies the selector query and scans the result into the given value.
|
||||
func (ss *SubjectSelect) Scan(ctx context.Context, v any) error {
|
||||
ctx = setContextOp(ctx, ss.ctx, "Select")
|
||||
ctx = setContextOp(ctx, ss.ctx, ent.OpQuerySelect)
|
||||
if err := ss.prepareQuery(ctx); err != nil {
|
||||
return err
|
||||
}
|
||||
|
|
|
@ -7,6 +7,7 @@ import (
|
|||
"fmt"
|
||||
"math"
|
||||
|
||||
"entgo.io/ent"
|
||||
"entgo.io/ent/dialect/sql"
|
||||
"entgo.io/ent/dialect/sql/sqlgraph"
|
||||
"entgo.io/ent/schema/field"
|
||||
|
@ -88,7 +89,7 @@ func (sdq *SubjectDigestQuery) QuerySubject() *SubjectQuery {
|
|||
// First returns the first SubjectDigest entity from the query.
|
||||
// Returns a *NotFoundError when no SubjectDigest was found.
|
||||
func (sdq *SubjectDigestQuery) First(ctx context.Context) (*SubjectDigest, error) {
|
||||
nodes, err := sdq.Limit(1).All(setContextOp(ctx, sdq.ctx, "First"))
|
||||
nodes, err := sdq.Limit(1).All(setContextOp(ctx, sdq.ctx, ent.OpQueryFirst))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
@ -111,7 +112,7 @@ func (sdq *SubjectDigestQuery) FirstX(ctx context.Context) *SubjectDigest {
|
|||
// Returns a *NotFoundError when no SubjectDigest ID was found.
|
||||
func (sdq *SubjectDigestQuery) FirstID(ctx context.Context) (id uuid.UUID, err error) {
|
||||
var ids []uuid.UUID
|
||||
if ids, err = sdq.Limit(1).IDs(setContextOp(ctx, sdq.ctx, "FirstID")); err != nil {
|
||||
if ids, err = sdq.Limit(1).IDs(setContextOp(ctx, sdq.ctx, ent.OpQueryFirstID)); err != nil {
|
||||
return
|
||||
}
|
||||
if len(ids) == 0 {
|
||||
|
@ -134,7 +135,7 @@ func (sdq *SubjectDigestQuery) FirstIDX(ctx context.Context) uuid.UUID {
|
|||
// Returns a *NotSingularError when more than one SubjectDigest entity is found.
|
||||
// Returns a *NotFoundError when no SubjectDigest entities are found.
|
||||
func (sdq *SubjectDigestQuery) Only(ctx context.Context) (*SubjectDigest, error) {
|
||||
nodes, err := sdq.Limit(2).All(setContextOp(ctx, sdq.ctx, "Only"))
|
||||
nodes, err := sdq.Limit(2).All(setContextOp(ctx, sdq.ctx, ent.OpQueryOnly))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
@ -162,7 +163,7 @@ func (sdq *SubjectDigestQuery) OnlyX(ctx context.Context) *SubjectDigest {
|
|||
// Returns a *NotFoundError when no entities are found.
|
||||
func (sdq *SubjectDigestQuery) OnlyID(ctx context.Context) (id uuid.UUID, err error) {
|
||||
var ids []uuid.UUID
|
||||
if ids, err = sdq.Limit(2).IDs(setContextOp(ctx, sdq.ctx, "OnlyID")); err != nil {
|
||||
if ids, err = sdq.Limit(2).IDs(setContextOp(ctx, sdq.ctx, ent.OpQueryOnlyID)); err != nil {
|
||||
return
|
||||
}
|
||||
switch len(ids) {
|
||||
|
@ -187,7 +188,7 @@ func (sdq *SubjectDigestQuery) OnlyIDX(ctx context.Context) uuid.UUID {
|
|||
|
||||
// All executes the query and returns a list of SubjectDigests.
|
||||
func (sdq *SubjectDigestQuery) All(ctx context.Context) ([]*SubjectDigest, error) {
|
||||
ctx = setContextOp(ctx, sdq.ctx, "All")
|
||||
ctx = setContextOp(ctx, sdq.ctx, ent.OpQueryAll)
|
||||
if err := sdq.prepareQuery(ctx); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
@ -209,7 +210,7 @@ func (sdq *SubjectDigestQuery) IDs(ctx context.Context) (ids []uuid.UUID, err er
|
|||
if sdq.ctx.Unique == nil && sdq.path != nil {
|
||||
sdq.Unique(true)
|
||||
}
|
||||
ctx = setContextOp(ctx, sdq.ctx, "IDs")
|
||||
ctx = setContextOp(ctx, sdq.ctx, ent.OpQueryIDs)
|
||||
if err = sdq.Select(subjectdigest.FieldID).Scan(ctx, &ids); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
@ -227,7 +228,7 @@ func (sdq *SubjectDigestQuery) IDsX(ctx context.Context) []uuid.UUID {
|
|||
|
||||
// Count returns the count of the given query.
|
||||
func (sdq *SubjectDigestQuery) Count(ctx context.Context) (int, error) {
|
||||
ctx = setContextOp(ctx, sdq.ctx, "Count")
|
||||
ctx = setContextOp(ctx, sdq.ctx, ent.OpQueryCount)
|
||||
if err := sdq.prepareQuery(ctx); err != nil {
|
||||
return 0, err
|
||||
}
|
||||
|
@ -245,7 +246,7 @@ func (sdq *SubjectDigestQuery) CountX(ctx context.Context) int {
|
|||
|
||||
// Exist returns true if the query has elements in the graph.
|
||||
func (sdq *SubjectDigestQuery) Exist(ctx context.Context) (bool, error) {
|
||||
ctx = setContextOp(ctx, sdq.ctx, "Exist")
|
||||
ctx = setContextOp(ctx, sdq.ctx, ent.OpQueryExist)
|
||||
switch _, err := sdq.FirstID(ctx); {
|
||||
case IsNotFound(err):
|
||||
return false, nil
|
||||
|
@ -550,7 +551,7 @@ func (sdgb *SubjectDigestGroupBy) Aggregate(fns ...AggregateFunc) *SubjectDigest
|
|||
|
||||
// Scan applies the selector query and scans the result into the given value.
|
||||
func (sdgb *SubjectDigestGroupBy) Scan(ctx context.Context, v any) error {
|
||||
ctx = setContextOp(ctx, sdgb.build.ctx, "GroupBy")
|
||||
ctx = setContextOp(ctx, sdgb.build.ctx, ent.OpQueryGroupBy)
|
||||
if err := sdgb.build.prepareQuery(ctx); err != nil {
|
||||
return err
|
||||
}
|
||||
|
@ -598,7 +599,7 @@ func (sds *SubjectDigestSelect) Aggregate(fns ...AggregateFunc) *SubjectDigestSe
|
|||
|
||||
// Scan applies the selector query and scans the result into the given value.
|
||||
func (sds *SubjectDigestSelect) Scan(ctx context.Context, v any) error {
|
||||
ctx = setContextOp(ctx, sds.ctx, "Select")
|
||||
ctx = setContextOp(ctx, sds.ctx, ent.OpQuerySelect)
|
||||
if err := sds.prepareQuery(ctx); err != nil {
|
||||
return err
|
||||
}
|
||||
|
|
|
@ -7,6 +7,7 @@ import (
|
|||
"fmt"
|
||||
"math"
|
||||
|
||||
"entgo.io/ent"
|
||||
"entgo.io/ent/dialect/sql"
|
||||
"entgo.io/ent/dialect/sql/sqlgraph"
|
||||
"entgo.io/ent/schema/field"
|
||||
|
@ -88,7 +89,7 @@ func (tq *TimestampQuery) QuerySignature() *SignatureQuery {
|
|||
// First returns the first Timestamp entity from the query.
|
||||
// Returns a *NotFoundError when no Timestamp was found.
|
||||
func (tq *TimestampQuery) First(ctx context.Context) (*Timestamp, error) {
|
||||
nodes, err := tq.Limit(1).All(setContextOp(ctx, tq.ctx, "First"))
|
||||
nodes, err := tq.Limit(1).All(setContextOp(ctx, tq.ctx, ent.OpQueryFirst))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
@ -111,7 +112,7 @@ func (tq *TimestampQuery) FirstX(ctx context.Context) *Timestamp {
|
|||
// Returns a *NotFoundError when no Timestamp ID was found.
|
||||
func (tq *TimestampQuery) FirstID(ctx context.Context) (id uuid.UUID, err error) {
|
||||
var ids []uuid.UUID
|
||||
if ids, err = tq.Limit(1).IDs(setContextOp(ctx, tq.ctx, "FirstID")); err != nil {
|
||||
if ids, err = tq.Limit(1).IDs(setContextOp(ctx, tq.ctx, ent.OpQueryFirstID)); err != nil {
|
||||
return
|
||||
}
|
||||
if len(ids) == 0 {
|
||||
|
@ -134,7 +135,7 @@ func (tq *TimestampQuery) FirstIDX(ctx context.Context) uuid.UUID {
|
|||
// Returns a *NotSingularError when more than one Timestamp entity is found.
|
||||
// Returns a *NotFoundError when no Timestamp entities are found.
|
||||
func (tq *TimestampQuery) Only(ctx context.Context) (*Timestamp, error) {
|
||||
nodes, err := tq.Limit(2).All(setContextOp(ctx, tq.ctx, "Only"))
|
||||
nodes, err := tq.Limit(2).All(setContextOp(ctx, tq.ctx, ent.OpQueryOnly))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
@ -162,7 +163,7 @@ func (tq *TimestampQuery) OnlyX(ctx context.Context) *Timestamp {
|
|||
// Returns a *NotFoundError when no entities are found.
|
||||
func (tq *TimestampQuery) OnlyID(ctx context.Context) (id uuid.UUID, err error) {
|
||||
var ids []uuid.UUID
|
||||
if ids, err = tq.Limit(2).IDs(setContextOp(ctx, tq.ctx, "OnlyID")); err != nil {
|
||||
if ids, err = tq.Limit(2).IDs(setContextOp(ctx, tq.ctx, ent.OpQueryOnlyID)); err != nil {
|
||||
return
|
||||
}
|
||||
switch len(ids) {
|
||||
|
@ -187,7 +188,7 @@ func (tq *TimestampQuery) OnlyIDX(ctx context.Context) uuid.UUID {
|
|||
|
||||
// All executes the query and returns a list of Timestamps.
|
||||
func (tq *TimestampQuery) All(ctx context.Context) ([]*Timestamp, error) {
|
||||
ctx = setContextOp(ctx, tq.ctx, "All")
|
||||
ctx = setContextOp(ctx, tq.ctx, ent.OpQueryAll)
|
||||
if err := tq.prepareQuery(ctx); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
@ -209,7 +210,7 @@ func (tq *TimestampQuery) IDs(ctx context.Context) (ids []uuid.UUID, err error)
|
|||
if tq.ctx.Unique == nil && tq.path != nil {
|
||||
tq.Unique(true)
|
||||
}
|
||||
ctx = setContextOp(ctx, tq.ctx, "IDs")
|
||||
ctx = setContextOp(ctx, tq.ctx, ent.OpQueryIDs)
|
||||
if err = tq.Select(timestamp.FieldID).Scan(ctx, &ids); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
@ -227,7 +228,7 @@ func (tq *TimestampQuery) IDsX(ctx context.Context) []uuid.UUID {
|
|||
|
||||
// Count returns the count of the given query.
|
||||
func (tq *TimestampQuery) Count(ctx context.Context) (int, error) {
|
||||
ctx = setContextOp(ctx, tq.ctx, "Count")
|
||||
ctx = setContextOp(ctx, tq.ctx, ent.OpQueryCount)
|
||||
if err := tq.prepareQuery(ctx); err != nil {
|
||||
return 0, err
|
||||
}
|
||||
|
@ -245,7 +246,7 @@ func (tq *TimestampQuery) CountX(ctx context.Context) int {
|
|||
|
||||
// Exist returns true if the query has elements in the graph.
|
||||
func (tq *TimestampQuery) Exist(ctx context.Context) (bool, error) {
|
||||
ctx = setContextOp(ctx, tq.ctx, "Exist")
|
||||
ctx = setContextOp(ctx, tq.ctx, ent.OpQueryExist)
|
||||
switch _, err := tq.FirstID(ctx); {
|
||||
case IsNotFound(err):
|
||||
return false, nil
|
||||
|
@ -550,7 +551,7 @@ func (tgb *TimestampGroupBy) Aggregate(fns ...AggregateFunc) *TimestampGroupBy {
|
|||
|
||||
// Scan applies the selector query and scans the result into the given value.
|
||||
func (tgb *TimestampGroupBy) Scan(ctx context.Context, v any) error {
|
||||
ctx = setContextOp(ctx, tgb.build.ctx, "GroupBy")
|
||||
ctx = setContextOp(ctx, tgb.build.ctx, ent.OpQueryGroupBy)
|
||||
if err := tgb.build.prepareQuery(ctx); err != nil {
|
||||
return err
|
||||
}
|
||||
|
@ -598,7 +599,7 @@ func (ts *TimestampSelect) Aggregate(fns ...AggregateFunc) *TimestampSelect {
|
|||
|
||||
// Scan applies the selector query and scans the result into the given value.
|
||||
func (ts *TimestampSelect) Scan(ctx context.Context, v any) error {
|
||||
ctx = setContextOp(ctx, ts.ctx, "Select")
|
||||
ctx = setContextOp(ctx, ts.ctx, ent.OpQuerySelect)
|
||||
if err := ts.prepareQuery(ctx); err != nil {
|
||||
return err
|
||||
}
|
||||
|
|
|
@ -13,34 +13,40 @@
|
|||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
if [[ -z $ARCHIVISTA_SQL_STORE_BACKEND ]]; then
|
||||
SQL_TYPE="MYSQL"
|
||||
else
|
||||
SQL_TYPE=$(echo "$ARCHIVISTA_SQL_STORE_BACKEND" | tr '[:lower:]' '[:upper:]')
|
||||
fi
|
||||
case $SQL_TYPE in
|
||||
MYSQL)
|
||||
if [[ -z $ARCHIVISTA_SQL_STORE_CONNECTION_STRING ]]; then
|
||||
ARCHIVISTA_SQL_STORE_CONNECTION_STRING="root:example@db/testify"
|
||||
fi
|
||||
echo "Running migrations for MySQL"
|
||||
atlas migrate apply --dir "file:///archivista/migrations/mysql" --url "mysql://$ARCHIVISTA_SQL_STORE_CONNECTION_STRING"
|
||||
atlas_rc=$?
|
||||
;;
|
||||
PSQL)
|
||||
echo "Running migrations for Postgres"
|
||||
atlas migrate apply --dir "file:///archivista/migrations/pgsql" --url "$ARCHIVISTA_SQL_STORE_CONNECTION_STRING"
|
||||
atlas_rc=$?
|
||||
;;
|
||||
*)
|
||||
echo "Unknown SQL backend: $ARCHIVISTA_SQL_STORE_BACKEND"
|
||||
exit 1
|
||||
;;
|
||||
esac
|
||||
ARCHIVISTA_ENABLE_SQL_STORE=$(echo ${ARCHIVISTA_ENABLE_SQL_STORE} | tr '[:lower:]' '[:upper:]')
|
||||
|
||||
if [[ $atlas_rc -ne 0 ]]; then
|
||||
if [ "${ARCHIVISTA_ENABLE_SQL_STORE}" = "FALSE" ]; then
|
||||
echo "Skipping migrations"
|
||||
else
|
||||
if [[ -z $ARCHIVISTA_SQL_STORE_BACKEND ]]; then
|
||||
SQL_TYPE="MYSQL"
|
||||
else
|
||||
SQL_TYPE=$(echo "$ARCHIVISTA_SQL_STORE_BACKEND" | tr '[:lower:]' '[:upper:]')
|
||||
fi
|
||||
case $SQL_TYPE in
|
||||
MYSQL)
|
||||
if [[ -z $ARCHIVISTA_SQL_STORE_CONNECTION_STRING ]]; then
|
||||
ARCHIVISTA_SQL_STORE_CONNECTION_STRING="root:example@db/testify"
|
||||
fi
|
||||
echo "Running migrations for MySQL"
|
||||
atlas migrate apply --dir "file:///archivista/migrations/mysql" --url "mysql://$ARCHIVISTA_SQL_STORE_CONNECTION_STRING"
|
||||
atlas_rc=$?
|
||||
;;
|
||||
PSQL)
|
||||
echo "Running migrations for Postgres"
|
||||
atlas migrate apply --dir "file:///archivista/migrations/pgsql" --url "$ARCHIVISTA_SQL_STORE_CONNECTION_STRING"
|
||||
atlas_rc=$?
|
||||
;;
|
||||
*)
|
||||
echo "Unknown SQL backend: $ARCHIVISTA_SQL_STORE_BACKEND"
|
||||
exit 1
|
||||
;;
|
||||
esac
|
||||
|
||||
if [[ $atlas_rc -ne 0 ]]; then
|
||||
echo "Failed to apply migrations"
|
||||
exit 1
|
||||
fi
|
||||
fi
|
||||
|
||||
/bin/archivista
|
||||
|
|
1711
generated.go
1711
generated.go
File diff suppressed because it is too large
Load Diff
106
go.mod
106
go.mod
|
@ -1,45 +1,46 @@
|
|||
module github.com/in-toto/archivista
|
||||
|
||||
go 1.22.0
|
||||
go 1.23.8
|
||||
|
||||
toolchain go1.22.2
|
||||
toolchain go1.24.2
|
||||
|
||||
require (
|
||||
ariga.io/sqlcomment v0.1.0
|
||||
entgo.io/contrib v0.5.0
|
||||
entgo.io/ent v0.13.1
|
||||
github.com/99designs/gqlgen v0.17.49
|
||||
entgo.io/contrib v0.6.0
|
||||
entgo.io/ent v0.14.4
|
||||
github.com/99designs/gqlgen v0.17.76
|
||||
github.com/antonfisher/nested-logrus-formatter v1.3.1
|
||||
github.com/digitorus/timestamp v0.0.0-20231217203849-220c5c2851b7
|
||||
github.com/edwarnicke/gitoid v0.0.0-20220710194850-1be5bfda1f9d
|
||||
github.com/go-sql-driver/mysql v1.8.1
|
||||
github.com/go-sql-driver/mysql v1.9.3
|
||||
github.com/google/uuid v1.6.0
|
||||
github.com/gorilla/handlers v1.5.2
|
||||
github.com/gorilla/mux v1.8.1
|
||||
github.com/hashicorp/go-multierror v1.1.1
|
||||
github.com/in-toto/go-witness v0.6.0
|
||||
github.com/in-toto/go-witness v0.8.6
|
||||
github.com/kelseyhightower/envconfig v1.4.0
|
||||
github.com/lib/pq v1.10.9
|
||||
github.com/minio/minio-go/v7 v7.0.73
|
||||
github.com/minio/minio-go/v7 v7.0.94
|
||||
github.com/sirupsen/logrus v1.9.3
|
||||
github.com/spf13/cobra v1.8.1
|
||||
github.com/stretchr/testify v1.9.0
|
||||
github.com/spf13/cobra v1.9.1
|
||||
github.com/stretchr/testify v1.10.0
|
||||
github.com/swaggo/http-swagger/v2 v2.0.2
|
||||
github.com/swaggo/swag v1.16.3
|
||||
github.com/vektah/gqlparser/v2 v2.5.16
|
||||
github.com/swaggo/swag v1.16.4
|
||||
github.com/vektah/gqlparser/v2 v2.5.30
|
||||
gopkg.in/yaml.v3 v3.0.1
|
||||
)
|
||||
|
||||
require (
|
||||
ariga.io/atlas v0.21.1 // indirect
|
||||
ariga.io/atlas v0.31.1-0.20250212144724-069be8033e83 // indirect
|
||||
filippo.io/edwards25519 v1.1.0 // indirect
|
||||
github.com/DATA-DOG/go-sqlmock v1.5.2 // indirect
|
||||
github.com/KyleBanks/depth v1.2.1 // indirect
|
||||
github.com/OneOfOne/xxhash v1.2.8 // indirect
|
||||
github.com/agext/levenshtein v1.2.3 // indirect
|
||||
github.com/agnivade/levenshtein v1.1.1 // indirect
|
||||
github.com/agnivade/levenshtein v1.2.1 // indirect
|
||||
github.com/apparentlymart/go-textseg/v15 v15.0.0 // indirect
|
||||
github.com/bahlo/generic-list-go v0.2.0 // indirect
|
||||
github.com/beorn7/perks v1.0.1 // indirect
|
||||
github.com/bmatcuk/doublestar v1.3.4 // indirect
|
||||
github.com/buger/jsonparser v1.1.1 // indirect
|
||||
github.com/cespare/xxhash/v2 v2.3.0 // indirect
|
||||
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect
|
||||
|
@ -47,19 +48,20 @@ require (
|
|||
github.com/dustin/go-humanize v1.0.1 // indirect
|
||||
github.com/felixge/httpsnoop v1.0.4 // indirect
|
||||
github.com/go-ini/ini v1.67.0 // indirect
|
||||
github.com/go-logr/logr v1.4.1 // indirect
|
||||
github.com/go-logr/logr v1.4.2 // indirect
|
||||
github.com/go-logr/stdr v1.2.2 // indirect
|
||||
github.com/go-openapi/inflect v0.21.0 // indirect
|
||||
github.com/go-openapi/jsonpointer v0.21.0 // indirect
|
||||
github.com/go-openapi/jsonreference v0.21.0 // indirect
|
||||
github.com/go-openapi/spec v0.21.0 // indirect
|
||||
github.com/go-openapi/swag v0.23.0 // indirect
|
||||
github.com/go-test/deep v1.1.0 // indirect
|
||||
github.com/go-test/deep v1.1.1 // indirect
|
||||
github.com/go-viper/mapstructure/v2 v2.3.0 // indirect
|
||||
github.com/gobwas/glob v0.2.3 // indirect
|
||||
github.com/goccy/go-json v0.10.3 // indirect
|
||||
github.com/goccy/go-json v0.10.5 // indirect
|
||||
github.com/gogo/protobuf v1.3.2 // indirect
|
||||
github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect
|
||||
github.com/google/go-cmp v0.6.0 // indirect
|
||||
github.com/google/go-cmp v0.7.0 // indirect
|
||||
github.com/google/gofuzz v1.2.0 // indirect
|
||||
github.com/gorilla/websocket v1.5.1 // indirect
|
||||
github.com/hashicorp/errwrap v1.1.0 // indirect
|
||||
|
@ -69,29 +71,34 @@ require (
|
|||
github.com/invopop/jsonschema v0.12.0 // indirect
|
||||
github.com/josharian/intern v1.0.0 // indirect
|
||||
github.com/json-iterator/go v1.1.12 // indirect
|
||||
github.com/klauspost/compress v1.17.9 // indirect
|
||||
github.com/klauspost/cpuid/v2 v2.2.8 // indirect
|
||||
github.com/klauspost/compress v1.18.0 // indirect
|
||||
github.com/klauspost/cpuid/v2 v2.2.10 // indirect
|
||||
github.com/mailru/easyjson v0.7.7 // indirect
|
||||
github.com/mattn/go-sqlite3 v1.14.24 // indirect
|
||||
github.com/miekg/dns v1.1.58 // indirect
|
||||
github.com/minio/crc64nvme v1.0.1 // indirect
|
||||
github.com/minio/md5-simd v1.1.2 // indirect
|
||||
github.com/mitchellh/go-wordwrap v1.0.1 // indirect
|
||||
github.com/mitchellh/mapstructure v1.5.0 // indirect
|
||||
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
|
||||
github.com/modern-go/reflect2 v1.0.2 // indirect
|
||||
github.com/open-policy-agent/opa v0.64.0 // indirect
|
||||
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect
|
||||
github.com/open-policy-agent/opa v1.4.2 // indirect
|
||||
github.com/philhofer/fwd v1.1.3-0.20240916144458-20a13a1f6b7c // indirect
|
||||
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect
|
||||
github.com/prometheus/client_golang v1.19.0 // indirect
|
||||
github.com/prometheus/client_golang v1.21.1 // indirect
|
||||
github.com/prometheus/client_model v0.6.1 // indirect
|
||||
github.com/prometheus/common v0.53.0 // indirect
|
||||
github.com/prometheus/procfs v0.14.0 // indirect
|
||||
github.com/prometheus/common v0.62.0 // indirect
|
||||
github.com/prometheus/procfs v0.15.1 // indirect
|
||||
github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475 // indirect
|
||||
github.com/rs/xid v1.5.0 // indirect
|
||||
github.com/sigstore/fulcio v1.4.5 // indirect
|
||||
github.com/rogpeppe/go-internal v1.13.2-0.20241226121412-a5dc8ff20d0a // indirect
|
||||
github.com/rs/xid v1.6.0 // indirect
|
||||
github.com/sigstore/fulcio v1.6.6 // indirect
|
||||
github.com/sosodev/duration v1.3.1 // indirect
|
||||
github.com/spf13/pflag v1.0.5 // indirect
|
||||
github.com/spf13/pflag v1.0.6 // indirect
|
||||
github.com/stretchr/objx v0.5.2 // indirect
|
||||
github.com/swaggo/files/v2 v2.0.0 // indirect
|
||||
github.com/tchap/go-patricia/v2 v2.3.1 // indirect
|
||||
github.com/tchap/go-patricia/v2 v2.3.2 // indirect
|
||||
github.com/tinylib/msgp v1.3.0 // indirect
|
||||
github.com/vmihailenco/msgpack/v5 v5.4.1 // indirect
|
||||
github.com/vmihailenco/tagparser/v2 v2.0.0 // indirect
|
||||
github.com/wk8/go-ordered-map/v2 v2.1.8 // indirect
|
||||
|
@ -99,26 +106,27 @@ require (
|
|||
github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 // indirect
|
||||
github.com/yashtewari/glob-intersection v0.2.0 // indirect
|
||||
github.com/zclconf/go-cty v1.14.4 // indirect
|
||||
github.com/zclconf/go-cty-yaml v1.1.0 // indirect
|
||||
go.opencensus.io v0.24.0 // indirect
|
||||
go.opentelemetry.io/otel v1.26.0 // indirect
|
||||
go.opentelemetry.io/otel/metric v1.26.0 // indirect
|
||||
go.opentelemetry.io/otel/sdk v1.26.0 // indirect
|
||||
go.opentelemetry.io/otel/trace v1.26.0 // indirect
|
||||
golang.org/x/crypto v0.24.0 // indirect
|
||||
golang.org/x/exp v0.0.0-20240416160154-fe59bbe5cc7f // indirect
|
||||
golang.org/x/mod v0.18.0 // indirect
|
||||
golang.org/x/net v0.26.0 // indirect
|
||||
golang.org/x/sync v0.7.0 // indirect
|
||||
golang.org/x/sys v0.21.0 // indirect
|
||||
golang.org/x/text v0.16.0 // indirect
|
||||
golang.org/x/tools v0.22.0 // indirect
|
||||
google.golang.org/protobuf v1.34.1 // indirect
|
||||
go.opentelemetry.io/auto/sdk v1.1.0 // indirect
|
||||
go.opentelemetry.io/otel v1.35.0 // indirect
|
||||
go.opentelemetry.io/otel/metric v1.35.0 // indirect
|
||||
go.opentelemetry.io/otel/sdk v1.35.0 // indirect
|
||||
go.opentelemetry.io/otel/trace v1.35.0 // indirect
|
||||
golang.org/x/crypto v0.39.0 // indirect
|
||||
golang.org/x/exp v0.0.0-20250506013437-ce4c2cf36ca6 // indirect
|
||||
golang.org/x/mod v0.25.0 // indirect
|
||||
golang.org/x/net v0.41.0 // indirect
|
||||
golang.org/x/sync v0.15.0 // indirect
|
||||
golang.org/x/sys v0.33.0 // indirect
|
||||
golang.org/x/text v0.26.0 // indirect
|
||||
golang.org/x/tools v0.34.0 // indirect
|
||||
google.golang.org/protobuf v1.36.6 // indirect
|
||||
gopkg.in/inf.v0 v0.9.1 // indirect
|
||||
gopkg.in/yaml.v2 v2.4.0 // indirect
|
||||
k8s.io/apimachinery v0.30.0 // indirect
|
||||
k8s.io/klog/v2 v2.120.1 // indirect
|
||||
k8s.io/utils v0.0.0-20240423183400-0849a56e8f22 // indirect
|
||||
sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd // indirect
|
||||
sigs.k8s.io/structured-merge-diff/v4 v4.4.1 // indirect
|
||||
k8s.io/apimachinery v0.30.12 // indirect
|
||||
k8s.io/klog/v2 v2.130.1 // indirect
|
||||
k8s.io/utils v0.0.0-20241104100929-3ea5e8cea738 // indirect
|
||||
sigs.k8s.io/json v0.0.0-20241010143419-9aa6b5e7a4b3 // indirect
|
||||
sigs.k8s.io/structured-merge-diff/v4 v4.4.2 // indirect
|
||||
sigs.k8s.io/yaml v1.4.0 // indirect
|
||||
)
|
||||
|
|
391
go.sum
391
go.sum
|
@ -1,107 +1,105 @@
|
|||
ariga.io/atlas v0.21.1 h1:Eg9XYhKTH3UHoqP7tKMWFV+Z5JnpVOJCgO3MHrUtKmk=
|
||||
ariga.io/atlas v0.21.1/go.mod h1:VPlcXdd4w2KqKnH54yEZcry79UAhpaWaxEsmn5JRNoE=
|
||||
ariga.io/atlas v0.31.1-0.20250212144724-069be8033e83 h1:nX4HXncwIdvQ8/8sIUIf1nyCkK8qdBaHQ7EtzPpuiGE=
|
||||
ariga.io/atlas v0.31.1-0.20250212144724-069be8033e83/go.mod h1:Oe1xWPuu5q9LzyrWfbZmEZxFYeu4BHTyzfjeW2aZp/w=
|
||||
ariga.io/sqlcomment v0.1.0 h1:8kQPlVe3sXpTloEFlpX5dhFAXB28i6rwq9ktqqnPx70=
|
||||
ariga.io/sqlcomment v0.1.0/go.mod h1:NT1IZMfBTQl1MUU5wgVONmnDqFRqtZrdDRgAXfc1g5k=
|
||||
cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
|
||||
dario.cat/mergo v1.0.0 h1:AGCNq9Evsj31mOgNPcLyXc+4PNABt905YmuqPYYpBWk=
|
||||
dario.cat/mergo v1.0.0/go.mod h1:uNxQE+84aUszobStD9th8a29P2fMDhsBdgRYvZOxGmk=
|
||||
entgo.io/contrib v0.5.0 h1:M4IqodImfUm327RDwNAITLNz3PsxVeC3rD4DPeVA8Gs=
|
||||
entgo.io/contrib v0.5.0/go.mod h1:q8dXQCmzqpSlVdT2bWDydjgznGcy3y4zmsYmVFC9V/U=
|
||||
entgo.io/ent v0.13.1 h1:uD8QwN1h6SNphdCCzmkMN3feSUzNnVvV/WIkHKMbzOE=
|
||||
entgo.io/ent v0.13.1/go.mod h1:qCEmo+biw3ccBn9OyL4ZK5dfpwg++l1Gxwac5B1206A=
|
||||
dario.cat/mergo v1.0.2 h1:85+piFYR1tMbRrLcDwR18y4UKJ3aH1Tbzi24VRW1TK8=
|
||||
dario.cat/mergo v1.0.2/go.mod h1:E/hbnu0NxMFBjpMIE34DRGLWqDy0g5FuKDhCb31ngxA=
|
||||
entgo.io/contrib v0.6.0 h1:xfo4TbJE7sJZWx7BV7YrpSz7IPFvS8MzL3fnfzZjKvQ=
|
||||
entgo.io/contrib v0.6.0/go.mod h1:3qWIseJ/9Wx2Hu5zVh15FDzv7d/UvKNcYKdViywWCQg=
|
||||
entgo.io/ent v0.14.4 h1:/DhDraSLXIkBhyiVoJeSshr4ZYi7femzhj6/TckzZuI=
|
||||
entgo.io/ent v0.14.4/go.mod h1:aDPE/OziPEu8+OWbzy4UlvWmD2/kbRuWfK2A40hcxJM=
|
||||
filippo.io/edwards25519 v1.1.0 h1:FNf4tywRC1HmFuKW5xopWpigGjJKiJSV0Cqo0cJWDaA=
|
||||
filippo.io/edwards25519 v1.1.0/go.mod h1:BxyFTGdWcka3PhytdK4V28tE5sGfRvvvRV7EaN4VDT4=
|
||||
github.com/99designs/gqlgen v0.17.49 h1:b3hNGexHd33fBSAd4NDT/c3NCcQzcAVkknhN9ym36YQ=
|
||||
github.com/99designs/gqlgen v0.17.49/go.mod h1:tC8YFVZMed81x7UJ7ORUwXF4Kn6SXuucFqQBhN8+BU0=
|
||||
github.com/99designs/gqlgen v0.17.76 h1:YsJBcfACWmXWU2t1yCjoGdOmqcTfOFpjbLAE443fmYI=
|
||||
github.com/99designs/gqlgen v0.17.76/go.mod h1:miiU+PkAnTIDKMQ1BseUOIVeQHoiwYDZGCswoxl7xec=
|
||||
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
|
||||
github.com/DATA-DOG/go-sqlmock v1.5.0 h1:Shsta01QNfFxHCfpW6YH2STWB0MudeXXEWMr20OEh60=
|
||||
github.com/DATA-DOG/go-sqlmock v1.5.0/go.mod h1:f/Ixk793poVmq4qj/V1dPUg2JEAKC73Q5eFN3EC/SaM=
|
||||
github.com/DATA-DOG/go-sqlmock v1.5.2 h1:OcvFkGmslmlZibjAjaHm3L//6LiuBgolP7OputlJIzU=
|
||||
github.com/DATA-DOG/go-sqlmock v1.5.2/go.mod h1:88MAG/4G7SMwSE3CeA0ZKzrT5CiOU3OJ+JlNzwDqpNU=
|
||||
github.com/KyleBanks/depth v1.2.1 h1:5h8fQADFrWtarTdtDudMmGsC7GPbOAu6RVB3ffsVFHc=
|
||||
github.com/KyleBanks/depth v1.2.1/go.mod h1:jzSb9d0L43HxTQfT+oSA1EEp2q+ne2uh6XgeJcm8brE=
|
||||
github.com/Microsoft/go-winio v0.6.1 h1:9/kr64B9VUZrLm5YYwbGtUJnMgqWVOdUAXu6Migciow=
|
||||
github.com/Microsoft/go-winio v0.6.1/go.mod h1:LRdKpFKfdobln8UmuiYcKPot9D2v6svN5+sAH+4kjUM=
|
||||
github.com/OneOfOne/xxhash v1.2.8 h1:31czK/TI9sNkxIKfaUfGlU47BAxQ0ztGgd9vPyqimf8=
|
||||
github.com/OneOfOne/xxhash v1.2.8/go.mod h1:eZbhyaAYD41SGSSsnmcpxVoRiQ/MPUTjUdIIOT9Um7Q=
|
||||
github.com/ProtonMail/go-crypto v1.0.0 h1:LRuvITjQWX+WIfr930YHG2HNfjR1uOfyf5vE0kC2U78=
|
||||
github.com/ProtonMail/go-crypto v1.0.0/go.mod h1:EjAoLdwvbIOoOQr3ihjnSoLZRtE8azugULFRteWMNc0=
|
||||
github.com/PuerkitoBio/goquery v1.9.2 h1:4/wZksC3KgkQw7SQgkKotmKljk0M6V8TUvA8Wb4yPeE=
|
||||
github.com/PuerkitoBio/goquery v1.9.2/go.mod h1:GHPCaP0ODyyxqcNoFGYlAprUFH81NuRPd0GX3Zu2Mvk=
|
||||
github.com/Microsoft/go-winio v0.6.2 h1:F2VQgta7ecxGYO8k3ZZz3RS8fVIXVxONVUPlNERoyfY=
|
||||
github.com/Microsoft/go-winio v0.6.2/go.mod h1:yd8OoFMLzJbo9gZq8j5qaps8bJ9aShtEA8Ipt1oGCvU=
|
||||
github.com/ProtonMail/go-crypto v1.1.5 h1:eoAQfK2dwL+tFSFpr7TbOaPNUbPiJj4fLYwwGE1FQO4=
|
||||
github.com/ProtonMail/go-crypto v1.1.5/go.mod h1:rA3QumHc/FZ8pAHreoekgiAbzpNsfQAosU5td4SnOrE=
|
||||
github.com/PuerkitoBio/goquery v1.10.3 h1:pFYcNSqHxBD06Fpj/KsbStFRsgRATgnf3LeXiUkhzPo=
|
||||
github.com/PuerkitoBio/goquery v1.10.3/go.mod h1:tMUX0zDMHXYlAQk6p35XxQMqMweEKB7iK7iLNd4RH4Y=
|
||||
github.com/agext/levenshtein v1.2.3 h1:YB2fHEn0UJagG8T1rrWknE3ZQzWM06O8AMAatNn7lmo=
|
||||
github.com/agext/levenshtein v1.2.3/go.mod h1:JEDfjyjHDjOF/1e4FlBE/PkbqA9OfWu2ki2W0IB5558=
|
||||
github.com/agnivade/levenshtein v1.1.1 h1:QY8M92nrzkmr798gCo3kmMyqXFzdQVpxLlGPRBij0P8=
|
||||
github.com/agnivade/levenshtein v1.1.1/go.mod h1:veldBMzWxcCG2ZvUTKD2kJNRdCk5hVbJomOvKkmgYbo=
|
||||
github.com/agnivade/levenshtein v1.2.1 h1:EHBY3UOn1gwdy/VbFwgo4cxecRznFk7fKWN1KOX7eoM=
|
||||
github.com/agnivade/levenshtein v1.2.1/go.mod h1:QVVI16kDrtSuwcpd0p1+xMC6Z/VfhtCyDIjcwga4/DU=
|
||||
github.com/andreyvit/diff v0.0.0-20170406064948-c7f18ee00883 h1:bvNMNQO63//z+xNgfBlViaCIJKLlCJ6/fmUseuG0wVQ=
|
||||
github.com/andreyvit/diff v0.0.0-20170406064948-c7f18ee00883/go.mod h1:rCTlJbsFo29Kk6CurOXKm700vrz8f0KW0JNfpkRJY/8=
|
||||
github.com/andybalholm/cascadia v1.3.2 h1:3Xi6Dw5lHF15JtdcmAHD3i1+T8plmv7BQ/nsViSLyss=
|
||||
github.com/andybalholm/cascadia v1.3.2/go.mod h1:7gtRlve5FxPPgIgX36uWBX58OdBsSS6lUvCFb+h7KvU=
|
||||
github.com/andybalholm/cascadia v1.3.3 h1:AG2YHrzJIm4BZ19iwJ/DAua6Btl3IwJX+VI4kktS1LM=
|
||||
github.com/andybalholm/cascadia v1.3.3/go.mod h1:xNd9bqTn98Ln4DwST8/nG+H0yuB8Hmgu1YHNnWw0GeA=
|
||||
github.com/antonfisher/nested-logrus-formatter v1.3.1 h1:NFJIr+pzwv5QLHTPyKz9UMEoHck02Q9L0FP13b/xSbQ=
|
||||
github.com/antonfisher/nested-logrus-formatter v1.3.1/go.mod h1:6WTfyWFkBc9+zyBaKIqRrg/KwMqBbodBjgbHjDz7zjA=
|
||||
github.com/apparentlymart/go-textseg/v15 v15.0.0 h1:uYvfpb3DyLSCGWnctWKGj857c6ew1u1fNQOlOtuGxQY=
|
||||
github.com/apparentlymart/go-textseg/v15 v15.0.0/go.mod h1:K8XmNZdhEBkdlyDdvbmmsvpAG721bKi0joRfFdHIWJ4=
|
||||
github.com/arbovm/levenshtein v0.0.0-20160628152529-48b4e1c0c4d0 h1:jfIu9sQUG6Ig+0+Ap1h4unLjW6YQJpKZVmUzxsD4E/Q=
|
||||
github.com/arbovm/levenshtein v0.0.0-20160628152529-48b4e1c0c4d0/go.mod h1:t2tdKJDJF9BV14lnkjHmOQgcvEKgtqs5a1N3LNdJhGE=
|
||||
github.com/aws/aws-sdk-go v1.50.38 h1:h8wxaLin7sFGK4sKassc1VpNcDbgAAEQJ5PHjqLAvXQ=
|
||||
github.com/aws/aws-sdk-go v1.50.38/go.mod h1:LF8svs817+Nz+DmiMQKTO3ubZ/6IaTpq3TjupRn3Eqk=
|
||||
github.com/aws/aws-sdk-go-v2 v1.27.2 h1:pLsTXqX93rimAOZG2FIYraDQstZaaGVVN4tNw65v0h8=
|
||||
github.com/aws/aws-sdk-go-v2 v1.27.2/go.mod h1:ffIFB97e2yNsv4aTSGkqtHnppsIJzw7G7BReUZ3jCXM=
|
||||
github.com/aws/aws-sdk-go-v2/config v1.27.18 h1:wFvAnwOKKe7QAyIxziwSKjmer9JBMH1vzIL6W+fYuKk=
|
||||
github.com/aws/aws-sdk-go-v2/config v1.27.18/go.mod h1:0xz6cgdX55+kmppvPm2IaKzIXOheGJhAufacPJaXZ7c=
|
||||
github.com/aws/aws-sdk-go-v2/credentials v1.17.18 h1:D/ALDWqK4JdY3OFgA2thcPO1c9aYTT5STS/CvnkqY1c=
|
||||
github.com/aws/aws-sdk-go-v2/credentials v1.17.18/go.mod h1:JuitCWq+F5QGUrmMPsk945rop6bB57jdscu+Glozdnc=
|
||||
github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.5 h1:dDgptDO9dxeFkXy+tEgVkzSClHZje/6JkPW5aZyEvrQ=
|
||||
github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.5/go.mod h1:gjvE2KBUgUQhcv89jqxrIxH9GaKs1JbZzWejj/DaHGA=
|
||||
github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.9 h1:cy8ahBJuhtM8GTTSyOkfy6WVPV1IE+SS5/wfXUYuulw=
|
||||
github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.9/go.mod h1:CZBXGLaJnEZI6EVNcPd7a6B5IC5cA/GkRWtu9fp3S6Y=
|
||||
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.9 h1:A4SYk07ef04+vxZToz9LWvAXl9LW0NClpPpMsi31cz0=
|
||||
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.9/go.mod h1:5jJcHuwDagxN+ErjQ3PU3ocf6Ylc/p9x+BLO/+X4iXw=
|
||||
github.com/aws/aws-sdk-go-v2/internal/ini v1.8.0 h1:hT8rVHwugYE2lEfdFE0QWVo81lF7jMrYJVDWI+f+VxU=
|
||||
github.com/aws/aws-sdk-go-v2/internal/ini v1.8.0/go.mod h1:8tu/lYfQfFe6IGnaOdrpVgEL2IrrDOf6/m9RQum4NkY=
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.11.2 h1:Ji0DY1xUsUr3I8cHps0G+XM3WWU16lP6yG8qu1GAZAs=
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.11.2/go.mod h1:5CsjAbs3NlGQyZNFACh+zztPDI7fU6eW9QsxjfnuBKg=
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.11.11 h1:o4T+fKxA3gTMcluBNZZXE9DNaMkJuUL1O3mffCUjoJo=
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.11.11/go.mod h1:84oZdJ+VjuJKs9v1UTC9NaodRZRseOXCTgku+vQJWR8=
|
||||
github.com/aws/aws-sdk-go-v2/service/kms v1.31.3 h1:wLBgq6nDNYdd0A5CvscVAKV5SVlHKOHVPedpgtigATg=
|
||||
github.com/aws/aws-sdk-go-v2/service/kms v1.31.3/go.mod h1:8lETO9lelSG2B6KMXFh2OwPPqGV6WQM3RqLAEjP1xaU=
|
||||
github.com/aws/aws-sdk-go-v2/service/sso v1.20.11 h1:gEYM2GSpr4YNWc6hCd5nod4+d4kd9vWIAWrmGuLdlMw=
|
||||
github.com/aws/aws-sdk-go-v2/service/sso v1.20.11/go.mod h1:gVvwPdPNYehHSP9Rs7q27U1EU+3Or2ZpXvzAYJNh63w=
|
||||
github.com/aws/aws-sdk-go-v2/service/ssooidc v1.24.5 h1:iXjh3uaH3vsVcnyZX7MqCoCfcyxIrVE9iOQruRaWPrQ=
|
||||
github.com/aws/aws-sdk-go-v2/service/ssooidc v1.24.5/go.mod h1:5ZXesEuy/QcO0WUnt+4sDkxhdXRHTu2yG0uCSH8B6os=
|
||||
github.com/aws/aws-sdk-go-v2/service/sts v1.28.12 h1:M/1u4HBpwLuMtjlxuI2y6HoVLzF5e2mfxHCg7ZVMYmk=
|
||||
github.com/aws/aws-sdk-go-v2/service/sts v1.28.12/go.mod h1:kcfd+eTdEi/40FIbLq4Hif3XMXnl5b/+t/KTfLt9xIk=
|
||||
github.com/aws/smithy-go v1.20.2 h1:tbp628ireGtzcHDDmLT/6ADHidqnwgF57XOXZe6tp4Q=
|
||||
github.com/aws/smithy-go v1.20.2/go.mod h1:krry+ya/rV9RDcV/Q16kpu6ypI4K2czasz0NC3qS14E=
|
||||
github.com/aws/aws-sdk-go v1.55.7 h1:UJrkFq7es5CShfBwlWAC8DA077vp8PyVbQd3lqLiztE=
|
||||
github.com/aws/aws-sdk-go v1.55.7/go.mod h1:eRwEWoyTWFMVYVQzKMNHWP5/RV4xIUGMQfXQHfHkpNU=
|
||||
github.com/aws/aws-sdk-go-v2 v1.36.3 h1:mJoei2CxPutQVxaATCzDUjcZEjVRdpsiiXi2o38yqWM=
|
||||
github.com/aws/aws-sdk-go-v2 v1.36.3/go.mod h1:LLXuLpgzEbD766Z5ECcRmi8AzSwfZItDtmABVkRLGzg=
|
||||
github.com/aws/aws-sdk-go-v2/config v1.29.14 h1:f+eEi/2cKCg9pqKBoAIwRGzVb70MRKqWX4dg1BDcSJM=
|
||||
github.com/aws/aws-sdk-go-v2/config v1.29.14/go.mod h1:wVPHWcIFv3WO89w0rE10gzf17ZYy+UVS1Geq8Iei34g=
|
||||
github.com/aws/aws-sdk-go-v2/credentials v1.17.67 h1:9KxtdcIA/5xPNQyZRgUSpYOE6j9Bc4+D7nZua0KGYOM=
|
||||
github.com/aws/aws-sdk-go-v2/credentials v1.17.67/go.mod h1:p3C44m+cfnbv763s52gCqrjaqyPikj9Sg47kUVaNZQQ=
|
||||
github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.30 h1:x793wxmUWVDhshP8WW2mlnXuFrO4cOd3HLBroh1paFw=
|
||||
github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.30/go.mod h1:Jpne2tDnYiFascUEs2AWHJL9Yp7A5ZVy3TNyxaAjD6M=
|
||||
github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.34 h1:ZK5jHhnrioRkUNOc+hOgQKlUL5JeC3S6JgLxtQ+Rm0Q=
|
||||
github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.34/go.mod h1:p4VfIceZokChbA9FzMbRGz5OV+lekcVtHlPKEO0gSZY=
|
||||
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.34 h1:SZwFm17ZUNNg5Np0ioo/gq8Mn6u9w19Mri8DnJ15Jf0=
|
||||
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.34/go.mod h1:dFZsC0BLo346mvKQLWmoJxT+Sjp+qcVR1tRVHQGOH9Q=
|
||||
github.com/aws/aws-sdk-go-v2/internal/ini v1.8.3 h1:bIqFDwgGXXN1Kpp99pDOdKMTTb5d2KyU5X/BZxjOkRo=
|
||||
github.com/aws/aws-sdk-go-v2/internal/ini v1.8.3/go.mod h1:H5O/EsxDWyU+LP/V8i5sm8cxoZgc2fdNR9bxlOFrQTo=
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.12.3 h1:eAh2A4b5IzM/lum78bZ590jy36+d/aFLgKF/4Vd1xPE=
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.12.3/go.mod h1:0yKJC/kb8sAnmlYa6Zs3QVYqaC8ug2AbnNChv5Ox3uA=
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.12.15 h1:dM9/92u2F1JbDaGooxTq18wmmFzbJRfXfVfy96/1CXM=
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.12.15/go.mod h1:SwFBy2vjtA0vZbjjaFtfN045boopadnoVPhu4Fv66vY=
|
||||
github.com/aws/aws-sdk-go-v2/service/kms v1.37.19 h1:QxVwGw8i/uiI9uXWwvS/m76wCJiiEV6xssBTvs3rwTw=
|
||||
github.com/aws/aws-sdk-go-v2/service/kms v1.37.19/go.mod h1:Lcpx4mFS+YjFuKvFaS3GM8qSFQIvRmItZEghMD8evRo=
|
||||
github.com/aws/aws-sdk-go-v2/service/sso v1.25.3 h1:1Gw+9ajCV1jogloEv1RRnvfRFia2cL6c9cuKV2Ps+G8=
|
||||
github.com/aws/aws-sdk-go-v2/service/sso v1.25.3/go.mod h1:qs4a9T5EMLl/Cajiw2TcbNt2UNo/Hqlyp+GiuG4CFDI=
|
||||
github.com/aws/aws-sdk-go-v2/service/ssooidc v1.30.1 h1:hXmVKytPfTy5axZ+fYbR5d0cFmC3JvwLm5kM83luako=
|
||||
github.com/aws/aws-sdk-go-v2/service/ssooidc v1.30.1/go.mod h1:MlYRNmYu/fGPoxBQVvBYr9nyr948aY/WLUvwBMBJubs=
|
||||
github.com/aws/aws-sdk-go-v2/service/sts v1.33.19 h1:1XuUZ8mYJw9B6lzAkXhqHlJd/XvaX32evhproijJEZY=
|
||||
github.com/aws/aws-sdk-go-v2/service/sts v1.33.19/go.mod h1:cQnB8CUnxbMU82JvlqjKR2HBOm3fe9pWorWBza6MBJ4=
|
||||
github.com/aws/smithy-go v1.22.2 h1:6D9hW43xKFrRx/tXXfAlIZc4JI+yQe6snnWcQyxSyLQ=
|
||||
github.com/aws/smithy-go v1.22.2/go.mod h1:irrKGvNn1InZwb2d7fkIRNucdfwR8R+Ts3wxYa/cJHg=
|
||||
github.com/bahlo/generic-list-go v0.2.0 h1:5sz/EEAK+ls5wF+NeqDpk5+iNdMDXrh3z3nPnH1Wvgk=
|
||||
github.com/bahlo/generic-list-go v0.2.0/go.mod h1:2KvAjgMlE5NNynlg/5iLrrCCZ2+5xWbdbCW3pNTGyYg=
|
||||
github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
|
||||
github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
|
||||
github.com/bmatcuk/doublestar v1.3.4 h1:gPypJ5xD31uhX6Tf54sDPUOBXTqKH4c9aPY66CyQrS0=
|
||||
github.com/bmatcuk/doublestar v1.3.4/go.mod h1:wiQtGV+rzVYxB7WIlirSN++5HPtPlXEo9MEoZQC/PmE=
|
||||
github.com/buger/jsonparser v1.1.1 h1:2PnMjfWD7wBILjqQbt530v576A/cAbQvEW9gGIpYMUs=
|
||||
github.com/buger/jsonparser v1.1.1/go.mod h1:6RYKKt7H4d4+iWqouImQ9R2FZql3VbhNgx27UK13J/0=
|
||||
github.com/bytecodealliance/wasmtime-go/v3 v3.0.2 h1:3uZCA/BLTIu+DqCfguByNMJa2HVHpXvjfy0Dy7g6fuA=
|
||||
github.com/bytecodealliance/wasmtime-go/v3 v3.0.2/go.mod h1:RnUjnIXxEJcL6BgCvNyzCCRzZcxCgsZCi+RNlvYor5Q=
|
||||
github.com/cenkalti/backoff/v4 v4.2.1 h1:y4OZtCnogmCPw98Zjyt5a6+QwPLGkiQsYW5oUqylYbM=
|
||||
github.com/cenkalti/backoff/v4 v4.2.1/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE=
|
||||
github.com/cenkalti/backoff/v4 v4.3.0 h1:MyRJ/UdXutAwSAT+s3wNd7MfTIcy71VQueUuFK343L8=
|
||||
github.com/cenkalti/backoff/v4 v4.3.0/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE=
|
||||
github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
|
||||
github.com/cespare/xxhash v1.1.0 h1:a6HrQnmkObjyL+Gs60czilIUGqrzKutQD6XZog3p+ko=
|
||||
github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc=
|
||||
github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs=
|
||||
github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
|
||||
github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
|
||||
github.com/cloudflare/circl v1.3.7 h1:qlCDlTPz2n9fu58M0Nh1J/JzcFpfgkFHHX3O35r5vcU=
|
||||
github.com/cloudflare/circl v1.3.7/go.mod h1:sRTcRWXGLrKw6yIGJ+l7amYJFfAXbZG0kBSc8r4zxgA=
|
||||
github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc=
|
||||
github.com/cpuguy83/go-md2man/v2 v2.0.4/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
|
||||
github.com/cyphar/filepath-securejoin v0.2.4 h1:Ugdm7cg7i6ZK6x3xDF1oEu1nfkyfH53EtKeQYTC3kyg=
|
||||
github.com/cyphar/filepath-securejoin v0.2.4/go.mod h1:aPGpWjXOXUn2NCNjFvBE6aRxGGx79pTxQpKOJNYHHl4=
|
||||
github.com/cpuguy83/go-md2man/v2 v2.0.6/go.mod h1:oOW0eioCTA6cOiMLiUPZOpcVxMig6NIQQ7OS05n1F4g=
|
||||
github.com/cyphar/filepath-securejoin v0.3.6 h1:4d9N5ykBnSp5Xn2JkhocYDkOpURL/18CYMpo6xB9uWM=
|
||||
github.com/cyphar/filepath-securejoin v0.3.6/go.mod h1:Sdj7gXlvMcPZsbhwhQ33GguGLDGQL7h7bg04C/+u9jI=
|
||||
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM=
|
||||
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||
github.com/dgraph-io/badger/v3 v3.2103.5 h1:ylPa6qzbjYRQMU6jokoj4wzcaweHylt//CH0AKt0akg=
|
||||
github.com/dgraph-io/badger/v3 v3.2103.5/go.mod h1:4MPiseMeDQ3FNCYwRbbcBOGJLf5jsE0PPFzRiKjtcdw=
|
||||
github.com/dgraph-io/ristretto v0.1.1 h1:6CWw5tJNgpegArSHpNHJKldNeq03FQCwYvfMVWajOK8=
|
||||
github.com/dgraph-io/ristretto v0.1.1/go.mod h1:S1GPSBCYCIhmVNfcth17y2zZtQT6wzkzgwUve0VDWWA=
|
||||
github.com/dgryski/trifles v0.0.0-20200323201526-dd97f9abfb48 h1:fRzb/w+pyskVMQ+UbP35JkH8yB7MYb4q/qhBarqZE6g=
|
||||
github.com/dgryski/trifles v0.0.0-20200323201526-dd97f9abfb48/go.mod h1:if7Fbed8SFyPtHLHbg49SI7NAdJiC5WIA09pe59rfAA=
|
||||
github.com/dgraph-io/badger/v4 v4.7.0 h1:Q+J8HApYAY7UMpL8d9owqiB+odzEc0zn/aqOD9jhc6Y=
|
||||
github.com/dgraph-io/badger/v4 v4.7.0/go.mod h1:He7TzG3YBy3j4f5baj5B7Zl2XyfNe5bl4Udl0aPemVA=
|
||||
github.com/dgraph-io/ristretto/v2 v2.2.0 h1:bkY3XzJcXoMuELV8F+vS8kzNgicwQFAaGINAEJdWGOM=
|
||||
github.com/dgraph-io/ristretto/v2 v2.2.0/go.mod h1:RZrm63UmcBAaYWC1DotLYBmTvgkrs0+XhBd7Npn7/zI=
|
||||
github.com/dgryski/trifles v0.0.0-20230903005119-f50d829f2e54 h1:SG7nF6SRlWhcT7cNTs5R6Hk4V2lcmLz2NsG2VnInyNo=
|
||||
github.com/dgryski/trifles v0.0.0-20230903005119-f50d829f2e54/go.mod h1:if7Fbed8SFyPtHLHbg49SI7NAdJiC5WIA09pe59rfAA=
|
||||
github.com/digitorus/pkcs7 v0.0.0-20230713084857-e76b763bdc49/go.mod h1:SKVExuS+vpu2l9IoOc0RwqE7NYnb0JlcFHFnEJkVDzc=
|
||||
github.com/digitorus/pkcs7 v0.0.0-20230818184609-3a137a874352 h1:ge14PCmCvPjpMQMIAH7uKg0lrtNSOdpYsRXlwk3QbaE=
|
||||
github.com/digitorus/pkcs7 v0.0.0-20230818184609-3a137a874352/go.mod h1:SKVExuS+vpu2l9IoOc0RwqE7NYnb0JlcFHFnEJkVDzc=
|
||||
|
@ -125,15 +123,15 @@ github.com/foxcpp/go-mockdns v1.1.0 h1:jI0rD8M0wuYAxL7r/ynTrCQQq0BVqfB99Vgk7Dlme
|
|||
github.com/foxcpp/go-mockdns v1.1.0/go.mod h1:IhLeSFGed3mJIAXPH2aiRQB+kqz7oqu8ld2qVbOu7Wk=
|
||||
github.com/go-git/gcfg v1.5.1-0.20230307220236-3a3c6141e376 h1:+zs/tPmkDkHx3U66DAb0lQFJrpS6731Oaa12ikc+DiI=
|
||||
github.com/go-git/gcfg v1.5.1-0.20230307220236-3a3c6141e376/go.mod h1:an3vInlBmSxCcxctByoQdvwPiA7DTK7jaaFDBTtu0ic=
|
||||
github.com/go-git/go-billy/v5 v5.5.0 h1:yEY4yhzCDuMGSv83oGxiBotRzhwhNr8VZyphhiu+mTU=
|
||||
github.com/go-git/go-billy/v5 v5.5.0/go.mod h1:hmexnoNsr2SJU1Ju67OaNz5ASJY3+sHgFRpCtpDCKow=
|
||||
github.com/go-git/go-git/v5 v5.11.0 h1:XIZc1p+8YzypNr34itUfSvYJcv+eYdTnTvOZ2vD3cA4=
|
||||
github.com/go-git/go-git/v5 v5.11.0/go.mod h1:6GFcX2P3NM7FPBfpePbpLd21XxsgdAt+lKqXmCUiUCY=
|
||||
github.com/go-git/go-billy/v5 v5.6.2 h1:6Q86EsPXMa7c3YZ3aLAQsMA0VlWmy43r6FHqa/UNbRM=
|
||||
github.com/go-git/go-billy/v5 v5.6.2/go.mod h1:rcFC2rAsp/erv7CMz9GczHcuD0D32fWzH+MJAU+jaUU=
|
||||
github.com/go-git/go-git/v5 v5.13.2 h1:7O7xvsK7K+rZPKW6AQR1YyNhfywkv7B8/FsP3ki6Zv0=
|
||||
github.com/go-git/go-git/v5 v5.13.2/go.mod h1:hWdW5P4YZRjmpGHwRH2v3zkWcNl6HeXaXQEMGb3NJ9A=
|
||||
github.com/go-ini/ini v1.67.0 h1:z6ZrTEZqSWOTyH2FlglNbNgARyHG8oLW9gMELqKr06A=
|
||||
github.com/go-ini/ini v1.67.0/go.mod h1:ByCAeIL28uOIIG0E3PJtZPDL8WnHpFKFOtgjp+3Ies8=
|
||||
github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
|
||||
github.com/go-logr/logr v1.4.1 h1:pKouT5E8xu9zeFC39JXRDukb6JFQPXM5p5I91188VAQ=
|
||||
github.com/go-logr/logr v1.4.1/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
|
||||
github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY=
|
||||
github.com/go-logr/logr v1.4.2/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
|
||||
github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag=
|
||||
github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE=
|
||||
github.com/go-openapi/inflect v0.21.0 h1:FoBjBTQEcbg2cJUWX6uwL9OyIW8eqc9k4KhN4lfbeYk=
|
||||
|
@ -146,19 +144,19 @@ github.com/go-openapi/spec v0.21.0 h1:LTVzPc3p/RzRnkQqLRndbAzjY0d0BCL72A6j3CdL9Z
|
|||
github.com/go-openapi/spec v0.21.0/go.mod h1:78u6VdPw81XU44qEWGhtr982gJ5BWg2c0I5XwVMotYk=
|
||||
github.com/go-openapi/swag v0.23.0 h1:vsEVJDUo2hPJ2tu0/Xc+4noaxyEffXNIs3cOULZ+GrE=
|
||||
github.com/go-openapi/swag v0.23.0/go.mod h1:esZ8ITTYEsH1V2trKHjAN8Ai7xHb8RV+YSZ577vPjgQ=
|
||||
github.com/go-sql-driver/mysql v1.8.1 h1:LedoTUt/eveggdHS9qUFC1EFSa8bU2+1pZjSRpvNJ1Y=
|
||||
github.com/go-sql-driver/mysql v1.8.1/go.mod h1:wEBSXgmK//2ZFJyE+qWnIsVGmvmEKlqwuVSjsCm7DZg=
|
||||
github.com/go-test/deep v1.1.0 h1:WOcxcdHcvdgThNXjw0t76K42FXTU7HpNQWHpA2HHNlg=
|
||||
github.com/go-test/deep v1.1.0/go.mod h1:5C2ZWiW0ErCdrYzpqxLbTX7MG14M9iiw8DgHncVwcsE=
|
||||
github.com/go-sql-driver/mysql v1.9.3 h1:U/N249h2WzJ3Ukj8SowVFjdtZKfu9vlLZxjPXV1aweo=
|
||||
github.com/go-sql-driver/mysql v1.9.3/go.mod h1:qn46aNg1333BRMNU69Lq93t8du/dwxI64Gl8i5p1WMU=
|
||||
github.com/go-test/deep v1.1.1 h1:0r/53hagsehfO4bzD2Pgr/+RgHqhmf+k1Bpse2cTu1U=
|
||||
github.com/go-test/deep v1.1.1/go.mod h1:5C2ZWiW0ErCdrYzpqxLbTX7MG14M9iiw8DgHncVwcsE=
|
||||
github.com/go-viper/mapstructure/v2 v2.3.0 h1:27XbWsHIqhbdR5TIC911OfYvgSaW93HM+dX7970Q7jk=
|
||||
github.com/go-viper/mapstructure/v2 v2.3.0/go.mod h1:oJDH3BJKyqBA2TXFhDsKDGDTlndYOZ6rGS0BRZIxGhM=
|
||||
github.com/gobwas/glob v0.2.3 h1:A4xDbljILXROh+kObIiy5kIaPYD8e96x1tgBhUI5J+Y=
|
||||
github.com/gobwas/glob v0.2.3/go.mod h1:d3Ez4x06l9bZtSvzIay5+Yzi0fmZzPgnTbPcKjJAkT8=
|
||||
github.com/goccy/go-json v0.10.3 h1:KZ5WoDbxAIgm2HNbYckL0se1fHD6rz5j4ywS6ebzDqA=
|
||||
github.com/goccy/go-json v0.10.3/go.mod h1:oq7eo15ShAhp70Anwd5lgX2pLfOS3QCiwU/PULtXL6M=
|
||||
github.com/goccy/go-json v0.10.5 h1:Fq85nIqj+gXn/S5ahsiTlK3TmC85qgirsdTP/+DeaC4=
|
||||
github.com/goccy/go-json v0.10.5/go.mod h1:oq7eo15ShAhp70Anwd5lgX2pLfOS3QCiwU/PULtXL6M=
|
||||
github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q=
|
||||
github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q=
|
||||
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
|
||||
github.com/golang/glog v1.2.0 h1:uCdmnmatrKCgMBlM4rMuJZWOkPDqdbZPnrMXDY4gI68=
|
||||
github.com/golang/glog v1.2.0/go.mod h1:6AhwSGph0fcJtXVM/PEHPqZlFeoLxhs7/t5UDAwmO+w=
|
||||
github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
|
||||
github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE=
|
||||
github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
|
||||
|
@ -172,12 +170,8 @@ github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:W
|
|||
github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0=
|
||||
github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8=
|
||||
github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
|
||||
github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek=
|
||||
github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps=
|
||||
github.com/golang/snappy v0.0.4 h1:yAGX7huGHXlcLOEtBnF4w7FQwA26wojNCwOYAEhLjQM=
|
||||
github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
|
||||
github.com/google/flatbuffers v2.0.8+incompatible h1:ivUb1cGomAB101ZM1T0nOiWz9pSrTMoa9+EiY7igmkM=
|
||||
github.com/google/flatbuffers v2.0.8+incompatible/go.mod h1:1AeVuKshWv4vARoZatz6mlQ0JxURH0Kv5+zNeJKJCa8=
|
||||
github.com/google/flatbuffers v25.2.10+incompatible h1:F3vclr7C3HpB1k9mxCGRMXq6FdUalZ6H/pNX4FP1v0Q=
|
||||
github.com/google/flatbuffers v25.2.10+incompatible/go.mod h1:1AeVuKshWv4vARoZatz6mlQ0JxURH0Kv5+zNeJKJCa8=
|
||||
github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
|
||||
github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
|
||||
github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
|
||||
|
@ -185,8 +179,8 @@ github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/
|
|||
github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
|
||||
github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
|
||||
github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
|
||||
github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI=
|
||||
github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
|
||||
github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8=
|
||||
github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU=
|
||||
github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
|
||||
github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0=
|
||||
github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
|
||||
|
@ -199,8 +193,8 @@ github.com/gorilla/mux v1.8.1 h1:TuBL49tXwgrFYWhqrNgrUNEY92u81SPhu7sTdzQEiWY=
|
|||
github.com/gorilla/mux v1.8.1/go.mod h1:AKf9I4AEqPTmMytcMc0KkNouC66V3BtZ4qD5fmWSiMQ=
|
||||
github.com/gorilla/websocket v1.5.1 h1:gmztn0JnHVt9JZquRuzLw3g4wouNVzKL15iLr/zn/QY=
|
||||
github.com/gorilla/websocket v1.5.1/go.mod h1:x3kM2JMyaluk02fnUJpQuwD2dCS5NDG2ZHL0uE0tcaY=
|
||||
github.com/grpc-ecosystem/grpc-gateway/v2 v2.19.1 h1:/c3QmbOGMGTOumP2iT/rCwB7b0QDGLKzqOmktBjT+Is=
|
||||
github.com/grpc-ecosystem/grpc-gateway/v2 v2.19.1/go.mod h1:5SN9VR2LTsRFsrEC6FHgRbTWrTHu6tqPeKxEQv15giM=
|
||||
github.com/grpc-ecosystem/grpc-gateway/v2 v2.26.1 h1:e9Rjr40Z98/clHv5Yg79Is0NtosR5LXRvdr7o/6NwbA=
|
||||
github.com/grpc-ecosystem/grpc-gateway/v2 v2.26.1/go.mod h1:tIxuGz/9mpox++sgp9fJjHO0+q1X9/UOWd798aAm22M=
|
||||
github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
|
||||
github.com/hashicorp/errwrap v1.1.0 h1:OxrOeh75EUXMY8TBjag2fzXGZ40LB6IKw45YeGUDY2I=
|
||||
github.com/hashicorp/errwrap v1.1.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
|
||||
|
@ -210,16 +204,16 @@ github.com/hashicorp/golang-lru/v2 v2.0.7 h1:a+bsQ5rvGLjzHuww6tVxozPZFVghXaHOwFs
|
|||
github.com/hashicorp/golang-lru/v2 v2.0.7/go.mod h1:QeFd9opnmA6QUJc5vARoKUSoFhyfM2/ZepoAG6RGpeM=
|
||||
github.com/hashicorp/hcl/v2 v2.20.1 h1:M6hgdyz7HYt1UN9e61j+qKJBqR3orTWbI1HKBJEdxtc=
|
||||
github.com/hashicorp/hcl/v2 v2.20.1/go.mod h1:TZDqQ4kNKCbh1iJp99FdPiUaVDDUPivbqxZulxDYqL4=
|
||||
github.com/in-toto/go-witness v0.6.0 h1:TGbVvWtvIdDMw/EcbRaMxshn8LADLQF8VdgBFzyUcEo=
|
||||
github.com/in-toto/go-witness v0.6.0/go.mod h1:ZwhfMkkNHtDc6dX/m7FYPy6lIqdL5BZQs5O+ZpxH6ss=
|
||||
github.com/in-toto/go-witness v0.8.6 h1:LOcOZAY5rgTh8+kGdgYi7D2Wmq/Pi+kc7vDh64GtuNk=
|
||||
github.com/in-toto/go-witness v0.8.6/go.mod h1:tKUe1sza2ZByk6ecKnPt35o3ufofJyPwO3c/0lnfFIw=
|
||||
github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8=
|
||||
github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw=
|
||||
github.com/invopop/jsonschema v0.12.0 h1:6ovsNSuvn9wEQVOyc72aycBMVQFKz7cPdMJn10CvzRI=
|
||||
github.com/invopop/jsonschema v0.12.0/go.mod h1:ffZ5Km5SWWRAIN6wbDXItl95euhFz2uON45H2qjYt+0=
|
||||
github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99 h1:BQSFePA1RWJOlocH6Fxy8MmwDt+yVQYULKfN0RoTN8A=
|
||||
github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99/go.mod h1:1lJo3i6rXxKeerYnT8Nvf0QmHCRC1n8sfWVwXF2Frvo=
|
||||
github.com/jellydator/ttlcache/v3 v3.2.0 h1:6lqVJ8X3ZaUwvzENqPAobDsXNExfUJd61u++uW8a3LE=
|
||||
github.com/jellydator/ttlcache/v3 v3.2.0/go.mod h1:hi7MGFdMAwZna5n2tuvh63DvFLzVKySzCVW6+0gA2n4=
|
||||
github.com/jellydator/ttlcache/v3 v3.3.0 h1:BdoC9cE81qXfrxeb9eoJi9dWrdhSuwXMAnHTbnBm4Wc=
|
||||
github.com/jellydator/ttlcache/v3 v3.3.0/go.mod h1:bj2/e0l4jRnQdrnSTaGTsh4GSXvMjQcy41i7th0GVGw=
|
||||
github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY=
|
||||
github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y=
|
||||
github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM=
|
||||
|
@ -230,11 +224,12 @@ github.com/kevinburke/ssh_config v1.2.0 h1:x584FjTGwHzMwvHx18PXxbBVzfnxogHaAReU4
|
|||
github.com/kevinburke/ssh_config v1.2.0/go.mod h1:CT57kijsi8u/K/BOFA39wgDQJ9CxiF4nAY/ojJ6r6mM=
|
||||
github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8=
|
||||
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
|
||||
github.com/klauspost/compress v1.17.9 h1:6KIumPrER1LHsvBVuDa0r5xaG0Es51mhhB9BQB2qeMA=
|
||||
github.com/klauspost/compress v1.17.9/go.mod h1:Di0epgTjJY877eYKx5yC51cX2A2Vl2ibi7bDH9ttBbw=
|
||||
github.com/kisielk/sqlstruct v0.0.0-20201105191214-5f3e10d3ab46/go.mod h1:yyMNCyc/Ib3bDTKd379tNMpB/7/H5TjM2Y9QJ5THLbE=
|
||||
github.com/klauspost/compress v1.18.0 h1:c/Cqfb0r+Yi+JtIEq73FWXVkRonBlf0CRNYc8Zttxdo=
|
||||
github.com/klauspost/compress v1.18.0/go.mod h1:2Pp+KzxcywXVXMr50+X0Q/Lsb43OQHYWRCY2AiWywWQ=
|
||||
github.com/klauspost/cpuid/v2 v2.0.1/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg=
|
||||
github.com/klauspost/cpuid/v2 v2.2.8 h1:+StwCXwm9PdpiEkPyzBXIy+M9KUb4ODm0Zarf1kS5BM=
|
||||
github.com/klauspost/cpuid/v2 v2.2.8/go.mod h1:Lcz8mBdAVJIBVzewtcLocK12l3Y+JytZYpaMropDUws=
|
||||
github.com/klauspost/cpuid/v2 v2.2.10 h1:tBs3QSyvjDyFTq3uoc/9xFpCuOsJQFNPiAhYdw2skhE=
|
||||
github.com/klauspost/cpuid/v2 v2.2.10/go.mod h1:hqwkgyIinND0mEev00jJYCxPNVRVXFQeu1XKlok6oO0=
|
||||
github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE=
|
||||
github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk=
|
||||
github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
|
||||
|
@ -243,64 +238,66 @@ github.com/lib/pq v1.10.9 h1:YXG7RB+JIjhP29X+OtkiDnYaXQwpS4JEWq7dtCCRUEw=
|
|||
github.com/lib/pq v1.10.9/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o=
|
||||
github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0=
|
||||
github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc=
|
||||
github.com/mattn/go-sqlite3 v1.14.16 h1:yOQRA0RpS5PFz/oikGwBEqvAWhWg5ufRz4ETLjwpU1Y=
|
||||
github.com/mattn/go-sqlite3 v1.14.16/go.mod h1:2eHXhiwb8IkHr+BDWZGa96P6+rkvnG63S2DGjv9HUNg=
|
||||
github.com/mattn/go-sqlite3 v1.14.24 h1:tpSp2G2KyMnnQu99ngJ47EIkWVmliIizyZBfPrBWDRM=
|
||||
github.com/mattn/go-sqlite3 v1.14.24/go.mod h1:Uh1q+B4BYcTPb+yiD3kU8Ct7aC0hY9fxUwlHK0RXw+Y=
|
||||
github.com/miekg/dns v1.1.58 h1:ca2Hdkz+cDg/7eNF6V56jjzuZ4aCAE+DbVkILdQWG/4=
|
||||
github.com/miekg/dns v1.1.58/go.mod h1:Ypv+3b/KadlvW9vJfXOTf300O4UqaHFzFCuHz+rPkBY=
|
||||
github.com/minio/crc64nvme v1.0.1 h1:DHQPrYPdqK7jQG/Ls5CTBZWeex/2FMS3G5XGkycuFrY=
|
||||
github.com/minio/crc64nvme v1.0.1/go.mod h1:eVfm2fAzLlxMdUGc0EEBGSMmPwmXD5XiNRpnu9J3bvg=
|
||||
github.com/minio/md5-simd v1.1.2 h1:Gdi1DZK69+ZVMoNHRXJyNcxrMA4dSxoYHZSQbirFg34=
|
||||
github.com/minio/md5-simd v1.1.2/go.mod h1:MzdKDxYpY2BT9XQFocsiZf/NKVtR7nkE4RoEpN+20RM=
|
||||
github.com/minio/minio-go/v7 v7.0.73 h1:qr2vi96Qm7kZ4v7LLebjte+MQh621fFWnv93p12htEo=
|
||||
github.com/minio/minio-go/v7 v7.0.73/go.mod h1:qydcVzV8Hqtj1VtEocfxbmVFa2siu6HGa+LDEPogjD8=
|
||||
github.com/minio/minio-go/v7 v7.0.94 h1:1ZoksIKPyaSt64AVOyaQvhDOgVC3MfZsWM6mZXRUGtM=
|
||||
github.com/minio/minio-go/v7 v7.0.94/go.mod h1:71t2CqDt3ThzESgZUlU1rBN54mksGGlkLcFgguDnnAc=
|
||||
github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y=
|
||||
github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
|
||||
github.com/mitchellh/go-wordwrap v1.0.1 h1:TLuKupo69TCn6TQSyGxwI1EblZZEsQ0vMlAFQflz0v0=
|
||||
github.com/mitchellh/go-wordwrap v1.0.1/go.mod h1:R62XHJLzvMFRBbcrT7m7WgmE1eOyTSsCt+hzestvNj0=
|
||||
github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY=
|
||||
github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
|
||||
github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
|
||||
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg=
|
||||
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
|
||||
github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M=
|
||||
github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk=
|
||||
github.com/open-policy-agent/opa v0.64.0 h1:2g0JTt78zxhFaoBmZViY4UXvtOlzBjhhrnyrIxkm+tI=
|
||||
github.com/open-policy-agent/opa v0.64.0/go.mod h1:j4VeLorVpKipnkQ2TDjWshEuV3cvP/rHzQhYaraUXZY=
|
||||
github.com/pjbgf/sha1cd v0.3.0 h1:4D5XXmUUBUl/xQ6IjCkEAbqXskkq/4O7LmGn0AqMDs4=
|
||||
github.com/pjbgf/sha1cd v0.3.0/go.mod h1:nZ1rrWOcGJ5uZgEEVL1VUM9iRQiZvWdbZjkKyFzPPsI=
|
||||
github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
|
||||
github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
|
||||
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA=
|
||||
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ=
|
||||
github.com/open-policy-agent/opa v1.4.2 h1:ag4upP7zMsa4WE2p1pwAFeG4Pn3mNwfAx9DLhhJfbjU=
|
||||
github.com/open-policy-agent/opa v1.4.2/go.mod h1:DNzZPKqKh4U0n0ANxcCVlw8lCSv2c+h5G/3QvSYdWZ8=
|
||||
github.com/philhofer/fwd v1.1.3-0.20240916144458-20a13a1f6b7c h1:dAMKvw0MlJT1GshSTtih8C2gDs04w8dReiOGXrGLNoY=
|
||||
github.com/philhofer/fwd v1.1.3-0.20240916144458-20a13a1f6b7c/go.mod h1:RqIHx9QI14HlwKwm98g9Re5prTQ6LdeRQn+gXJFxsJM=
|
||||
github.com/pjbgf/sha1cd v0.3.2 h1:a9wb0bp1oC2TGwStyn0Umc/IGKQnEgF0vVaZ8QF8eo4=
|
||||
github.com/pjbgf/sha1cd v0.3.2/go.mod h1:zQWigSxVmsHEZow5qaLtPYxpcKMMQpa09ixqBxuCS6A=
|
||||
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
|
||||
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U=
|
||||
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
|
||||
github.com/prometheus/client_golang v1.19.0 h1:ygXvpU1AoN1MhdzckN+PyD9QJOSD4x7kmXYlnfbA6JU=
|
||||
github.com/prometheus/client_golang v1.19.0/go.mod h1:ZRM9uEAypZakd+q/x7+gmsvXdURP+DABIEIjnmDdp+k=
|
||||
github.com/prometheus/client_golang v1.21.1 h1:DOvXXTqVzvkIewV/CDPFdejpMCGeMcbGCQ8YOmu+Ibk=
|
||||
github.com/prometheus/client_golang v1.21.1/go.mod h1:U9NM32ykUErtVBxdvD3zfi+EuFkkaBvMb09mIfe0Zgg=
|
||||
github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
|
||||
github.com/prometheus/client_model v0.6.1 h1:ZKSh/rekM+n3CeS952MLRAdFwIKqeY8b62p8ais2e9E=
|
||||
github.com/prometheus/client_model v0.6.1/go.mod h1:OrxVMOVHjw3lKMa8+x6HeMGkHMQyHDk9E3jmP2AmGiY=
|
||||
github.com/prometheus/common v0.53.0 h1:U2pL9w9nmJwJDa4qqLQ3ZaePJ6ZTwt7cMD3AG3+aLCE=
|
||||
github.com/prometheus/common v0.53.0/go.mod h1:BrxBKv3FWBIGXw89Mg1AeBq7FSyRzXWI3l3e7W3RN5U=
|
||||
github.com/prometheus/procfs v0.14.0 h1:Lw4VdGGoKEZilJsayHf0B+9YgLGREba2C6xr+Fdfq6s=
|
||||
github.com/prometheus/procfs v0.14.0/go.mod h1:XL+Iwz8k8ZabyZfMFHPiilCniixqQarAy5Mu67pHlNQ=
|
||||
github.com/prometheus/common v0.62.0 h1:xasJaQlnWAeyHdUBeGjXmutelfJHWMRr+Fg4QszZ2Io=
|
||||
github.com/prometheus/common v0.62.0/go.mod h1:vyBcEuLSvWos9B1+CyL7JZ2up+uFzXhkqml0W5zIY1I=
|
||||
github.com/prometheus/procfs v0.15.1 h1:YagwOFzUgYfKKHX6Dr+sHT7km/hxC76UB0learggepc=
|
||||
github.com/prometheus/procfs v0.15.1/go.mod h1:fB45yRUv8NstnjriLhBQLuOUt+WW4BsoGhij/e3PBqk=
|
||||
github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475 h1:N/ElC8H3+5XpJzTSTfLsJV/mx9Q9g7kxmchpfZyxgzM=
|
||||
github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4=
|
||||
github.com/rogpeppe/go-internal v1.11.0 h1:cWPaGQEPrBb5/AsnsZesgZZ9yb1OQ+GOISoDNXVBh4M=
|
||||
github.com/rogpeppe/go-internal v1.11.0/go.mod h1:ddIwULY96R17DhadqLgMfk9H9tvdUzkipdSkR5nkCZA=
|
||||
github.com/rs/xid v1.5.0 h1:mKX4bl4iPYJtEIxp6CYiUuLQ/8DYMoz0PUdtGgMFRVc=
|
||||
github.com/rs/xid v1.5.0/go.mod h1:trrq9SKmegXys3aeAKXMUTdJsYXVwGY3RLcfgqegfbg=
|
||||
github.com/rogpeppe/go-internal v1.13.2-0.20241226121412-a5dc8ff20d0a h1:w3tdWGKbLGBPtR/8/oO74W6hmz0qE5q0z9aqSAewaaM=
|
||||
github.com/rogpeppe/go-internal v1.13.2-0.20241226121412-a5dc8ff20d0a/go.mod h1:S8kfXMp+yh77OxPD4fdM6YUknrZpQxLhvxzS4gDHENY=
|
||||
github.com/rs/xid v1.6.0 h1:fV591PaemRlL6JfRxGDEPl69wICngIQ3shQtzfy2gxU=
|
||||
github.com/rs/xid v1.6.0/go.mod h1:7XoLgs4eV+QndskICGsho+ADou8ySMSjJKDIan90Nz0=
|
||||
github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
|
||||
github.com/sergi/go-diff v1.3.1 h1:xkr+Oxo4BOQKmkn/B9eMK0g5Kg/983T9DqqPHwYqD+8=
|
||||
github.com/sergi/go-diff v1.3.1/go.mod h1:aMJSSKb2lpPvRNec0+w3fl7LP9IOFzdc9Pa4NFbPK1I=
|
||||
github.com/sigstore/fulcio v1.4.5 h1:WWNnrOknD0DbruuZWCbN+86WRROpEl3Xts+WT2Ek1yc=
|
||||
github.com/sigstore/fulcio v1.4.5/go.mod h1:oz3Qwlma8dWcSS/IENR/6SjbW4ipN0cxpRVfgdsjMU8=
|
||||
github.com/sergi/go-diff v1.3.2-0.20230802210424-5b0b94c5c0d3 h1:n661drycOFuPLCN3Uc8sB6B/s6Z4t2xvBgU1htSHuq8=
|
||||
github.com/sergi/go-diff v1.3.2-0.20230802210424-5b0b94c5c0d3/go.mod h1:A0bzQcvG0E7Rwjx0REVgAGH58e96+X0MeOfepqsbeW4=
|
||||
github.com/sigstore/fulcio v1.6.6 h1:XaMYX6TNT+8n7Npe8D94nyZ7/ERjEsNGFC+REdi/wzw=
|
||||
github.com/sigstore/fulcio v1.6.6/go.mod h1:BhQ22lwaebDgIxVBEYOOqLRcN5+xOV+C9bh/GUXRhOk=
|
||||
github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ=
|
||||
github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ=
|
||||
github.com/skeema/knownhosts v1.2.1 h1:SHWdIUa82uGZz+F+47k8SY4QhhI291cXCpopT1lK2AQ=
|
||||
github.com/skeema/knownhosts v1.2.1/go.mod h1:xYbVRSPxqBZFrdmDyMmsOs+uX1UZC3nTN3ThzgDxUwo=
|
||||
github.com/skeema/knownhosts v1.3.0 h1:AM+y0rI04VksttfwjkSTNQorvGqmwATnvnAHpSgc0LY=
|
||||
github.com/skeema/knownhosts v1.3.0/go.mod h1:sPINvnADmT/qYH1kfv+ePMmOBTH6Tbl7b5LvTDjFK7M=
|
||||
github.com/sosodev/duration v1.3.1 h1:qtHBDMQ6lvMQsL15g4aopM4HEfOaYuhWBw3NPTtlqq4=
|
||||
github.com/sosodev/duration v1.3.1/go.mod h1:RQIBBX0+fMLc/D9+Jb/fwvVmo0eZvDDEERAikUR6SDg=
|
||||
github.com/spf13/cobra v1.8.1 h1:e5/vxKd/rZsfSJMUX1agtjeTDf+qv1/JdBF8gg5k9ZM=
|
||||
github.com/spf13/cobra v1.8.1/go.mod h1:wHxEcudfqmLYa8iTfL+OuZPbBZkmvliBWKIezN3kD9Y=
|
||||
github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA=
|
||||
github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
|
||||
github.com/spf13/cobra v1.9.1 h1:CXSaggrXdbHK9CF+8ywj8Amf7PBRmPCOJugH954Nnlo=
|
||||
github.com/spf13/cobra v1.9.1/go.mod h1:nDyEzZ8ogv936Cinf6g1RU9MRY64Ir93oCnqb9wxYW0=
|
||||
github.com/spf13/pflag v1.0.6 h1:jFzHGLGAlb3ruxLB8MhbI6A8+AQX/2eW4qeyNZXNp2o=
|
||||
github.com/spf13/pflag v1.0.6/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
|
||||
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
|
||||
github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=
|
||||
github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo=
|
||||
|
@ -311,18 +308,20 @@ github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/
|
|||
github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
|
||||
github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
|
||||
github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
|
||||
github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg=
|
||||
github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
|
||||
github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA=
|
||||
github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
|
||||
github.com/swaggo/files/v2 v2.0.0 h1:hmAt8Dkynw7Ssz46F6pn8ok6YmGZqHSVLZ+HQM7i0kw=
|
||||
github.com/swaggo/files/v2 v2.0.0/go.mod h1:24kk2Y9NYEJ5lHuCra6iVwkMjIekMCaFq/0JQj66kyM=
|
||||
github.com/swaggo/http-swagger/v2 v2.0.2 h1:FKCdLsl+sFCx60KFsyM0rDarwiUSZ8DqbfSyIKC9OBg=
|
||||
github.com/swaggo/http-swagger/v2 v2.0.2/go.mod h1:r7/GBkAWIfK6E/OLnE8fXnviHiDeAHmgIyooa4xm3AQ=
|
||||
github.com/swaggo/swag v1.16.3 h1:PnCYjPCah8FK4I26l2F/KQ4yz3sILcVUN3cTlBFA9Pg=
|
||||
github.com/swaggo/swag v1.16.3/go.mod h1:DImHIuOFXKpMFAQjcC7FG4m3Dg4+QuUgUzJmKjI/gRk=
|
||||
github.com/tchap/go-patricia/v2 v2.3.1 h1:6rQp39lgIYZ+MHmdEq4xzuk1t7OdC35z/xm0BGhTkes=
|
||||
github.com/tchap/go-patricia/v2 v2.3.1/go.mod h1:VZRHKAb53DLaG+nA9EaYYiaEx6YztwDlLElMsnSHD4k=
|
||||
github.com/vektah/gqlparser/v2 v2.5.16 h1:1gcmLTvs3JLKXckwCwlUagVn/IlV2bwqle0vJ0vy5p8=
|
||||
github.com/vektah/gqlparser/v2 v2.5.16/go.mod h1:1lz1OeCqgQbQepsGxPVywrjdBHW2T08PUS3pJqepRww=
|
||||
github.com/swaggo/swag v1.16.4 h1:clWJtd9LStiG3VeijiCfOVODP6VpHtKdQy9ELFG3s1A=
|
||||
github.com/swaggo/swag v1.16.4/go.mod h1:VBsHJRsDvfYvqoiMKnsdwhNV9LEMHgEDZcyVYX0sxPg=
|
||||
github.com/tchap/go-patricia/v2 v2.3.2 h1:xTHFutuitO2zqKAQ5rCROYgUb7Or/+IC3fts9/Yc7nM=
|
||||
github.com/tchap/go-patricia/v2 v2.3.2/go.mod h1:VZRHKAb53DLaG+nA9EaYYiaEx6YztwDlLElMsnSHD4k=
|
||||
github.com/tinylib/msgp v1.3.0 h1:ULuf7GPooDaIlbyvgAxBV/FI7ynli6LZ1/nVUNu+0ww=
|
||||
github.com/tinylib/msgp v1.3.0/go.mod h1:ykjzy2wzgrlvpDCRc4LA8UXy6D8bzMSuAF3WD57Gok0=
|
||||
github.com/vektah/gqlparser/v2 v2.5.30 h1:EqLwGAFLIzt1wpx1IPpY67DwUujF1OfzgEyDsLrN6kE=
|
||||
github.com/vektah/gqlparser/v2 v2.5.30/go.mod h1:D1/VCZtV3LPnQrcPBeR/q5jkSQIPti0uYCP/RI0gIeo=
|
||||
github.com/vmihailenco/msgpack/v5 v5.4.1 h1:cQriyiUvjTwOHg8QZaPihLWeRAAVoCpE00IUPn0Bjt8=
|
||||
github.com/vmihailenco/msgpack/v5 v5.4.1/go.mod h1:GaZTsDaehaPpQVyxrf5mtQlH+pc21PIudVV/E3rRQok=
|
||||
github.com/vmihailenco/tagparser/v2 v2.0.0 h1:y09buUbR+b5aycVFQs/g70pqKVZNBmxwAhO7/IwNM9g=
|
||||
|
@ -343,39 +342,47 @@ github.com/zclconf/go-cty v1.14.4 h1:uXXczd9QDGsgu0i/QFR/hzI5NYCHLf6NQw/atrbnhq8
|
|||
github.com/zclconf/go-cty v1.14.4/go.mod h1:VvMs5i0vgZdhYawQNq5kePSpLAoz8u1xvZgrPIxfnZE=
|
||||
github.com/zclconf/go-cty-debug v0.0.0-20191215020915-b22d67c1ba0b h1:FosyBZYxY34Wul7O/MSKey3txpPYyCqVO5ZyceuQJEI=
|
||||
github.com/zclconf/go-cty-debug v0.0.0-20191215020915-b22d67c1ba0b/go.mod h1:ZRKQfBXbGkpdV6QMzT3rU1kSTAnfu1dO8dPKjYprgj8=
|
||||
github.com/zclconf/go-cty-yaml v1.1.0 h1:nP+jp0qPHv2IhUVqmQSzjvqAWcObN0KBkUl2rWBdig0=
|
||||
github.com/zclconf/go-cty-yaml v1.1.0/go.mod h1:9YLUH4g7lOhVWqUbctnVlZ5KLpg7JAprQNgxSZ1Gyxs=
|
||||
go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0=
|
||||
go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo=
|
||||
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.49.0 h1:jq9TW8u3so/bN+JPT166wjOI6/vQPF6Xe7nMNIltagk=
|
||||
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.49.0/go.mod h1:p8pYQP+m5XfbZm9fxtSKAbM6oIllS7s2AfxrChvc7iw=
|
||||
go.opentelemetry.io/otel v1.26.0 h1:LQwgL5s/1W7YiiRwxf03QGnWLb2HW4pLiAhaA5cZXBs=
|
||||
go.opentelemetry.io/otel v1.26.0/go.mod h1:UmLkJHUAidDval2EICqBMbnAd0/m2vmpf/dAM+fvFs4=
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.22.0 h1:9M3+rhx7kZCIQQhQRYaZCdNu1V73tm4TvXs2ntl98C4=
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.22.0/go.mod h1:noq80iT8rrHP1SfybmPiRGc9dc5M8RPmGvtwo7Oo7tc=
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.21.0 h1:tIqheXEFWAZ7O8A7m+J0aPTmpJN3YQ7qetUAdkkkKpk=
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.21.0/go.mod h1:nUeKExfxAQVbiVFn32YXpXZZHZ61Cc3s3Rn1pDBGAb0=
|
||||
go.opentelemetry.io/otel/metric v1.26.0 h1:7S39CLuY5Jgg9CrnA9HHiEjGMF/X2VHvoXGgSllRz30=
|
||||
go.opentelemetry.io/otel/metric v1.26.0/go.mod h1:SY+rHOI4cEawI9a7N1A4nIg/nTQXe1ccCNWYOJUrpX4=
|
||||
go.opentelemetry.io/otel/sdk v1.26.0 h1:Y7bumHf5tAiDlRYFmGqetNcLaVUZmh4iYfmGxtmz7F8=
|
||||
go.opentelemetry.io/otel/sdk v1.26.0/go.mod h1:0p8MXpqLeJ0pzcszQQN4F0S5FVjBLgypeGSngLsmirs=
|
||||
go.opentelemetry.io/otel/trace v1.26.0 h1:1ieeAUb4y0TE26jUFrCIXKpTuVK7uJGN9/Z/2LP5sQA=
|
||||
go.opentelemetry.io/otel/trace v1.26.0/go.mod h1:4iDxvGDQuUkHve82hJJ8UqrwswHYsZuWCBllGV2U2y0=
|
||||
go.opentelemetry.io/proto/otlp v1.0.0 h1:T0TX0tmXU8a3CbNXzEKGeU5mIVOdf0oykP+u2lIVU/I=
|
||||
go.opentelemetry.io/proto/otlp v1.0.0/go.mod h1:Sy6pihPLfYHkr3NkUbEhGHFhINUSI/v80hjKIs5JXpM=
|
||||
go.opentelemetry.io/auto/sdk v1.1.0 h1:cH53jehLUN6UFLY71z+NDOiNJqDdPRaXzTel0sJySYA=
|
||||
go.opentelemetry.io/auto/sdk v1.1.0/go.mod h1:3wSPjt5PWp2RhlCcmmOial7AvC4DQqZb7a7wCow3W8A=
|
||||
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.60.0 h1:sbiXRNDSWJOTobXh5HyQKjq6wUC5tNybqjIqDpAY4CU=
|
||||
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.60.0/go.mod h1:69uWxva0WgAA/4bu2Yy70SLDBwZXuQ6PbBpbsa5iZrQ=
|
||||
go.opentelemetry.io/otel v1.35.0 h1:xKWKPxrxB6OtMCbmMY021CqC45J+3Onta9MqjhnusiQ=
|
||||
go.opentelemetry.io/otel v1.35.0/go.mod h1:UEqy8Zp11hpkUrL73gSlELM0DupHoiq72dR+Zqel/+Y=
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.35.0 h1:1fTNlAIJZGWLP5FVu0fikVry1IsiUnXjf7QFvoNN3Xw=
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.35.0/go.mod h1:zjPK58DtkqQFn+YUMbx0M2XV3QgKU0gS9LeGohREyK4=
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.35.0 h1:m639+BofXTvcY1q8CGs4ItwQarYtJPOWmVobfM1HpVI=
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.35.0/go.mod h1:LjReUci/F4BUyv+y4dwnq3h/26iNOeC3wAIqgvTIZVo=
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.35.0 h1:xJ2qHD0C1BeYVTLLR9sX12+Qb95kfeD/byKj6Ky1pXg=
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.35.0/go.mod h1:u5BF1xyjstDowA1R5QAO9JHzqK+ublenEW/dyqTjBVk=
|
||||
go.opentelemetry.io/otel/metric v1.35.0 h1:0znxYu2SNyuMSQT4Y9WDWej0VpcsxkuklLa4/siN90M=
|
||||
go.opentelemetry.io/otel/metric v1.35.0/go.mod h1:nKVFgxBZ2fReX6IlyW28MgZojkoAkJGaE8CpgeAU3oE=
|
||||
go.opentelemetry.io/otel/sdk v1.35.0 h1:iPctf8iprVySXSKJffSS79eOjl9pvxV9ZqOWT0QejKY=
|
||||
go.opentelemetry.io/otel/sdk v1.35.0/go.mod h1:+ga1bZliga3DxJ3CQGg3updiaAJoNECOgJREo9KHGQg=
|
||||
go.opentelemetry.io/otel/trace v1.35.0 h1:dPpEfJu1sDIqruz7BHFG3c7528f6ddfSWfFDVt/xgMs=
|
||||
go.opentelemetry.io/otel/trace v1.35.0/go.mod h1:WUk7DtFp1Aw2MkvqGdwiXYDZZNvA/1J8o6xRXLrIkyc=
|
||||
go.opentelemetry.io/proto/otlp v1.5.0 h1:xJvq7gMzB31/d406fB8U5CBdyQGw4P399D1aQWU/3i4=
|
||||
go.opentelemetry.io/proto/otlp v1.5.0/go.mod h1:keN8WnHxOy8PG0rQZjJJ5A2ebUoafqWp0eVQ4yIXvJ4=
|
||||
go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto=
|
||||
go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE=
|
||||
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
|
||||
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
|
||||
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
|
||||
golang.org/x/crypto v0.24.0 h1:mnl8DM0o513X8fdIkmyFE/5hTYxbwYOjDS/+rK6qpRI=
|
||||
golang.org/x/crypto v0.24.0/go.mod h1:Z1PMYSOR5nyMcyAVAIQSKCDwalqy85Aqn1x3Ws4L5DM=
|
||||
golang.org/x/crypto v0.39.0 h1:SHs+kF4LP+f+p14esP5jAoDpHU8Gu/v9lFRK6IT5imM=
|
||||
golang.org/x/crypto v0.39.0/go.mod h1:L+Xg3Wf6HoL4Bn4238Z6ft6KfEpN0tJGo53AAPC632U=
|
||||
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
|
||||
golang.org/x/exp v0.0.0-20240416160154-fe59bbe5cc7f h1:99ci1mjWVBWwJiEKYY6jWa4d2nTQVIEhZIptnrVb1XY=
|
||||
golang.org/x/exp v0.0.0-20240416160154-fe59bbe5cc7f/go.mod h1:/lliqkxwWAhPjf5oSOIJup2XcqJaw8RGS6k3TGEc7GI=
|
||||
golang.org/x/exp v0.0.0-20250506013437-ce4c2cf36ca6 h1:y5zboxd6LQAqYIhHnB48p0ByQ/GnQx2BE33L8BOHQkI=
|
||||
golang.org/x/exp v0.0.0-20250506013437-ce4c2cf36ca6/go.mod h1:U6Lno4MTRCDY+Ba7aCcauB9T60gsv5s4ralQzP72ZoQ=
|
||||
golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
|
||||
golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU=
|
||||
golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
|
||||
golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
|
||||
golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
|
||||
golang.org/x/mod v0.18.0 h1:5+9lSbEzPSdWkH32vYPBwEpX8KwDbM52Ud9xBUvNlb0=
|
||||
golang.org/x/mod v0.18.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c=
|
||||
golang.org/x/mod v0.25.0 h1:n7a+ZbQKQA/Ysbyb0/6IbB1H/X41mKgbhfv7AfG/44w=
|
||||
golang.org/x/mod v0.25.0/go.mod h1:IXM97Txy2VM4PJ3gI61r1YEk/gAj6zAHN3AdZt6S9Ww=
|
||||
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
|
@ -385,28 +392,27 @@ golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLL
|
|||
golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||
golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
|
||||
golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
|
||||
golang.org/x/net v0.26.0 h1:soB7SVo0PWrY4vPW/+ay0jKDNScG2X9wFeYlXIvJsOQ=
|
||||
golang.org/x/net v0.26.0/go.mod h1:5YKkiSynbBIh3p6iOc/vibscux0x38BZDkn8sCUPxHE=
|
||||
golang.org/x/net v0.41.0 h1:vBTly1HeNPEn3wtREYfy4GZ/NECgw2Cnl+nK6Nz3uvw=
|
||||
golang.org/x/net v0.41.0/go.mod h1:B/K4NNqkfmg07DQYrbwvSluqCJOOXwUjeb/5lOisjbA=
|
||||
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
|
||||
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.7.0 h1:YsImfSBoP9QPYL0xyKJPq0gcaJdG3rInoqxTWbfQu9M=
|
||||
golang.org/x/sync v0.7.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
|
||||
golang.org/x/sync v0.15.0 h1:KWH3jNZsfyT6xfAfKiz6MRNmd46ByHDYaZ7KSkCtdW8=
|
||||
golang.org/x/sync v0.15.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA=
|
||||
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.21.0 h1:rF+pYz3DAGSQAxAu1CbC7catZg4ebC4UIeIhKxBZvws=
|
||||
golang.org/x/sys v0.21.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
|
||||
golang.org/x/sys v0.33.0 h1:q3i8TbbEz+JRD9ywIRlyRAQbM0qF7hu24q3teo2hbuw=
|
||||
golang.org/x/sys v0.33.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k=
|
||||
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
|
||||
golang.org/x/text v0.16.0 h1:a94ExnEXNtEwYLGJSIUxnWoxoRz/ZcCsV63ROupILh4=
|
||||
golang.org/x/text v0.16.0/go.mod h1:GhwF1Be+LQoKShO3cGOHzqOgRrGaYc9AvblQOmPVHnI=
|
||||
golang.org/x/text v0.26.0 h1:P42AVeLghgTYr4+xUnTRKDMqpar+PtX7KWuNQL21L8M=
|
||||
golang.org/x/text v0.26.0/go.mod h1:QK15LZJUUQVJxhz7wXgxSy/CJaTFjd0G+YLonydOVQA=
|
||||
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||
golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||
golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY=
|
||||
|
@ -415,8 +421,8 @@ golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBn
|
|||
golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
||||
golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
|
||||
golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
|
||||
golang.org/x/tools v0.22.0 h1:gqSGLZqv+AI9lIQzniJ0nZDRG5GBPsSi+DRNHWNz6yA=
|
||||
golang.org/x/tools v0.22.0/go.mod h1:aCwcsjqvq7Yqt6TNyX7QMU2enbQ/Gt0bo6krSeEri+c=
|
||||
golang.org/x/tools v0.34.0 h1:qIpSLOxeCYGg9TrcJokLBG4KFA6d795g0xkBkiESGlo=
|
||||
golang.org/x/tools v0.34.0/go.mod h1:pAP9OwEaY1CAW3HOmg3hLZC5Z0CCmzjAF2UQMSqNARg=
|
||||
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
|
@ -426,18 +432,18 @@ google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7
|
|||
google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
|
||||
google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
|
||||
google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo=
|
||||
google.golang.org/genproto v0.0.0-20240401170217-c3f982113cda h1:wu/KJm9KJwpfHWhkkZGohVC6KRrc1oJNr4jwtQMOQXw=
|
||||
google.golang.org/genproto/googleapis/api v0.0.0-20240429193739-8cf5692501f6 h1:DTJM0R8LECCgFeUwApvcEJHz85HLagW8uRENYxHh1ww=
|
||||
google.golang.org/genproto/googleapis/api v0.0.0-20240429193739-8cf5692501f6/go.mod h1:10yRODfgim2/T8csjQsMPgZOMvtytXKTDRzH6HRGzRw=
|
||||
google.golang.org/genproto/googleapis/rpc v0.0.0-20240429193739-8cf5692501f6 h1:DujSIu+2tC9Ht0aPNA7jgj23Iq8Ewi5sgkQ++wdvonE=
|
||||
google.golang.org/genproto/googleapis/rpc v0.0.0-20240429193739-8cf5692501f6/go.mod h1:WtryC6hu0hhx87FDGxWCDptyssuo68sk10vYjF+T9fY=
|
||||
google.golang.org/genproto v0.0.0-20241118233622-e639e219e697 h1:ToEetK57OidYuqD4Q5w+vfEnPvPpuTwedCNVohYJfNk=
|
||||
google.golang.org/genproto/googleapis/api v0.0.0-20250218202821-56aae31c358a h1:nwKuGPlUAt+aR+pcrkfFRrTU1BVrSmYyYMxYbUIVHr0=
|
||||
google.golang.org/genproto/googleapis/api v0.0.0-20250218202821-56aae31c358a/go.mod h1:3kWAYMk1I75K4vykHtKt2ycnOgpA6974V7bREqbsenU=
|
||||
google.golang.org/genproto/googleapis/rpc v0.0.0-20250218202821-56aae31c358a h1:51aaUVRocpvUOSQKM6Q7VuoaktNIaMCLuhZB6DKksq4=
|
||||
google.golang.org/genproto/googleapis/rpc v0.0.0-20250218202821-56aae31c358a/go.mod h1:uRxBH1mhmO8PGhU89cMcHaXKZqO+OfakD8QQO0oYwlQ=
|
||||
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
|
||||
google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
|
||||
google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY=
|
||||
google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
|
||||
google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc=
|
||||
google.golang.org/grpc v1.63.2 h1:MUeiw1B2maTVZthpU5xvASfTh3LDbxHd6IJ6QQVU+xM=
|
||||
google.golang.org/grpc v1.63.2/go.mod h1:WAX/8DgncnokcFUldAxq7GeB5DXHDbMF+lLvDomNkRA=
|
||||
google.golang.org/grpc v1.71.1 h1:ffsFWr7ygTUscGPI0KKK6TLrGz0476KUvvsbqWK0rPI=
|
||||
google.golang.org/grpc v1.71.1/go.mod h1:H0GRtasmQOh9LkFoCPDu3ZrwUtD1YGE+b2vYBYd/8Ec=
|
||||
google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
|
||||
google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
|
||||
google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=
|
||||
|
@ -447,8 +453,8 @@ google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2
|
|||
google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
|
||||
google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
|
||||
google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c=
|
||||
google.golang.org/protobuf v1.34.1 h1:9ddQBjfCyZPOHPUiPxpYESBLc+T8P3E+Vo4IbKZgFWg=
|
||||
google.golang.org/protobuf v1.34.1/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos=
|
||||
google.golang.org/protobuf v1.36.6 h1:z1NpPI8ku2WgiWnf+t9wTPsn6eP1L7ksHUlkfLvd9xY=
|
||||
google.golang.org/protobuf v1.36.6/go.mod h1:jduwjTPXsFjZGTmRluh+L6NjiWu7pchiJ2/5YcXBHnY=
|
||||
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
|
||||
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
|
||||
|
@ -456,23 +462,20 @@ gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc=
|
|||
gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw=
|
||||
gopkg.in/warnings.v0 v0.1.2 h1:wFXVbFY8DY5/xOe1ECiWdKCzZlxgshcYVNkBHstARME=
|
||||
gopkg.in/warnings.v0 v0.1.2/go.mod h1:jksf8JmL6Qr/oQM2OXTHunEvvTAsrWBLb6OOjuVWRNI=
|
||||
gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||
gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY=
|
||||
gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=
|
||||
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
||||
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
|
||||
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
||||
honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
|
||||
honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
|
||||
k8s.io/apimachinery v0.30.0 h1:qxVPsyDM5XS96NIh9Oj6LavoVFYff/Pon9cZeDIkHHA=
|
||||
k8s.io/apimachinery v0.30.0/go.mod h1:iexa2somDaxdnj7bha06bhb43Zpa6eWH8N8dbqVjTUc=
|
||||
k8s.io/klog/v2 v2.120.1 h1:QXU6cPEOIslTGvZaXvFWiP9VKyeet3sawzTOvdXb4Vw=
|
||||
k8s.io/klog/v2 v2.120.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE=
|
||||
k8s.io/utils v0.0.0-20240423183400-0849a56e8f22 h1:ao5hUqGhsqdm+bYbjH/pRkCs0unBGe9UyDahzs9zQzQ=
|
||||
k8s.io/utils v0.0.0-20240423183400-0849a56e8f22/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0=
|
||||
sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd h1:EDPBXCAspyGV4jQlpZSudPeMmr1bNJefnuqLsRAsHZo=
|
||||
sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd/go.mod h1:B8JuhiUyNFVKdsE8h686QcCxMaH6HrOAZj4vswFpcB0=
|
||||
sigs.k8s.io/structured-merge-diff/v4 v4.4.1 h1:150L+0vs/8DA78h1u02ooW1/fFq/Lwr+sGiqlzvrtq4=
|
||||
sigs.k8s.io/structured-merge-diff/v4 v4.4.1/go.mod h1:N8hJocpFajUSSeSJ9bOZ77VzejKZaXsTtZo4/u7Io08=
|
||||
k8s.io/apimachinery v0.30.12 h1:41DC/4aa9twnQGBShHxh/LFoc7F4chsGBG/P2TW+J0Q=
|
||||
k8s.io/apimachinery v0.30.12/go.mod h1:iexa2somDaxdnj7bha06bhb43Zpa6eWH8N8dbqVjTUc=
|
||||
k8s.io/klog/v2 v2.130.1 h1:n9Xl7H1Xvksem4KFG4PYbdQCQxqc/tTUyrgXaOhHSzk=
|
||||
k8s.io/klog/v2 v2.130.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE=
|
||||
k8s.io/utils v0.0.0-20241104100929-3ea5e8cea738 h1:M3sRQVHv7vB20Xc2ybTt7ODCeFj6JSWYFzOFnYeS6Ro=
|
||||
k8s.io/utils v0.0.0-20241104100929-3ea5e8cea738/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0=
|
||||
sigs.k8s.io/json v0.0.0-20241010143419-9aa6b5e7a4b3 h1:/Rv+M11QRah1itp8VhT6HoVx1Ray9eB4DBr+K+/sCJ8=
|
||||
sigs.k8s.io/json v0.0.0-20241010143419-9aa6b5e7a4b3/go.mod h1:18nIHnGi6636UCz6m8i4DhaJ65T6EruyzmoQqI2BVDo=
|
||||
sigs.k8s.io/structured-merge-diff/v4 v4.4.2 h1:MdmvkGuXi/8io6ixD5wud3vOLwc1rj0aNqRlpuvjmwA=
|
||||
sigs.k8s.io/structured-merge-diff/v4 v4.4.2/go.mod h1:N8f93tFZh9U6vpxwRArLiikrE5/2tiu1w1AGfACIGE4=
|
||||
sigs.k8s.io/yaml v1.4.0 h1:Mk1wCc2gy/F0THH0TAp1QYyJNzRm2KCLy3o5ASXVI5E=
|
||||
sigs.k8s.io/yaml v1.4.0/go.mod h1:Ejl7/uTz7PSA4eKMyQCUTnhZYNmLIl+5c2lQPGR2BPY=
|
||||
|
|
|
@ -1,4 +1,4 @@
|
|||
// Copyright 2023 The Witness Contributors
|
||||
// Copyright 2023-2024 The Archivista Contributors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
|
@ -26,9 +26,44 @@ import (
|
|||
"github.com/in-toto/go-witness/dsse"
|
||||
)
|
||||
|
||||
func Download(ctx context.Context, baseUrl string, gitoid string) (dsse.Envelope, error) {
|
||||
func DownloadReadCloser(ctx context.Context, baseURL string, gitoid string, requestOptions ...RequestOption) (io.ReadCloser, error) {
|
||||
return DownloadReadCloserWithHTTPClient(ctx, &http.Client{}, baseURL, gitoid, requestOptions...)
|
||||
}
|
||||
|
||||
func DownloadReadCloserWithHTTPClient(ctx context.Context, client *http.Client, baseURL string, gitoid string, requestOptions ...RequestOption) (io.ReadCloser, error) {
|
||||
downloadURL, err := url.JoinPath(baseURL, "download", gitoid)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
req, err := http.NewRequestWithContext(ctx, http.MethodGet, downloadURL, nil)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
req = applyRequestOptions(req, requestOptions...)
|
||||
req.Header.Set("Content-Type", "application/json")
|
||||
resp, err := client.Do(req)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if resp.StatusCode != http.StatusOK {
|
||||
// NOTE: attempt to read body on error and
|
||||
// only close if an error occurs
|
||||
defer resp.Body.Close()
|
||||
errMsg, err := io.ReadAll(resp.Body)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return nil, errors.New(string(errMsg))
|
||||
}
|
||||
return resp.Body, nil
|
||||
}
|
||||
|
||||
func Download(ctx context.Context, baseURL string, gitoid string, requestOptions ...RequestOption) (dsse.Envelope, error) {
|
||||
buf := &bytes.Buffer{}
|
||||
if err := DownloadWithWriter(ctx, baseUrl, gitoid, buf); err != nil {
|
||||
if err := DownloadWithWriter(ctx, baseURL, gitoid, buf, requestOptions...); err != nil {
|
||||
return dsse.Envelope{}, err
|
||||
}
|
||||
|
||||
|
@ -41,17 +76,22 @@ func Download(ctx context.Context, baseUrl string, gitoid string) (dsse.Envelope
|
|||
return env, nil
|
||||
}
|
||||
|
||||
func DownloadWithWriter(ctx context.Context, baseUrl, gitoid string, dst io.Writer) error {
|
||||
downloadUrl, err := url.JoinPath(baseUrl, "download", gitoid)
|
||||
func DownloadWithWriter(ctx context.Context, baseURL string, gitoid string, dst io.Writer, requestOptions ...RequestOption) error {
|
||||
return DownloadWithWriterWithHTTPClient(ctx, &http.Client{}, baseURL, gitoid, dst, requestOptions...)
|
||||
}
|
||||
|
||||
func DownloadWithWriterWithHTTPClient(ctx context.Context, client *http.Client, baseURL string, gitoid string, dst io.Writer, requestOptions ...RequestOption) error {
|
||||
downloadUrl, err := url.JoinPath(baseURL, "download", gitoid)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
req, err := http.NewRequestWithContext(ctx, "GET", downloadUrl, nil)
|
||||
req, err := http.NewRequestWithContext(ctx, http.MethodGet, downloadUrl, nil)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
req = applyRequestOptions(req, requestOptions...)
|
||||
req.Header.Set("Content-Type", "application/json")
|
||||
hc := &http.Client{}
|
||||
resp, err := hc.Do(req)
|
||||
|
|
|
@ -1,4 +1,4 @@
|
|||
// Copyright 2023 The Witness Contributors
|
||||
// Copyright 2023-2024 The Archivista Contributors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
|
@ -25,28 +25,65 @@ import (
|
|||
"net/url"
|
||||
)
|
||||
|
||||
type graphQLError struct {
|
||||
Message string `json:"message"`
|
||||
}
|
||||
const RetrieveSubjectsQuery = `query($gitoid: String!) {
|
||||
subjects(
|
||||
where: {
|
||||
hasStatementWith:{
|
||||
hasDsseWith:{
|
||||
gitoidSha256: $gitoid
|
||||
}
|
||||
}
|
||||
}
|
||||
) {
|
||||
edges {
|
||||
node{
|
||||
name
|
||||
subjectDigests{
|
||||
algorithm
|
||||
value
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}`
|
||||
|
||||
type graphQLResponse[T any] struct {
|
||||
Data T `json:"data,omitempty"`
|
||||
Errors []graphQLError `json:"errors,omitempty"`
|
||||
}
|
||||
const SearchQuery = `query($algo: String!, $digest: String!) {
|
||||
dsses(
|
||||
where: {
|
||||
hasStatementWith: {
|
||||
hasSubjectsWith: {
|
||||
hasSubjectDigestsWith: {
|
||||
value: $digest,
|
||||
algorithm: $algo
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
) {
|
||||
edges {
|
||||
node {
|
||||
gitoidSha256
|
||||
statement {
|
||||
attestationCollections {
|
||||
name
|
||||
attestations {
|
||||
type
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}`
|
||||
|
||||
type graphQLRequestBody[TVars any] struct {
|
||||
Query string `json:"query"`
|
||||
Variables TVars `json:"variables,omitempty"`
|
||||
}
|
||||
|
||||
func GraphQlQuery[TRes any, TVars any](ctx context.Context, baseUrl, query string, vars TVars) (TRes, error) {
|
||||
func GraphQlQuery[TRes any, TVars any](ctx context.Context, baseUrl, query string, vars TVars, requestOptions ...RequestOption) (TRes, error) {
|
||||
var response TRes
|
||||
queryUrl, err := url.JoinPath(baseUrl, "query")
|
||||
if err != nil {
|
||||
return response, err
|
||||
}
|
||||
|
||||
requestBody := graphQLRequestBody[TVars]{
|
||||
requestBody := GraphQLRequestBodyGeneric[TVars]{
|
||||
Query: query,
|
||||
Variables: vars,
|
||||
}
|
||||
|
@ -56,11 +93,12 @@ func GraphQlQuery[TRes any, TVars any](ctx context.Context, baseUrl, query strin
|
|||
return response, err
|
||||
}
|
||||
|
||||
req, err := http.NewRequestWithContext(ctx, "POST", queryUrl, bytes.NewReader(reqBody))
|
||||
req, err := http.NewRequestWithContext(ctx, http.MethodPost, queryUrl, bytes.NewReader(reqBody))
|
||||
if err != nil {
|
||||
return response, err
|
||||
}
|
||||
|
||||
req = applyRequestOptions(req, requestOptions...)
|
||||
req.Header.Set("Content-Type", "application/json")
|
||||
hc := &http.Client{}
|
||||
res, err := hc.Do(req)
|
||||
|
@ -79,7 +117,7 @@ func GraphQlQuery[TRes any, TVars any](ctx context.Context, baseUrl, query strin
|
|||
}
|
||||
|
||||
dec := json.NewDecoder(res.Body)
|
||||
gqlRes := graphQLResponse[TRes]{}
|
||||
gqlRes := GraphQLResponseGeneric[TRes]{}
|
||||
if err := dec.Decode(&gqlRes); err != nil {
|
||||
return response, err
|
||||
}
|
||||
|
@ -90,3 +128,14 @@ func GraphQlQuery[TRes any, TVars any](ctx context.Context, baseUrl, query strin
|
|||
|
||||
return gqlRes.Data, nil
|
||||
}
|
||||
|
||||
// Deprecated: Use GraphQlQuery with the WithHeaders RequestOption
|
||||
func GraphQlQueryWithHeaders[TRes any, TVars any](ctx context.Context, baseUrl, query string, vars TVars, headers map[string]string, requestOptions ...RequestOption) (TRes, error) {
|
||||
h := http.Header{}
|
||||
for k, v := range headers {
|
||||
h.Set(k, v)
|
||||
}
|
||||
|
||||
requestOptions = append(requestOptions, WithHeaders(h))
|
||||
return GraphQlQuery[TRes](ctx, baseUrl, query, vars, requestOptions...)
|
||||
}
|
||||
|
|
|
@ -0,0 +1,51 @@
|
|||
// Copyright 2025 The Archivista Contributors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
package api
|
||||
|
||||
import "net/http"
|
||||
|
||||
type RequestOption func(*requestOptions)
|
||||
|
||||
type requestOptions struct {
|
||||
additionalHeaders http.Header
|
||||
}
|
||||
|
||||
func WithHeaders(h http.Header) RequestOption {
|
||||
return func(ro *requestOptions) {
|
||||
if h != nil {
|
||||
ro.additionalHeaders = h.Clone()
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func applyRequestOptions(req *http.Request, requestOpts ...RequestOption) *http.Request {
|
||||
if req == nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
opts := &requestOptions{}
|
||||
for _, opt := range requestOpts {
|
||||
if opt == nil {
|
||||
continue
|
||||
}
|
||||
|
||||
opt(opts)
|
||||
}
|
||||
|
||||
if opts.additionalHeaders != nil {
|
||||
req.Header = opts.additionalHeaders
|
||||
}
|
||||
|
||||
return req
|
||||
}
|
|
@ -0,0 +1,90 @@
|
|||
// Copyright 2024 The Archivista Contributors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package api
|
||||
|
||||
type GraphQLError struct {
|
||||
Message string `json:"message"`
|
||||
}
|
||||
|
||||
type GraphQLResponseGeneric[T any] struct {
|
||||
Data T `json:"data,omitempty"`
|
||||
Errors []GraphQLError `json:"errors,omitempty"`
|
||||
}
|
||||
|
||||
type GraphQLRequestBodyGeneric[TVars any] struct {
|
||||
Query string `json:"query"`
|
||||
Variables TVars `json:"variables,omitempty"`
|
||||
}
|
||||
|
||||
type RetrieveSubjectVars struct {
|
||||
Gitoid string `json:"gitoid"`
|
||||
}
|
||||
|
||||
type SearchVars struct {
|
||||
Algorithm string `json:"algo"`
|
||||
Digest string `json:"digest"`
|
||||
}
|
||||
|
||||
type RetrieveSubjectResults struct {
|
||||
Subjects Subjects `json:"subjects"`
|
||||
}
|
||||
|
||||
type Subjects struct {
|
||||
Edges []SubjectEdge `json:"edges"`
|
||||
}
|
||||
|
||||
type SubjectEdge struct {
|
||||
Node SubjectNode `json:"node"`
|
||||
}
|
||||
|
||||
type SubjectNode struct {
|
||||
Name string `json:"name"`
|
||||
SubjectDigests []SubjectDigest `json:"subjectDigests"`
|
||||
}
|
||||
|
||||
type SubjectDigest struct {
|
||||
Algorithm string `json:"algorithm"`
|
||||
Value string `json:"value"`
|
||||
}
|
||||
|
||||
type SearchResults struct {
|
||||
Dsses DSSES `json:"dsses"`
|
||||
}
|
||||
|
||||
type DSSES struct {
|
||||
Edges []SearchEdge `json:"edges"`
|
||||
}
|
||||
|
||||
type SearchEdge struct {
|
||||
Node SearchNode `json:"node"`
|
||||
}
|
||||
|
||||
type SearchNode struct {
|
||||
GitoidSha256 string `json:"gitoidSha256"`
|
||||
Statement Statement `json:"statement"`
|
||||
}
|
||||
|
||||
type Statement struct {
|
||||
AttestationCollection AttestationCollection `json:"attestationCollections"`
|
||||
}
|
||||
|
||||
type AttestationCollection struct {
|
||||
Name string `json:"name"`
|
||||
Attestations []Attestation `json:"attestations"`
|
||||
}
|
||||
|
||||
type Attestation struct {
|
||||
Type string `json:"type"`
|
||||
}
|
|
@ -1,4 +1,4 @@
|
|||
// Copyright 2023 The Witness Contributors
|
||||
// Copyright 2023-2024 The Archivista Contributors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
|
@ -33,28 +33,27 @@ type UploadResponse struct {
|
|||
// Deprecated: Use UploadResponse instead. It will be removed in version >= v0.6.0
|
||||
type StoreResponse = UploadResponse
|
||||
|
||||
// Deprecated: Use Upload instead. It will be removed in version >= v0.6.0
|
||||
func Store(ctx context.Context, baseUrl string, envelope dsse.Envelope) (StoreResponse, error) {
|
||||
return Upload(ctx, baseUrl, envelope)
|
||||
// Deprecated: Use Store instead. It will be removed in version >= v0.6.0
|
||||
func Upload(ctx context.Context, baseURL string, envelope dsse.Envelope, requestOptions ...RequestOption) (UploadResponse, error) {
|
||||
return Store(ctx, baseURL, envelope, requestOptions...)
|
||||
}
|
||||
|
||||
func Upload(ctx context.Context, baseUrl string, envelope dsse.Envelope) (StoreResponse, error) {
|
||||
func Store(ctx context.Context, baseURL string, envelope dsse.Envelope, requestOptions ...RequestOption) (StoreResponse, error) {
|
||||
buf := &bytes.Buffer{}
|
||||
enc := json.NewEncoder(buf)
|
||||
if err := enc.Encode(envelope); err != nil {
|
||||
return StoreResponse{}, err
|
||||
}
|
||||
|
||||
return UploadWithReader(ctx, baseUrl, buf)
|
||||
return StoreWithReader(ctx, baseURL, buf, requestOptions...)
|
||||
}
|
||||
|
||||
// Deprecated: Use UploadWithReader instead. It will be removed in version >= v0.6.0
|
||||
func StoreWithReader(ctx context.Context, baseUrl string, r io.Reader) (StoreResponse, error) {
|
||||
return UploadWithReader(ctx, baseUrl, r)
|
||||
func StoreWithReader(ctx context.Context, baseURL string, r io.Reader, requestOptions ...RequestOption) (StoreResponse, error) {
|
||||
return StoreWithReaderWithHTTPClient(ctx, &http.Client{}, baseURL, r, requestOptions...)
|
||||
}
|
||||
|
||||
func UploadWithReader(ctx context.Context, baseUrl string, r io.Reader) (StoreResponse, error) {
|
||||
uploadPath, err := url.JoinPath(baseUrl, "upload")
|
||||
func StoreWithReaderWithHTTPClient(ctx context.Context, client *http.Client, baseURL string, r io.Reader, requestOptions ...RequestOption) (StoreResponse, error) {
|
||||
uploadPath, err := url.JoinPath(baseURL, "upload")
|
||||
if err != nil {
|
||||
return UploadResponse{}, err
|
||||
}
|
||||
|
@ -64,6 +63,7 @@ func UploadWithReader(ctx context.Context, baseUrl string, r io.Reader) (StoreRe
|
|||
return UploadResponse{}, err
|
||||
}
|
||||
|
||||
req = applyRequestOptions(req, requestOptions...)
|
||||
req.Header.Set("Content-Type", "application/json")
|
||||
hc := &http.Client{}
|
||||
resp, err := hc.Do(req)
|
||||
|
|
|
@ -26,13 +26,20 @@ import (
|
|||
|
||||
type Config struct {
|
||||
ListenOn string `default:"tcp://127.0.0.1:8082" desc:"URL endpoint for Archivista to listen on" split_words:"true"`
|
||||
ReadTimeout int `default:"120" desc:"HTTP read timeout in seconds" split_words:"true"`
|
||||
WriteTimeout int `default:"120" desc:"HTTP write timeout in seconds" split_words:"true"`
|
||||
LogLevel string `default:"INFO" desc:"Log level" split_words:"true"`
|
||||
CORSAllowOrigins []string `default:"" desc:"Comma separated list of origins to allow CORS requests from" split_words:"true"`
|
||||
|
||||
EnableTLS bool `default:"FALSE" desc:"Enables TLS on the Archivista server" split_words:"true"`
|
||||
TLSCert string `default:"" desc:"Path to the file containing the TLS Certificate" split_words:"true"`
|
||||
TLSKey string `default:"" desc:"Path to the file containing the TLS Key" split_words:"true"`
|
||||
|
||||
EnableSPIFFE bool `default:"TRUE" desc:"*** Enable SPIFFE support" split_words:"true"`
|
||||
SPIFFEAddress string `default:"unix:///tmp/spire-agent/public/api.sock" desc:"SPIFFE server address" split_words:"true"`
|
||||
SPIFFETrustedServerId string `default:"" desc:"Trusted SPIFFE server ID; defaults to any" split_words:"true"`
|
||||
|
||||
EnableSQLStore bool `default:"TRUE" desc:"*** Enable SQL Metadata store. If disabled, GraphQL will also be disabled ***" split_words:"true"`
|
||||
SQLStoreConnectionString string `default:"root:example@tcp(db)/testify" desc:"SQL store connection string" split_words:"true"`
|
||||
SQLStoreBackend string `default:"MYSQL" desc:"SQL backend to use. Options are MYSQL, PSQL" split_words:"true"`
|
||||
SQLStoreMaxIdleConnections int `default:"10" desc:"Maximum number of connections in the idle connection pool" split_words:"true"`
|
||||
|
@ -49,11 +56,19 @@ type Config struct {
|
|||
BlobStoreUseTLS bool `default:"TRUE" desc:"Use TLS for BLOB storage backend. Only valid when using BLOB storage backend." split_words:"true"`
|
||||
BlobStoreBucketName string `default:"" desc:"Bucket to use for storage. Only valid when using BLOB storage backend." split_words:"true"`
|
||||
|
||||
EnableGraphql bool `default:"TRUE" desc:"*** Enable GraphQL Endpoint" split_words:"true"`
|
||||
EnableGraphql bool `default:"TRUE" desc:"*** Enable GraphQL Endpoint. If GraphQL is disabled, Archivista will be unable to be used by Witness to verify policies" split_words:"true"`
|
||||
GraphqlWebClientEnable bool `default:"TRUE" desc:"Enable GraphiQL, the GraphQL web client" split_words:"true"`
|
||||
|
||||
EnableArtifactStore bool `default:"FALSE" desc:"*** Enable Artifact Store Endpoints" split_words:"true"`
|
||||
ArtifactStoreConfig string `default:"/tmp/artifacts/config.yaml" desc:"Location of the config describing available artifacts" split_words:"true"`
|
||||
|
||||
Publisher []string `default:"" desc:"Publisher to use. Options are DAPR, RSTUF or empty string for disabled." split_words:"true"`
|
||||
PublisherDaprHost string `default:"http://127.0.0.1" desc:"Host for Dapr" split_words:"true"`
|
||||
PublisherDaprPort string `default:"3500" desc:"Port for Dapr" split_words:"true"`
|
||||
PublisherDaprURL string `default:"" desc:"URL for Dapr" split_words:"true"`
|
||||
PublisherDaprComponentName string `default:"archivista" desc:"Dapr pubsub component name" split_words:"true"`
|
||||
PublisherDaprTopic string `default:"attestations" desc:"Dapr pubsub topic" split_words:"true"`
|
||||
PublisherRstufHost string `default:"http://127.0.0.1" desc:"Host for RSTUF" split_words:"true"`
|
||||
}
|
||||
|
||||
// Process reads config from env
|
||||
|
@ -76,7 +91,7 @@ func (c *Config) Process() error {
|
|||
}
|
||||
}
|
||||
|
||||
//check if both are being used and error if so
|
||||
// check if both are being used and error if so
|
||||
if usingDeprecatedEnv && usingNewEnv {
|
||||
err := errors.New("both deprecated and new environment variables are being used. Please use only the new environment variables")
|
||||
return err
|
||||
|
|
|
@ -26,6 +26,8 @@ import (
|
|||
func TestConfig_Process(t *testing.T) {
|
||||
// Set up test environment variables
|
||||
os.Setenv("ARCHIVISTA_LISTEN_ON", "tcp://0.0.0.0:8082")
|
||||
os.Setenv("ARCHIVISTA_READ_TIMEOUT", "300")
|
||||
os.Setenv("ARCHIVISTA_WRITE_TIMEOUT", "300")
|
||||
os.Setenv("ARCHIVISTA_LOG_LEVEL", "DEBUG")
|
||||
os.Setenv("ARCHIVISTA_CORS_ALLOW_ORIGINS", "http://localhost,https://example.com")
|
||||
os.Setenv("ARCHIVISTA_ENABLE_SPIFFE", "FALSE")
|
||||
|
@ -44,6 +46,8 @@ func TestConfig_Process(t *testing.T) {
|
|||
|
||||
// Check that the expected values were read from environment variables
|
||||
require.Equal(t, "tcp://0.0.0.0:8082", c.ListenOn)
|
||||
require.Equal(t, 300, c.ReadTimeout)
|
||||
require.Equal(t, 300, c.WriteTimeout)
|
||||
require.Equal(t, "DEBUG", c.LogLevel)
|
||||
require.Equal(t, []string{"http://localhost", "https://example.com"}, c.CORSAllowOrigins)
|
||||
require.False(t, c.EnableSPIFFE)
|
||||
|
@ -57,6 +61,8 @@ func TestConfig_Process(t *testing.T) {
|
|||
|
||||
// Clean up environment variables
|
||||
os.Unsetenv("ARCHIVISTA_LISTEN_ON")
|
||||
os.Unsetenv("ARCHIVISTA_READ_TIMEOUT")
|
||||
os.Unsetenv("ARCHIVISTA_WRITE_TIMEOUT")
|
||||
os.Unsetenv("ARCHIVISTA_LOG_LEVEL")
|
||||
os.Unsetenv("ARCHIVISTA_CORS_ALLOW_ORIGINS")
|
||||
os.Unsetenv("ARCHIVISTA_ENABLE_SPIFFE")
|
||||
|
|
|
@ -0,0 +1,254 @@
|
|||
// Copyright 2024 The Archivista Contributors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package httpclient
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"net/http"
|
||||
"net/url"
|
||||
|
||||
"github.com/in-toto/archivista/pkg/api"
|
||||
"github.com/in-toto/go-witness/dsse"
|
||||
)
|
||||
|
||||
// ArchivistaClient is an HTTP client for an Archivista server. It embeds the
// underlying *http.Client and carries the base URL, the derived GraphQL
// endpoint URL, and an optional set of headers applied to every request.
type ArchivistaClient struct {
	BaseURL    string
	GraphQLURL string
	// requestHeaders, when non-nil, is sent with every request issued by
	// this client (set via WithHeaders).
	requestHeaders http.Header
	*http.Client
}
|
||||
|
||||
type Option func(*ArchivistaClient)
|
||||
|
||||
func WithHeaders(h http.Header) Option {
|
||||
return func(ac *ArchivistaClient) {
|
||||
if h != nil {
|
||||
ac.requestHeaders = h.Clone()
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (ac *ArchivistaClient) requestOptions() []api.RequestOption {
|
||||
opts := []api.RequestOption{}
|
||||
if ac.requestHeaders != nil {
|
||||
opts = append(opts, api.WithHeaders(ac.requestHeaders))
|
||||
}
|
||||
|
||||
return opts
|
||||
}
|
||||
|
||||
// HttpClienter is the full client surface implemented by ArchivistaClient:
// attestation download and upload plus GraphQL query helpers.
type HttpClienter interface {
	DownloadDSSE(ctx context.Context, gitoid string) (dsse.Envelope, error)
	DownloadReadCloser(ctx context.Context, gitoid string) (io.ReadCloser, error)
	DownloadWithWriter(ctx context.Context, gitoid string, dst io.Writer) error
	Store(ctx context.Context, envelope dsse.Envelope) (api.UploadResponse, error)
	StoreWithReader(ctx context.Context, r io.Reader) (api.UploadResponse, error)
	GraphQLRetrieveSubjectResults(ctx context.Context, gitoid string) (api.RetrieveSubjectResults, error)
	GraphQLRetrieveSearchResults(ctx context.Context, algo string, digest string) (api.SearchResults, error)
	GraphQLQueryIface(ctx context.Context, query string, variables interface{}) (*GraphQLResponseInterface, error)
	GraphQLQueryToDst(ctx context.Context, query string, variables interface{}, dst interface{}) error
	GraphQLQueryReadCloser(ctx context.Context, query string, variables interface{}) (io.ReadCloser, error)
}
|
||||
|
||||
func CreateArchivistaClient(httpClient *http.Client, baseURL string, opts ...Option) (*ArchivistaClient, error) {
|
||||
client := &ArchivistaClient{
|
||||
BaseURL: baseURL,
|
||||
Client: http.DefaultClient,
|
||||
}
|
||||
|
||||
if httpClient != nil {
|
||||
client.Client = httpClient
|
||||
}
|
||||
|
||||
for _, opt := range opts {
|
||||
opt(client)
|
||||
}
|
||||
|
||||
var err error
|
||||
client.GraphQLURL, err = url.JoinPath(client.BaseURL, "query")
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return client, nil
|
||||
}
|
||||
|
||||
func (ac *ArchivistaClient) DownloadDSSE(ctx context.Context, gitoid string) (dsse.Envelope, error) {
|
||||
reader, err := api.DownloadReadCloserWithHTTPClient(ctx, ac.Client, ac.BaseURL, gitoid, ac.requestOptions()...)
|
||||
if err != nil {
|
||||
return dsse.Envelope{}, err
|
||||
}
|
||||
env := dsse.Envelope{}
|
||||
if err := json.NewDecoder(reader).Decode(&env); err != nil {
|
||||
return dsse.Envelope{}, err
|
||||
}
|
||||
return env, nil
|
||||
}
|
||||
|
||||
// DownloadReadCloser returns the raw attestation stored under gitoid as a
// stream; the caller is responsible for closing it.
func (ac *ArchivistaClient) DownloadReadCloser(ctx context.Context, gitoid string) (io.ReadCloser, error) {
	return api.DownloadReadCloserWithHTTPClient(ctx, ac.Client, ac.BaseURL, gitoid, ac.requestOptions()...)
}
|
||||
|
||||
// DownloadWithWriter streams the attestation stored under gitoid into dst.
func (ac *ArchivistaClient) DownloadWithWriter(ctx context.Context, gitoid string, dst io.Writer) error {
	return api.DownloadWithWriterWithHTTPClient(ctx, ac.Client, ac.BaseURL, gitoid, dst, ac.requestOptions()...)
}
|
||||
|
||||
// Store uploads a DSSE envelope to Archivista and returns the assigned gitoid.
func (ac *ArchivistaClient) Store(ctx context.Context, envelope dsse.Envelope) (api.UploadResponse, error) {
	return api.Store(ctx, ac.BaseURL, envelope, ac.requestOptions()...)
}
|
||||
|
||||
// StoreWithReader uploads an envelope read from r and returns the assigned
// gitoid.
func (ac *ArchivistaClient) StoreWithReader(ctx context.Context, r io.Reader) (api.UploadResponse, error) {
	return api.StoreWithReader(ctx, ac.BaseURL, r, ac.requestOptions()...)
}
|
||||
|
||||
// GraphQLRequestBodyInterface is the JSON body POSTed to the GraphQL
// endpoint: a query string plus optional variables.
type GraphQLRequestBodyInterface struct {
	Query     string      `json:"query"`
	Variables interface{} `json:"variables,omitempty"`
}

// GraphQLResponseInterface is a generic GraphQL response: untyped data plus
// any errors the server reported.
type GraphQLResponseInterface struct {
	Data   interface{}
	Errors []api.GraphQLError `json:"errors,omitempty"`
}
|
||||
|
||||
// GraphQLRetrieveSubjectResults retrieves the subjects for a given gitoid.
|
||||
func (ac *ArchivistaClient) GraphQLRetrieveSubjectResults(
|
||||
ctx context.Context,
|
||||
gitoid string,
|
||||
) (api.RetrieveSubjectResults, error) {
|
||||
return api.GraphQlQuery[api.RetrieveSubjectResults](
|
||||
ctx,
|
||||
ac.BaseURL,
|
||||
api.RetrieveSubjectsQuery,
|
||||
api.RetrieveSubjectVars{Gitoid: gitoid},
|
||||
ac.requestOptions()...,
|
||||
)
|
||||
}
|
||||
|
||||
// GraphQLRetrieveSearchResults retrieves the search results for a given algorithm and digest.
|
||||
func (ac *ArchivistaClient) GraphQLRetrieveSearchResults(
|
||||
ctx context.Context,
|
||||
algo string,
|
||||
digest string,
|
||||
) (api.SearchResults, error) {
|
||||
return api.GraphQlQuery[api.SearchResults](
|
||||
ctx,
|
||||
ac.BaseURL,
|
||||
api.SearchQuery,
|
||||
api.SearchVars{Algorithm: algo, Digest: digest},
|
||||
ac.requestOptions()...,
|
||||
)
|
||||
}
|
||||
|
||||
// GraphQLQueryIface executes a GraphQL query against the Archivista API and returns the response as an interface.
|
||||
//
|
||||
// Parameters:
|
||||
// - ctx: The context to control the query's lifecycle, such as cancellations or deadlines.
|
||||
// - query: A string representing the GraphQL query to be executed.
|
||||
// - variables: A map or struct containing variables to parameterize the query.
|
||||
//
|
||||
// Returns:
|
||||
// - A pointer to a GraphQLResponseInterface containing the query's result or errors.
|
||||
// - An error if the query execution or response parsing fails.
|
||||
//
|
||||
// Example:
|
||||
//
|
||||
// response, err := client.GraphQLQueryIface(ctx, query, variables)
|
||||
// if err != nil {
|
||||
// log.Fatalf("GraphQL query failed: %v", err)
|
||||
// }
|
||||
// fmt.Printf("Response data: %+v\n", response.Data)
|
||||
func (ac *ArchivistaClient) GraphQLQueryIface(
|
||||
ctx context.Context,
|
||||
query string,
|
||||
variables interface{},
|
||||
) (*GraphQLResponseInterface, error) {
|
||||
reader, err := ac.GraphQLQueryReadCloser(ctx, query, variables)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer reader.Close()
|
||||
gqlRes := GraphQLResponseInterface{}
|
||||
dec := json.NewDecoder(reader)
|
||||
if err := dec.Decode(&gqlRes); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if len(gqlRes.Errors) > 0 {
|
||||
return nil, fmt.Errorf("graph ql query failed: %v", gqlRes.Errors)
|
||||
}
|
||||
return &gqlRes, nil
|
||||
}
|
||||
|
||||
// GraphQLQueryToDst executes a GraphQL query against the Archivista API and unmarshals the response into a destination object.
|
||||
func (ac *ArchivistaClient) GraphQLQueryToDst(ctx context.Context, query string, variables interface{}, dst interface{}) error {
|
||||
reader, err := ac.GraphQLQueryReadCloser(ctx, query, variables)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer reader.Close()
|
||||
dec := json.NewDecoder(reader)
|
||||
if err := dec.Decode(&dst); err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// GraphQLQueryReadCloser executes a GraphQL query against the Archivista API and returns the response as an io.ReadCloser.
|
||||
func (ac *ArchivistaClient) GraphQLQueryReadCloser(
|
||||
ctx context.Context,
|
||||
query string,
|
||||
variables interface{},
|
||||
) (io.ReadCloser, error) {
|
||||
requestBodyMap := GraphQLRequestBodyInterface{
|
||||
Query: query,
|
||||
Variables: variables,
|
||||
}
|
||||
|
||||
requestBodyJSON, err := json.Marshal(requestBodyMap)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
req, err := http.NewRequestWithContext(ctx, http.MethodPost, ac.GraphQLURL, bytes.NewReader(requestBodyJSON))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if ac.requestHeaders != nil {
|
||||
req.Header = ac.requestHeaders
|
||||
}
|
||||
|
||||
req.Header.Set("Content-Type", "application/json")
|
||||
res, err := ac.Do(req)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if res.StatusCode != http.StatusOK {
|
||||
defer res.Body.Close()
|
||||
errMsg, err := io.ReadAll(res.Body)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return nil, errors.New(string(errMsg))
|
||||
}
|
||||
|
||||
return res.Body, nil
|
||||
}
|
|
@ -0,0 +1,320 @@
|
|||
// Copyright 2024 The Archivista Contributors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package httpclient_test
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"encoding/json"
|
||||
"net/http"
|
||||
"net/http/httptest"
|
||||
"os"
|
||||
"testing"
|
||||
|
||||
"github.com/in-toto/archivista/pkg/api"
|
||||
httpclient "github.com/in-toto/archivista/pkg/http-client"
|
||||
"github.com/in-toto/go-witness/dsse"
|
||||
"github.com/stretchr/testify/suite"
|
||||
)
|
||||
|
||||
// Test Suite: UT HTTPClientDownloadSuite
// UTHTTPClientDownloadSuite covers the download helpers of the HTTP client.
type UTHTTPClientDownloadSuite struct {
	suite.Suite
}

// TestHTTPClientAPIDownloadSuite runs the download test suite.
func TestHTTPClientAPIDownloadSuite(t *testing.T) {
	suite.Run(t, new(UTHTTPClientDownloadSuite))
}
|
||||
|
||||
// Test_DownloadDSSE serves a recorded attestation from a stub server and
// verifies DownloadDSSE returns it decoded as a dsse.Envelope.
func (ut *UTHTTPClientDownloadSuite) Test_DownloadDSSE() {
	testEnvelope, err := os.ReadFile("../../test/package.attestation.json")
	if err != nil {
		ut.FailNow(err.Error())
	}
	// Decode the fixture independently to obtain the expected envelope.
	expectedEnvelop := dsse.Envelope{}
	err = json.Unmarshal(testEnvelope, &expectedEnvelop)
	if err != nil {
		ut.FailNow(err.Error())
	}
	// Stub server that replies with the raw fixture bytes for any request.
	testServer := httptest.NewServer(
		http.HandlerFunc(
			func(w http.ResponseWriter, r *http.Request) {
				w.WriteHeader(http.StatusOK)
				_, err = w.Write(testEnvelope)
				if err != nil {
					ut.FailNow(err.Error())
				}
			},
		),
	)
	defer testServer.Close()
	ctx := context.TODO()
	client, err := httpclient.CreateArchivistaClient(http.DefaultClient, testServer.URL)
	if err != nil {
		ut.FailNow(err.Error())
	}
	resp, err := client.DownloadDSSE(ctx, "gitoid_test")
	if err != nil {
		ut.FailNow(err.Error())
	}
	ut.Equal(expectedEnvelop, resp)
}
|
||||
|
||||
// Test_DownloadReadCloser verifies DownloadReadCloser returns a stream whose
// contents decode to the same envelope as the fixture file.
func (ut *UTHTTPClientDownloadSuite) Test_DownloadReadCloser() {
	testEnvelope, err := os.ReadFile("../../test/package.attestation.json")
	if err != nil {
		ut.FailNow(err.Error())
	}
	// Decode the fixture independently to obtain the expected envelope.
	expectedEnvelop := dsse.Envelope{}
	err = json.Unmarshal(testEnvelope, &expectedEnvelop)
	if err != nil {
		ut.FailNow(err.Error())
	}
	// Stub server that replies with the raw fixture bytes for any request.
	testServer := httptest.NewServer(
		http.HandlerFunc(
			func(w http.ResponseWriter, r *http.Request) {
				w.WriteHeader(http.StatusOK)
				_, err = w.Write(testEnvelope)
				if err != nil {
					ut.FailNow(err.Error())
				}
			},
		),
	)
	defer testServer.Close()
	ctx := context.TODO()
	client, err := httpclient.CreateArchivistaClient(http.DefaultClient, testServer.URL)
	if err != nil {
		ut.FailNow(err.Error())
	}
	readCloser, err := client.DownloadReadCloser(ctx, "gitoid_test")
	if err != nil {
		ut.FailNow(err.Error())
	}
	env := dsse.Envelope{}
	if err := json.NewDecoder(readCloser).Decode(&env); err != nil {
		ut.FailNow(err.Error())
	}
	ut.Equal(expectedEnvelop, env)
}
|
||||
|
||||
// Test_DownloadWithWriter verifies DownloadWithWriter streams the attestation
// into the supplied writer and that the bytes decode to the fixture envelope.
func (ut *UTHTTPClientDownloadSuite) Test_DownloadWithWriter() {
	testEnvelope, err := os.ReadFile("../../test/package.attestation.json")
	if err != nil {
		ut.FailNow(err.Error())
	}
	// Decode the fixture independently to obtain the expected envelope.
	expectedEnvelop := dsse.Envelope{}
	err = json.Unmarshal(testEnvelope, &expectedEnvelop)
	if err != nil {
		ut.FailNow(err.Error())
	}
	// Stub server that replies with the raw fixture bytes for any request.
	testServer := httptest.NewServer(
		http.HandlerFunc(
			func(w http.ResponseWriter, r *http.Request) {
				w.WriteHeader(http.StatusOK)
				_, err = w.Write(testEnvelope)
				if err != nil {
					ut.FailNow(err.Error())
				}
			},
		),
	)
	defer testServer.Close()
	ctx := context.TODO()
	client, err := httpclient.CreateArchivistaClient(http.DefaultClient, testServer.URL)
	if err != nil {
		ut.FailNow(err.Error())
	}
	buf := bytes.NewBuffer(nil)
	if err := client.DownloadWithWriter(ctx, "gitoid_test", buf); err != nil {
		ut.FailNow(err.Error())
	}
	env := dsse.Envelope{}
	if err := json.NewDecoder(buf).Decode(&env); err != nil {
		ut.FailNow(err.Error())
	}
	ut.Equal(expectedEnvelop, env)
}
|
||||
|
||||
// Test Suite: UT HTTPClientStore
// UTHTTPClientStoreSuite covers the upload helpers of the HTTP client.
type UTHTTPClientStoreSuite struct {
	suite.Suite
}

// TestAPIStoreSuite runs the store test suite.
func TestAPIStoreSuite(t *testing.T) {
	suite.Run(t, new(UTHTTPClientStoreSuite))
}
|
||||
|
||||
// Test_Store uploads a fixture envelope to a stub server and verifies the
// returned UploadResponse carries the gitoid the server reported.
func (ut *UTHTTPClientStoreSuite) Test_Store() {
	// Stub server that acknowledges every upload with a fixed gitoid.
	testServer := httptest.NewServer(
		http.HandlerFunc(
			func(w http.ResponseWriter, r *http.Request) {
				w.WriteHeader(http.StatusOK)
				_, err := w.Write([]byte(`{"gitoid":"test"}`))
				if err != nil {
					ut.FailNow(err.Error())
				}
			},
		),
	)
	defer testServer.Close()
	ctx := context.TODO()
	attFile, err := os.ReadFile("../../test/package.attestation.json")
	if err != nil {
		ut.FailNow(err.Error())
	}
	attEnvelop := dsse.Envelope{}
	err = json.Unmarshal(attFile, &attEnvelop)
	if err != nil {
		ut.FailNow(err.Error())
	}
	client, err := httpclient.CreateArchivistaClient(http.DefaultClient, testServer.URL)
	if err != nil {
		ut.FailNow(err.Error())
	}
	resp, err := client.Store(ctx, attEnvelop)
	if err != nil {
		ut.FailNow(err.Error())
	}
	ut.Equal(resp, api.UploadResponse{Gitoid: "test"})
}
|
||||
|
||||
// Test_StoreWithReader uploads the fixture file as a stream and verifies the
// returned UploadResponse carries the gitoid the stub server reported.
func (ut *UTHTTPClientStoreSuite) Test_StoreWithReader() {
	// Stub server that acknowledges every upload with a fixed gitoid.
	testServer := httptest.NewServer(
		http.HandlerFunc(
			func(w http.ResponseWriter, r *http.Request) {
				w.WriteHeader(http.StatusOK)
				_, err := w.Write([]byte(`{"gitoid":"test"}`))
				if err != nil {
					ut.FailNow(err.Error())
				}
			},
		),
	)
	defer testServer.Close()
	attIo, err := os.Open("../../test/package.attestation.json")
	if err != nil {
		ut.FailNow(err.Error())
	}
	ctx := context.TODO()
	client, err := httpclient.CreateArchivistaClient(http.DefaultClient, testServer.URL)
	if err != nil {
		ut.FailNow(err.Error())
	}
	resp, err := client.StoreWithReader(ctx, attIo)
	if err != nil {
		ut.FailNow(err.Error())
	}
	ut.Equal(resp, api.UploadResponse{Gitoid: "test"})
}
|
||||
|
||||
// Test Suite: UT HTTPClientGraphQL
// UTHTTPClientGraphQLSuite covers the GraphQL helpers of the HTTP client.
// (The original header comment said "HTTPClientStore" — a copy-paste slip.)
type UTHTTPClientGraphQLSuite struct {
	suite.Suite
}

// TestAPIGraphQLSuite runs the GraphQL test suite.
func TestAPIGraphQLSuite(t *testing.T) {
	suite.Run(t, new(UTHTTPClientGraphQLSuite))
}
|
||||
|
||||
// Test_GraphQLRetrieveSubjectResults serves a canned GraphQL response and
// verifies GraphQLRetrieveSubjectResults decodes the Data section.
func (ut *UTHTTPClientGraphQLSuite) Test_GraphQLRetrieveSubjectResults() {
	// Expected response, also used verbatim as the stub server's reply.
	expected := api.GraphQLResponseGeneric[api.RetrieveSubjectResults]{
		Data: api.RetrieveSubjectResults{
			Subjects: api.Subjects{
				Edges: []api.SubjectEdge{
					{
						Node: api.SubjectNode{
							Name: "test_Gitoid",
							SubjectDigests: []api.SubjectDigest{
								{
									Algorithm: "test_Gitoid",
									Value:     "test_Gitoid",
								},
							},
						},
					},
				},
			},
		},
		Errors: []api.GraphQLError{},
	}
	testServer := httptest.NewServer(
		http.HandlerFunc(
			func(w http.ResponseWriter, r *http.Request) {
				w.Header().Set("Content-Type", "application/json")
				w.WriteHeader(http.StatusOK)
				if err := json.NewEncoder(w).Encode(expected); err != nil {
					ut.FailNow(err.Error())
				}
			},
		),
	)
	defer testServer.Close()
	ctx := context.TODO()
	client, err := httpclient.CreateArchivistaClient(http.DefaultClient, testServer.URL)
	if err != nil {
		ut.FailNow(err.Error())
	}
	actual, err := client.GraphQLRetrieveSubjectResults(ctx, "test_Gitoid")
	ut.NoError(err)
	ut.Equal(expected.Data, actual)
}
|
||||
|
||||
// Test_GraphQLSearchResults serves a canned GraphQL search response and
// verifies GraphQLRetrieveSearchResults decodes the Data section.
func (ut *UTHTTPClientGraphQLSuite) Test_GraphQLSearchResults() {
	// Expected response, also used verbatim as the stub server's reply.
	expected := api.GraphQLResponseGeneric[api.SearchResults]{
		Data: api.SearchResults{
			Dsses: api.DSSES{
				Edges: []api.SearchEdge{
					{
						Node: api.SearchNode{
							GitoidSha256: "test_Gitoid",
							Statement: api.Statement{
								AttestationCollection: api.AttestationCollection{
									Name: "test_Gitoid",
									Attestations: []api.Attestation{
										{
											Type: "test",
										},
									},
								},
							},
						},
					},
				},
			},
		},
		Errors: []api.GraphQLError{},
	}
	testServer := httptest.NewServer(
		http.HandlerFunc(
			func(w http.ResponseWriter, r *http.Request) {
				w.Header().Set("Content-Type", "application/json")
				w.WriteHeader(http.StatusOK)
				if err := json.NewEncoder(w).Encode(expected); err != nil {
					ut.FailNow(err.Error())
				}
			},
		),
	)
	defer testServer.Close()
	ctx := context.TODO()
	client, err := httpclient.CreateArchivistaClient(http.DefaultClient, testServer.URL)
	if err != nil {
		ut.FailNow(err.Error())
	}
	actual, err := client.GraphQLRetrieveSearchResults(ctx, "test_Gitoid", "test_Gitoid")
	ut.NoError(err)
	ut.Equal(expected.Data, actual)
}
|
|
@ -50,7 +50,7 @@ func (ut *UTFileStoreSuite) TearDownTest() {
|
|||
}
|
||||
func (ut *UTFileStoreSuite) Test_Get() {
|
||||
|
||||
store, _, err := filestore.New(context.Background(), ut.tempDir, "")
|
||||
store, _, err := filestore.New(context.Background(), ut.tempDir, ":50025")
|
||||
if err != nil {
|
||||
ut.FailNow(err.Error())
|
||||
}
|
||||
|
|
|
@ -0,0 +1,92 @@
|
|||
// Copyright 2024 The Archivista Contributors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
package dapr
|
||||
|
||||
import (
	"bytes"
	"context"
	"encoding/json"
	"fmt"
	"io"
	"net/http"
	"time"

	"github.com/in-toto/archivista/pkg/config"
	"github.com/sirupsen/logrus"
)
|
||||
|
||||
// DaprHttp publishes messages to a Dapr pub/sub component through the Dapr
// sidecar's HTTP API.
type DaprHttp struct {
	// Client is lazily initialized by Publish when nil.
	Client              *http.Client
	Host                string
	HttpPort            string
	PubsubComponentName string
	PubsubTopic         string
	// Url is the full publish endpoint; when empty it is derived from the
	// fields above on first use.
	Url string
}

// daprPayload is the JSON message body published to the pub/sub topic.
type daprPayload struct {
	Gitoid  string
	Payload []byte
}

// Publisher publishes an attestation payload identified by its gitoid.
type Publisher interface {
	Publish(ctx context.Context, gitoid string, payload []byte) error
}
|
||||
|
||||
func (d *DaprHttp) Publish(ctx context.Context, gitoid string, payload []byte) error {
|
||||
if d.Client == nil {
|
||||
d.Client = &http.Client{
|
||||
Timeout: 15 * time.Second,
|
||||
}
|
||||
}
|
||||
|
||||
if d.Url == "" {
|
||||
d.Url = d.Host + ":" + d.HttpPort +
|
||||
"/v1.0/publish/" + d.PubsubComponentName + "/" + d.PubsubTopic
|
||||
}
|
||||
|
||||
dp := daprPayload{
|
||||
Gitoid: gitoid,
|
||||
Payload: payload,
|
||||
}
|
||||
// Marshal the message to JSON
|
||||
msgBytes, err := json.Marshal(dp)
|
||||
if err != nil {
|
||||
logrus.Error(err.Error())
|
||||
return err
|
||||
}
|
||||
|
||||
res, err := d.Client.Post(d.Url, "application/json", bytes.NewReader(msgBytes))
|
||||
if err != nil {
|
||||
logrus.Error(err.Error())
|
||||
return err
|
||||
}
|
||||
if res.StatusCode != http.StatusNoContent {
|
||||
logrus.Printf("failed to publish message: %s", res.Body)
|
||||
return fmt.Errorf("failed to publish message: %s", res.Body)
|
||||
}
|
||||
defer res.Body.Close()
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func NewPublisher(config *config.Config) Publisher {
|
||||
daprPublisher := &DaprHttp{
|
||||
Host: config.PublisherDaprHost,
|
||||
HttpPort: config.PublisherDaprPort,
|
||||
PubsubComponentName: config.PublisherDaprComponentName,
|
||||
PubsubTopic: config.PublisherDaprTopic,
|
||||
Url: config.PublisherDaprURL,
|
||||
}
|
||||
return daprPublisher
|
||||
}
|
|
@ -0,0 +1,47 @@
|
|||
// Copyright 2024 The Archivista Contributors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
package publisherstore
|
||||
|
||||
import (
|
||||
"context"
|
||||
"strings"
|
||||
|
||||
"github.com/in-toto/archivista/pkg/config"
|
||||
"github.com/in-toto/archivista/pkg/publisherstore/dapr"
|
||||
"github.com/in-toto/archivista/pkg/publisherstore/rstuf"
|
||||
"github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
// Publisher publishes an attestation payload identified by its gitoid to an
// external backend (Dapr, RSTUF, ...).
type Publisher interface {
	Publish(ctx context.Context, gitoid string, payload []byte) error
}
|
||||
|
||||
func New(config *config.Config) []Publisher {
|
||||
var publisherStore []Publisher
|
||||
for _, pubType := range config.Publisher {
|
||||
pubType = strings.ToUpper(pubType) // Normalize the input
|
||||
switch pubType {
|
||||
case "DAPR":
|
||||
publisherStore = append(publisherStore, dapr.NewPublisher(config))
|
||||
logrus.Info("Using publisher: DAPR")
|
||||
|
||||
case "RSTUF":
|
||||
publisherStore = append(publisherStore, rstuf.NewPublisher(config))
|
||||
logrus.Info("Using publisher: RSTUF")
|
||||
default:
|
||||
logrus.Errorf("unsupported publisher type: %s", pubType)
|
||||
}
|
||||
}
|
||||
return publisherStore
|
||||
}
|
|
@ -0,0 +1,124 @@
|
|||
// Copyright 2024 The Archivista Contributors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package rstuf
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"crypto/sha256"
|
||||
"encoding/hex"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io"
|
||||
"net/http"
|
||||
"net/http/httputil"
|
||||
|
||||
"github.com/in-toto/archivista/pkg/config"
|
||||
"github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
// RSTUF publishes artifacts to a Repository Service for TUF (RSTUF) API at
// the given host.
type RSTUF struct {
	Host string
}

// Publisher publishes an attestation payload identified by its gitoid.
type Publisher interface {
	Publish(ctx context.Context, gitoid string, payload []byte) error
}
|
||||
|
||||
func (r *RSTUF) parseRSTUFPayload(gitoid string, payload []byte) ([]byte, error) {
|
||||
objHash := sha256.Sum256(payload)
|
||||
// custom := make(map[string]any)
|
||||
// custom["gitoid"] = gitoid
|
||||
artifacts := []Artifact{
|
||||
{
|
||||
Path: gitoid,
|
||||
Info: ArtifactInfo{
|
||||
Length: len(payload),
|
||||
Hashes: Hashes{
|
||||
Sha256: hex.EncodeToString(objHash[:]),
|
||||
},
|
||||
// Custom: custom,
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
artifactPayload := ArtifactPayload{
|
||||
Artifacts: artifacts,
|
||||
AddTaskIDToCustom: false,
|
||||
PublishTargets: true,
|
||||
}
|
||||
|
||||
payloadBytes, err := json.Marshal(artifactPayload)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("error marshaling payload: %v", err)
|
||||
}
|
||||
return payloadBytes, nil
|
||||
}
|
||||
|
||||
func (r *RSTUF) Publish(ctx context.Context, gitoid string, payload []byte) error {
|
||||
// this publisher allows integration with the RSTUF project to store
|
||||
// the attestation and policy in the TUF metadata.
|
||||
// this TUF metadata can be used to build truste when distributing the
|
||||
// attestations and policies.
|
||||
// Convert payload to JSON
|
||||
url := r.Host + "/api/v1/artifacts"
|
||||
|
||||
payloadBytes, err := r.parseRSTUFPayload(gitoid, payload)
|
||||
if err != nil {
|
||||
return fmt.Errorf("error parsing payload: %v", err)
|
||||
}
|
||||
|
||||
req, err := http.NewRequest("POST", url, bytes.NewBuffer(payloadBytes))
|
||||
if err != nil {
|
||||
return fmt.Errorf("error creating request: %v", err)
|
||||
}
|
||||
|
||||
req.Header.Set("Content-Type", "application/json")
|
||||
// Add any additional headers or authentication if needed
|
||||
|
||||
client := &http.Client{}
|
||||
resp, err := client.Do(req)
|
||||
if err != nil {
|
||||
logrus.Errorf("error making request: %v", err)
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
|
||||
if resp.StatusCode != http.StatusAccepted {
|
||||
logb, _ := httputil.DumpResponse(resp, true)
|
||||
logrus.Errorf("error body from RSTUF: %v", string(logb))
|
||||
return fmt.Errorf("error response from RSTUF: %v", err)
|
||||
}
|
||||
|
||||
// Handle the response as needed
|
||||
body, err := io.ReadAll(resp.Body)
|
||||
if err != nil {
|
||||
logrus.Errorf("error reading response body: %v", err)
|
||||
}
|
||||
|
||||
response := Response{}
|
||||
err = json.Unmarshal(body, &response)
|
||||
if err != nil {
|
||||
logrus.Errorf("error unmarshaling response: %v", err)
|
||||
}
|
||||
logrus.Debugf("RSTUF task id: %v", response.Data.TaskId)
|
||||
// TODO: monitor RSTUF task id for completion
|
||||
return nil
|
||||
}
|
||||
|
||||
func NewPublisher(config *config.Config) Publisher {
|
||||
return &RSTUF{
|
||||
Host: config.PublisherRstufHost,
|
||||
}
|
||||
}
|
|
@ -0,0 +1,51 @@
|
|||
// Copyright 2024 The Archivista Contributors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package rstuf
|
||||
|
||||
// Hashes represents the Hashes structure: the digests RSTUF records for an
// artifact (currently SHA-256 only).
type Hashes struct {
	Sha256 string `json:"sha256"`
}

// ArtifactInfo represents the ArtifactInfo structure: size, digests, and
// optional custom metadata for one artifact.
type ArtifactInfo struct {
	Length int            `json:"length"`
	Hashes Hashes         `json:"hashes"`
	Custom map[string]any `json:"custom,omitempty"`
}

// Artifact represents the Artifact structure: a target path plus its info.
type Artifact struct {
	Path string       `json:"path"`
	Info ArtifactInfo `json:"info"`
}

// ArtifactPayload represents the request body sent to the RSTUF artifacts
// endpoint.
type ArtifactPayload struct {
	Artifacts         []Artifact `json:"artifacts"`
	AddTaskIDToCustom bool       `json:"add_task_id_to_custom"`
	PublishTargets    bool       `json:"publish_targets"`
}

// ArtifactsResponse is the data section of an RSTUF submission response,
// including the async task id to poll for completion.
type ArtifactsResponse struct {
	Artifacts  []string `json:"artifacts"`
	TaskId     string   `json:"task_id"`
	LastUpdate string   `json:"last_update"`
	Message    string   `json:"message"`
}

// Response is the envelope RSTUF wraps around ArtifactsResponse.
type Response struct {
	Data ArtifactsResponse `json:"data"`
}
|
|
@ -29,6 +29,7 @@ import (
|
|||
|
||||
"entgo.io/contrib/entgql"
|
||||
"github.com/99designs/gqlgen/graphql/handler"
|
||||
"github.com/99designs/gqlgen/graphql/handler/transport"
|
||||
"github.com/99designs/gqlgen/graphql/playground"
|
||||
"github.com/edwarnicke/gitoid"
|
||||
"github.com/gorilla/mux"
|
||||
|
@ -38,16 +39,18 @@ import (
|
|||
"github.com/in-toto/archivista/pkg/api"
|
||||
"github.com/in-toto/archivista/pkg/artifactstore"
|
||||
"github.com/in-toto/archivista/pkg/config"
|
||||
"github.com/in-toto/archivista/pkg/publisherstore"
|
||||
"github.com/sirupsen/logrus"
|
||||
httpSwagger "github.com/swaggo/http-swagger/v2"
|
||||
)
|
||||
|
||||
type Server struct {
|
||||
metadataStore Storer
|
||||
objectStore StorerGetter
|
||||
artifactStore artifactstore.Store
|
||||
router *mux.Router
|
||||
sqlClient *ent.Client
|
||||
metadataStore Storer
|
||||
objectStore StorerGetter
|
||||
artifactStore artifactstore.Store
|
||||
router *mux.Router
|
||||
sqlClient *ent.Client
|
||||
publisherStore []publisherstore.Publisher
|
||||
}
|
||||
|
||||
type Storer interface {
|
||||
|
@ -89,6 +92,12 @@ func WithArtifactStore(wds artifactstore.Store) Option {
|
|||
}
|
||||
}
|
||||
|
||||
func WithPublishers(pub []publisherstore.Publisher) Option {
|
||||
return func(s *Server) {
|
||||
s.publisherStore = pub
|
||||
}
|
||||
}
|
||||
|
||||
func New(cfg *config.Config, opts ...Option) (Server, error) {
|
||||
r := mux.NewRouter()
|
||||
s := Server{
|
||||
|
@ -102,14 +111,14 @@ func New(cfg *config.Config, opts ...Option) (Server, error) {
|
|||
// TODO: remove from future version (v0.6.0) endpoint with version
|
||||
r.HandleFunc("/download/{gitoid}", s.DownloadHandler)
|
||||
r.HandleFunc("/upload", s.UploadHandler)
|
||||
if cfg.EnableGraphql {
|
||||
if cfg.EnableSQLStore && cfg.EnableGraphql {
|
||||
r.Handle("/query", s.Query(s.sqlClient))
|
||||
r.Handle("/v1/query", s.Query(s.sqlClient))
|
||||
}
|
||||
|
||||
r.HandleFunc("/v1/download/{gitoid}", s.DownloadHandler)
|
||||
r.HandleFunc("/v1/upload", s.UploadHandler)
|
||||
if cfg.GraphqlWebClientEnable {
|
||||
if cfg.EnableSQLStore && cfg.EnableGraphql && cfg.GraphqlWebClientEnable {
|
||||
r.Handle("/",
|
||||
playground.Handler("Archivista", "/v1/query"),
|
||||
)
|
||||
|
@ -163,9 +172,20 @@ func (s *Server) Upload(ctx context.Context, r io.Reader) (api.UploadResponse, e
|
|||
}
|
||||
}
|
||||
|
||||
if err := s.metadataStore.Store(ctx, gid.String(), payload); err != nil {
|
||||
logrus.Errorf("received error from metadata store: %+v", err)
|
||||
return api.UploadResponse{}, err
|
||||
if s.metadataStore != nil {
|
||||
if err := s.metadataStore.Store(ctx, gid.String(), payload); err != nil {
|
||||
logrus.Errorf("received error from metadata store: %+v", err)
|
||||
return api.UploadResponse{}, err
|
||||
}
|
||||
}
|
||||
|
||||
if s.publisherStore != nil {
|
||||
for _, publisher := range s.publisherStore {
|
||||
// TODO: Make publish asynchrouns and use goroutine
|
||||
if err := publisher.Publish(ctx, gid.String(), payload); err != nil {
|
||||
logrus.Errorf("received error from publisher: %+v", err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return api.UploadResponse{Gitoid: gid.String()}, nil
|
||||
|
@ -272,7 +292,9 @@ func (s *Server) DownloadHandler(w http.ResponseWriter, r *http.Request) {
|
|||
// @Tags graphql
|
||||
// @Router /v1/query [post]
|
||||
func (s *Server) Query(sqlclient *ent.Client) *handler.Server {
|
||||
srv := handler.NewDefaultServer(archivista.NewSchema(sqlclient))
|
||||
srv := handler.New(archivista.NewSchema(sqlclient))
|
||||
srv.AddTransport(transport.GET{})
|
||||
srv.AddTransport(transport.POST{})
|
||||
srv.Use(entgql.Transactioner{TxOpener: sqlclient})
|
||||
return srv
|
||||
}
|
||||
|
@ -304,7 +326,6 @@ func (s *Server) AllArtifactsHandler(w http.ResponseWriter, r *http.Request) {
|
|||
}
|
||||
|
||||
w.Header().Set("Content-Type", "application/json")
|
||||
|
||||
}
|
||||
|
||||
// @Summary List Artifact Versions
|
||||
|
@ -465,7 +486,7 @@ func (s *Server) DownloadArtifactHandler(w http.ResponseWriter, r *http.Request)
|
|||
|
||||
defer func() {
|
||||
if err := file.Close(); err != nil {
|
||||
logrus.Errorf(fmt.Sprintf("failed to close artifact distribution file %s: %+v", distro.FileLocation, err))
|
||||
logrus.Errorf("failed to close artifact distribution file %s: %+v", distro.FileLocation, err)
|
||||
}
|
||||
}()
|
||||
|
||||
|
|
|
@ -117,6 +117,7 @@ func (ut *UTServerSuite) Test_New() {
|
|||
cfg := new(config.Config)
|
||||
cfg.EnableGraphql = true
|
||||
cfg.GraphqlWebClientEnable = true
|
||||
cfg.EnableSQLStore = true
|
||||
var err error
|
||||
ut.testServer, err = New(cfg, WithMetadataStore(ut.mockedStorer), WithObjectStore(ut.mockedStorerGetter))
|
||||
ut.NoError(err)
|
||||
|
@ -133,7 +134,6 @@ func (ut *UTServerSuite) Test_New() {
|
|||
allPaths = append(allPaths, pathTemplate)
|
||||
return nil
|
||||
})
|
||||
|
||||
if err != nil {
|
||||
ut.FailNow(err.Error())
|
||||
}
|
||||
|
@ -151,6 +151,7 @@ func (ut *UTServerSuite) Test_New_EnableGraphQL_False() {
|
|||
cfg := new(config.Config)
|
||||
cfg.EnableGraphql = false
|
||||
cfg.GraphqlWebClientEnable = true
|
||||
cfg.EnableSQLStore = true
|
||||
var err error
|
||||
ut.testServer, err = New(cfg, WithMetadataStore(ut.mockedStorer), WithObjectStore(ut.mockedStorerGetter))
|
||||
ut.NoError(err)
|
||||
|
@ -167,7 +168,6 @@ func (ut *UTServerSuite) Test_New_EnableGraphQL_False() {
|
|||
allPaths = append(allPaths, pathTemplate)
|
||||
return nil
|
||||
})
|
||||
|
||||
if err != nil {
|
||||
ut.FailNow(err.Error())
|
||||
}
|
||||
|
@ -177,7 +177,6 @@ func (ut *UTServerSuite) Test_New_EnableGraphQL_False() {
|
|||
ut.Contains(allPaths, "/v1/download/{gitoid}")
|
||||
ut.Contains(allPaths, "/v1/upload")
|
||||
ut.NotContains(allPaths, "/v1/query")
|
||||
ut.Contains(allPaths, "/")
|
||||
ut.Contains(allPaths, "/swagger/")
|
||||
}
|
||||
|
||||
|
@ -185,6 +184,7 @@ func (ut *UTServerSuite) Test_New_GraphqlWebClientEnable_False() {
|
|||
cfg := new(config.Config)
|
||||
cfg.EnableGraphql = true
|
||||
cfg.GraphqlWebClientEnable = false
|
||||
cfg.EnableSQLStore = true
|
||||
var err error
|
||||
ut.testServer, err = New(cfg, WithMetadataStore(ut.mockedStorer), WithObjectStore(ut.mockedStorerGetter))
|
||||
ut.NoError(err)
|
||||
|
@ -201,7 +201,6 @@ func (ut *UTServerSuite) Test_New_GraphqlWebClientEnable_False() {
|
|||
allPaths = append(allPaths, pathTemplate)
|
||||
return nil
|
||||
})
|
||||
|
||||
if err != nil {
|
||||
ut.FailNow(err.Error())
|
||||
}
|
||||
|
@ -264,7 +263,6 @@ func (ut *UTServerSuite) Test_Upload_FailedMetadatStprage() {
|
|||
}
|
||||
|
||||
func (ut *UTServerSuite) Test_UploadHandler() {
|
||||
|
||||
w := httptest.NewRecorder()
|
||||
requestBody := []byte("fakePayload")
|
||||
request := httptest.NewRequest(http.MethodPost, "/v1/upload", bytes.NewBuffer(requestBody))
|
||||
|
@ -277,7 +275,6 @@ func (ut *UTServerSuite) Test_UploadHandler() {
|
|||
}
|
||||
|
||||
func (ut *UTServerSuite) Test_UploadHandler_WrongMethod() {
|
||||
|
||||
w := httptest.NewRecorder()
|
||||
requestBody := []byte("fakePayload")
|
||||
request := httptest.NewRequest(http.MethodGet, "/upload", bytes.NewBuffer(requestBody))
|
||||
|
@ -291,7 +288,6 @@ func (ut *UTServerSuite) Test_UploadHandler_WrongMethod() {
|
|||
}
|
||||
|
||||
func (ut *UTServerSuite) Test_UploadHandler_FailureUpload() {
|
||||
|
||||
w := httptest.NewRecorder()
|
||||
requestBody := []byte("fakePayload")
|
||||
request := httptest.NewRequest(http.MethodPost, "/upload", bytes.NewBuffer(requestBody))
|
||||
|
|
|
@ -30,6 +30,7 @@ import (
|
|||
"github.com/in-toto/archivista/pkg/metadatastorage/sqlstore"
|
||||
"github.com/in-toto/archivista/pkg/objectstorage/blobstore"
|
||||
"github.com/in-toto/archivista/pkg/objectstorage/filestore"
|
||||
"github.com/in-toto/archivista/pkg/publisherstore"
|
||||
"github.com/minio/minio-go/v7/pkg/credentials"
|
||||
"github.com/sirupsen/logrus"
|
||||
)
|
||||
|
@ -53,10 +54,11 @@ type ArchivistaService struct {
|
|||
// Setup Archivista Service
|
||||
func (a *ArchivistaService) Setup() (*Server, error) {
|
||||
var (
|
||||
level logrus.Level
|
||||
err error
|
||||
sqlStore *sqlstore.Store
|
||||
fileStore StorerGetter
|
||||
level logrus.Level
|
||||
err error
|
||||
sqlStore *sqlstore.Store
|
||||
fileStore StorerGetter
|
||||
publisherStore []publisherstore.Publisher
|
||||
)
|
||||
serverOpts := make([]Option, 0)
|
||||
|
||||
|
@ -95,29 +97,37 @@ func (a *ArchivistaService) Setup() (*Server, error) {
|
|||
}
|
||||
serverOpts = append(serverOpts, WithObjectStore(fileStore))
|
||||
|
||||
entClient, err := sqlstore.NewEntClient(
|
||||
a.Cfg.SQLStoreBackend,
|
||||
a.Cfg.SQLStoreConnectionString,
|
||||
sqlstore.ClientWithMaxIdleConns(a.Cfg.SQLStoreMaxIdleConnections),
|
||||
sqlstore.ClientWithMaxOpenConns(a.Cfg.SQLStoreMaxOpenConnections),
|
||||
sqlstore.ClientWithConnMaxLifetime(a.Cfg.SQLStoreConnectionMaxLifetime),
|
||||
)
|
||||
if a.Cfg.EnableSQLStore {
|
||||
entClient, err := sqlstore.NewEntClient(
|
||||
a.Cfg.SQLStoreBackend,
|
||||
a.Cfg.SQLStoreConnectionString,
|
||||
sqlstore.ClientWithMaxIdleConns(a.Cfg.SQLStoreMaxIdleConnections),
|
||||
sqlstore.ClientWithMaxOpenConns(a.Cfg.SQLStoreMaxOpenConnections),
|
||||
sqlstore.ClientWithConnMaxLifetime(a.Cfg.SQLStoreConnectionMaxLifetime),
|
||||
)
|
||||
if err != nil {
|
||||
logrus.Fatalf("could not create ent client: %+v", err)
|
||||
}
|
||||
|
||||
if err != nil {
|
||||
logrus.Fatalf("could not create ent client: %+v", err)
|
||||
// Continue with the existing setup code for the SQLStore
|
||||
sqlStore, a.sqlStoreCh, err = sqlstore.New(context.Background(), entClient)
|
||||
if err != nil {
|
||||
logrus.Fatalf("error initializing new SQLStore: %+v", err)
|
||||
}
|
||||
serverOpts = append(serverOpts, WithMetadataStore(sqlStore))
|
||||
|
||||
// Add SQL client for ent
|
||||
sqlClient := sqlStore.GetClient()
|
||||
serverOpts = append(serverOpts, WithEntSqlClient(sqlClient))
|
||||
} else {
|
||||
sqlStoreChan := make(chan error)
|
||||
a.sqlStoreCh = sqlStoreChan
|
||||
go func() {
|
||||
<-a.Ctx.Done()
|
||||
close(sqlStoreChan)
|
||||
}()
|
||||
}
|
||||
|
||||
// Continue with the existing setup code for the SQLStore
|
||||
sqlStore, a.sqlStoreCh, err = sqlstore.New(context.Background(), entClient)
|
||||
if err != nil {
|
||||
logrus.Fatalf("error initializing new SQLStore: %+v", err)
|
||||
}
|
||||
serverOpts = append(serverOpts, WithMetadataStore(sqlStore))
|
||||
|
||||
// Add SQL client for ent
|
||||
sqlClient := sqlStore.GetClient()
|
||||
serverOpts = append(serverOpts, WithEntSqlClient(sqlClient))
|
||||
|
||||
// initialize the artifact store
|
||||
if a.Cfg.EnableArtifactStore {
|
||||
wds, err := artifactstore.New(artifactstore.WithConfigFile(a.Cfg.ArtifactStoreConfig))
|
||||
|
@ -128,20 +138,17 @@ func (a *ArchivistaService) Setup() (*Server, error) {
|
|||
serverOpts = append(serverOpts, WithArtifactStore(wds))
|
||||
}
|
||||
|
||||
if a.Cfg.Publisher != nil {
|
||||
publisherStore = publisherstore.New(a.Cfg)
|
||||
serverOpts = append(serverOpts, WithPublishers(publisherStore))
|
||||
}
|
||||
// Create the Archivista server with all options
|
||||
server, err := New(a.Cfg, serverOpts...)
|
||||
if err != nil {
|
||||
logrus.Fatalf("could not create archivista server: %+v", err)
|
||||
}
|
||||
|
||||
// Ensure background processes are managed
|
||||
go func() {
|
||||
<-a.sqlStoreCh
|
||||
<-a.fileStoreCh
|
||||
}()
|
||||
|
||||
logrus.WithField("duration", time.Since(now)).Infof("completed phase: initializing storage clients")
|
||||
|
||||
return &server, nil
|
||||
}
|
||||
|
||||
|
@ -162,11 +169,13 @@ func (a *ArchivistaService) initObjectStore() (StorerGetter, <-chan error, error
|
|||
|
||||
case "BLOB":
|
||||
var creds *credentials.Credentials
|
||||
if a.Cfg.BlobStoreCredentialType == "IAM" {
|
||||
|
||||
switch a.Cfg.BlobStoreCredentialType {
|
||||
case "IAM":
|
||||
creds = credentials.NewIAM("")
|
||||
} else if a.Cfg.BlobStoreCredentialType == "ACCESS_KEY" {
|
||||
case "ACCESS_KEY":
|
||||
creds = credentials.NewStaticV4(a.Cfg.BlobStoreAccessKeyId, a.Cfg.BlobStoreSecretAccessKeyId, "")
|
||||
} else {
|
||||
default:
|
||||
logrus.Fatalf("invalid blob store credential type: %s", a.Cfg.BlobStoreCredentialType)
|
||||
}
|
||||
return blobstore.New(
|
||||
|
|
Loading…
Reference in New Issue