mirror of https://github.com/wasmCloud/wadm.git
Compare commits
326 Commits
client-v0. ... main
326 commits: 50d41559ab … b78d4bf1b6 (author, date, and commit message details not captured)
@ -19,8 +19,7 @@
},
"extensions": [
"rust-lang.rust-analyzer",
"tamasfe.even-better-toml",
"serayuzgur.crates"
"tamasfe.even-better-toml"
]
}
},

@ -0,0 +1,5 @@
if ! has nix_direnv_version || ! nix_direnv_version 3.0.6; then
source_url "https://raw.githubusercontent.com/nix-community/nix-direnv/3.0.6/direnvrc" "sha256-RYcUJaRMf8oF5LznDrlCXbkOQrywm0HDv1VjYGaJGdM="
fi
watch_file rust-toolchain.toml
use flake

@ -0,0 +1,38 @@
name: Install and configure wkg (linux only)

inputs:
wkg-version:
description: version of wkg to install. Should be a valid tag from https://github.com/bytecodealliance/wasm-pkg-tools/releases
default: "v0.6.0"
oci-username:
description: username for oci registry
required: true
oci-password:
description: password for oci registry
required: true

runs:
using: composite
steps:
- name: Download wkg
shell: bash
run: |
curl --fail -L https://github.com/bytecodealliance/wasm-pkg-tools/releases/download/${{ inputs.wkg-version }}/wkg-x86_64-unknown-linux-gnu -o wkg
chmod +x wkg;
echo "$(realpath .)" >> "$GITHUB_PATH";
- name: Generate and set wkg config
shell: bash
env:
WKG_OCI_USERNAME: ${{ inputs.oci-username }}
WKG_OCI_PASSWORD: ${{ inputs.oci-password }}
run: |
cat << EOF > wkg-config.toml
[namespace_registries]
wasmcloud = "wasmcloud.com"
wrpc = "bytecodealliance.org"
wasi = "wasi.dev"

[registry."wasmcloud.com".oci]
auth = { username = "${WKG_OCI_USERNAME}", password = "${WKG_OCI_PASSWORD}" }
EOF
echo "WKG_CONFIG_FILE=$(realpath wkg-config.toml)" >> $GITHUB_ENV

@ -0,0 +1,6 @@
# .github/release.yml

changelog:
exclude:
authors:
- dependabot

@ -2,6 +2,7 @@ name: chart
|
|||
|
||||
env:
|
||||
HELM_VERSION: v3.14.0
|
||||
CHART_TESTING_NAMESPACE: chart-testing
|
||||
|
||||
on:
|
||||
push:
|
||||
|
@ -12,12 +13,15 @@ on:
|
|||
- 'charts/**'
|
||||
- '.github/workflows/chart.yml'
|
||||
|
||||
permissions:
|
||||
contents: read
|
||||
|
||||
jobs:
|
||||
validate:
|
||||
runs-on: ubuntu-22.04
|
||||
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
- uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
|
||||
with:
|
||||
fetch-depth: 0
|
||||
|
||||
|
@ -26,18 +30,18 @@ jobs:
|
|||
git fetch origin main:main
|
||||
|
||||
- name: Set up Helm
|
||||
uses: azure/setup-helm@v4
|
||||
uses: azure/setup-helm@b9e51907a09c216f16ebe8536097933489208112 # v4.3.0
|
||||
with:
|
||||
version: ${{ env.HELM_VERSION }}
|
||||
|
||||
# Used by helm chart-testing below
|
||||
- name: Set up Python
|
||||
uses: actions/setup-python@v5.1.1
|
||||
uses: actions/setup-python@a26af69be951a213d495a4c3e4e4022e16d87065 # v5.6.0
|
||||
with:
|
||||
python-version: '3.12.2'
|
||||
|
||||
- name: Set up chart-testing
|
||||
uses: helm/chart-testing-action@v2.6.1
|
||||
uses: helm/chart-testing-action@0d28d3144d3a25ea2cc349d6e59901c4ff469b3b # v2.7.0
|
||||
with:
|
||||
version: v3.10.1
|
||||
yamllint_version: 1.35.1
|
||||
|
@ -48,7 +52,7 @@ jobs:
|
|||
ct lint --config charts/wadm/ct.yaml
|
||||
|
||||
- name: Create kind cluster
|
||||
uses: helm/kind-action@v1.10.0
|
||||
uses: helm/kind-action@a1b0e391336a6ee6713a0583f8c6240d70863de3 # v1.12.0
|
||||
with:
|
||||
version: "v0.22.0"
|
||||
|
||||
|
@ -56,24 +60,29 @@ jobs:
|
|||
run: |
|
||||
helm repo add nats https://nats-io.github.io/k8s/helm/charts/
|
||||
helm repo update
|
||||
helm install nats nats/nats -f charts/wadm/ci/nats.yaml
|
||||
helm install nats nats/nats -f charts/wadm/ci/nats.yaml --namespace ${{ env.CHART_TESTING_NAMESPACE }} --create-namespace
|
||||
|
||||
- name: Run chart-testing (install)
|
||||
- name: Run chart-testing install / same namespace
|
||||
run: |
|
||||
ct install --config charts/wadm/ct.yaml
|
||||
ct install --config charts/wadm/ct.yaml --namespace ${{ env.CHART_TESTING_NAMESPACE }}
|
||||
|
||||
- name: Run chart-testing install / across namespaces
|
||||
run: |
|
||||
ct install --config charts/wadm/ct.yaml --helm-extra-set-args "--set=wadm.config.nats.server=nats://nats-headless.${{ env.CHART_TESTING_NAMESPACE }}.svc.cluster.local"
|
||||
|
||||
publish:
|
||||
if: ${{ startsWith(github.ref, 'refs/tags/chart-v') }}
|
||||
runs-on: ubuntu-22.04
|
||||
needs: validate
|
||||
permissions:
|
||||
contents: read
|
||||
packages: write
|
||||
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
- uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
|
||||
|
||||
- name: Set up Helm
|
||||
uses: azure/setup-helm@v4
|
||||
uses: azure/setup-helm@b9e51907a09c216f16ebe8536097933489208112 # v4.3.0
|
||||
with:
|
||||
version: ${{ env.HELM_VERSION }}
|
||||
|
||||
|
@ -82,7 +91,7 @@ jobs:
|
|||
helm package charts/wadm -d .helm-charts
|
||||
|
||||
- name: Login to GHCR
|
||||
uses: docker/login-action@v3
|
||||
uses: docker/login-action@184bdaa0721073962dff0199f1fb9940f07167d1 # v3.5.0
|
||||
with:
|
||||
registry: ghcr.io
|
||||
username: ${{ github.repository_owner }}
|
||||
|
|
|
@ -5,6 +5,9 @@ on:
|
|||
branches:
|
||||
- main
|
||||
|
||||
permissions:
|
||||
contents: read
|
||||
|
||||
jobs:
|
||||
test:
|
||||
name: e2e
|
||||
|
@ -12,37 +15,42 @@ jobs:
|
|||
strategy:
|
||||
fail-fast: false
|
||||
matrix:
|
||||
# TODO: Re-enable the multitenant and upgrades tests in followup to #247
|
||||
# e2e_test: [e2e_multiple_hosts, e2e_multitenant, e2e_upgrades]
|
||||
e2e_test: [e2e_multiple_hosts, e2e_upgrades]
|
||||
test: [e2e_multiple_hosts, e2e_upgrades, e2e_shared]
|
||||
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
- uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
|
||||
|
||||
- name: Install latest Rust stable toolchain
|
||||
uses: dtolnay/rust-toolchain@stable
|
||||
uses: dtolnay/rust-toolchain@1ff72ee08e3cb84d84adba594e0a297990fc1ed3 # stable
|
||||
with:
|
||||
toolchain: stable
|
||||
components: clippy, rustfmt
|
||||
|
||||
# Cache: rust
|
||||
- uses: Swatinem/rust-cache@v2
|
||||
- uses: Swatinem/rust-cache@98c8021b550208e191a6a3145459bfc9fb29c4c0 # v2.8.0
|
||||
with:
|
||||
key: 'ubuntu-22.04-rust-cache'
|
||||
|
||||
# If the test uses a docker compose file, pre-emptively pull images used in docker compose
|
||||
- name: Pull images for test ${{ matrix.test }}
|
||||
shell: bash
|
||||
run: |
|
||||
export DOCKER_COMPOSE_FILE=tests/docker-compose-${{ matrix.test }}.yaml;
|
||||
[[ -f "$DOCKER_COMPOSE_FILE" ]] && docker compose -f $DOCKER_COMPOSE_FILE pull;
|
||||
|
||||
# Run e2e tests in a matrix for efficiency
|
||||
- name: Run tests ${{ matrix.e2e_test }}
|
||||
- name: Run tests ${{ matrix.test }}
|
||||
id: test
|
||||
env:
|
||||
WADM_E2E_TEST: ${{ matrix.e2e_test }}
|
||||
WADM_E2E_TEST: ${{ matrix.test }}
|
||||
run: make test-individual-e2e
|
||||
|
||||
# if the previous step fails, upload logs
|
||||
- name: Upload logs for debugging
|
||||
uses: actions/upload-artifact@v4
|
||||
uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2
|
||||
if: ${{ failure() && steps.test.outcome == 'failure' }}
|
||||
with:
|
||||
name: e2e-logs-${{ matrix.e2e_test }}
|
||||
name: e2e-logs-${{ matrix.test }}
|
||||
path: ./tests/e2e_log/*
|
||||
# Be nice and only retain the logs for 7 days
|
||||
retention-days: 7
|
||||
|
|
|
@ -9,141 +9,132 @@ on:
|
|||
- 'client-v*'
|
||||
workflow_dispatch: # Allow manual creation of artifacts without a release
|
||||
|
||||
permissions:
|
||||
contents: read
|
||||
|
||||
defaults:
|
||||
run:
|
||||
shell: bash
|
||||
|
||||
jobs:
|
||||
build:
|
||||
name: build release assets
|
||||
runs-on: ${{ matrix.config.os }}
|
||||
runs-on: ${{ matrix.config.runnerOs }}
|
||||
outputs:
|
||||
version_output: ${{ steps.version_output.outputs.version }}
|
||||
strategy:
|
||||
matrix:
|
||||
config:
|
||||
# NOTE: We are building on an older version of ubuntu because of libc compatibility
|
||||
# issues. Namely, if we build on a new version of libc, it isn't backwards compatible with
|
||||
# old versions. But if we build on the old version, it is compatible with the newer
|
||||
# versions running in ubuntu 22 and its ilk
|
||||
- {
|
||||
os: 'ubuntu-20.04',
|
||||
arch: 'amd64',
|
||||
extension: '',
|
||||
targetPath: 'target/release/',
|
||||
runnerOs: 'ubuntu-latest',
|
||||
buildCommand: 'cargo zigbuild',
|
||||
target: 'x86_64-unknown-linux-musl',
|
||||
uploadArtifactSuffix: 'linux-amd64',
|
||||
buildOutputPath: 'target/x86_64-unknown-linux-musl/release/wadm',
|
||||
}
|
||||
- {
|
||||
os: 'ubuntu-20.04',
|
||||
arch: 'aarch64',
|
||||
extension: '',
|
||||
targetPath: 'target/aarch64-unknown-linux-gnu/release/',
|
||||
runnerOs: 'ubuntu-latest',
|
||||
buildCommand: 'cargo zigbuild',
|
||||
target: 'aarch64-unknown-linux-musl',
|
||||
uploadArtifactSuffix: 'linux-aarch64',
|
||||
buildOutputPath: 'target/aarch64-unknown-linux-musl/release/wadm',
|
||||
}
|
||||
- {
|
||||
os: 'macos-13',
|
||||
arch: 'amd64',
|
||||
extension: '',
|
||||
targetPath: 'target/release/',
|
||||
runnerOs: 'macos-14',
|
||||
buildCommand: 'cargo zigbuild',
|
||||
target: 'x86_64-apple-darwin',
|
||||
uploadArtifactSuffix: 'macos-amd64',
|
||||
buildOutputPath: 'target/x86_64-apple-darwin/release/wadm',
|
||||
}
|
||||
- {
|
||||
os: 'windows-latest',
|
||||
arch: 'amd64',
|
||||
extension: '.exe',
|
||||
targetPath: 'target/release/',
|
||||
runnerOs: 'macos-14',
|
||||
buildCommand: 'cargo zigbuild',
|
||||
target: 'aarch64-apple-darwin',
|
||||
uploadArtifactSuffix: 'macos-aarch64',
|
||||
buildOutputPath: 'target/aarch64-apple-darwin/release/wadm',
|
||||
}
|
||||
- {
|
||||
os: 'macos-latest',
|
||||
arch: 'aarch64',
|
||||
extension: '',
|
||||
targetPath: 'target/release/',
|
||||
runnerOs: 'windows-latest',
|
||||
buildCommand: 'cargo build',
|
||||
target: 'x86_64-pc-windows-msvc',
|
||||
uploadArtifactSuffix: 'windows-amd64',
|
||||
buildOutputPath: 'target/x86_64-pc-windows-msvc/release/wadm.exe',
|
||||
}
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
- uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
|
||||
|
||||
- name: set the release version (tag)
|
||||
if: startsWith(github.ref, 'refs/tags/v')
|
||||
shell: bash
|
||||
run: echo "RELEASE_VERSION=${GITHUB_REF/refs\/tags\//}" >> $GITHUB_ENV
|
||||
if: ${{ startsWith(github.ref, 'refs/tags/v') }}
|
||||
run: |
|
||||
echo "RELEASE_VERSION=${GITHUB_REF/refs\/tags\//}" >> $GITHUB_ENV
|
||||
|
||||
- name: set the release version (main)
|
||||
if: github.ref == 'refs/heads/main'
|
||||
shell: bash
|
||||
run: echo "RELEASE_VERSION=canary" >> $GITHUB_ENV
|
||||
if: ${{ github.ref == 'refs/heads/main' }}
|
||||
run: |
|
||||
echo "RELEASE_VERSION=canary" >> $GITHUB_ENV
|
||||
|
||||
- name: Output Version
|
||||
id: version_output
|
||||
run: echo "version=$RELEASE_VERSION" >> $GITHUB_OUTPUT
|
||||
|
||||
- name: lowercase the runner OS name
|
||||
shell: bash
|
||||
run: |
|
||||
OS=$(echo "${{ runner.os }}" | tr '[:upper:]' '[:lower:]')
|
||||
echo "RUNNER_OS=$OS" >> $GITHUB_ENV
|
||||
- name: Install Zig
|
||||
uses: mlugg/setup-zig@8d6198c65fb0feaa111df26e6b467fea8345e46f # v2.0.5
|
||||
with:
|
||||
version: 0.13.0
|
||||
|
||||
- name: Install latest Rust stable toolchain
|
||||
uses: dtolnay/rust-toolchain@stable
|
||||
if: matrix.config.arch != 'aarch64' || startsWith(matrix.config.os, 'macos')
|
||||
uses: dtolnay/rust-toolchain@1ff72ee08e3cb84d84adba594e0a297990fc1ed3 # stable
|
||||
with:
|
||||
toolchain: stable
|
||||
components: clippy, rustfmt
|
||||
target: ${{ matrix.config.target }}
|
||||
|
||||
- name: setup for cross-compile builds
|
||||
if: matrix.config.arch == 'aarch64' && matrix.config.os == 'ubuntu-20.04'
|
||||
- name: Install cargo zigbuild
|
||||
uses: taiki-e/install-action@2c73a741d1544cc346e9b0af11868feba03eb69d # v2.58.9
|
||||
with:
|
||||
tool: cargo-zigbuild
|
||||
|
||||
- name: Build wadm
|
||||
run: |
|
||||
sudo apt-get update
|
||||
sudo apt install gcc-aarch64-linux-gnu g++-aarch64-linux-gnu
|
||||
rustup toolchain install stable-aarch64-unknown-linux-gnu
|
||||
rustup target add --toolchain stable-aarch64-unknown-linux-gnu aarch64-unknown-linux-gnu
|
||||
echo "CARGO_TARGET_AARCH64_UNKNOWN_LINUX_GNU_LINKER=aarch64-linux-gnu-gcc" >> $GITHUB_ENV
|
||||
echo "CC_aarch64_unknown_linux_gnu=aarch64-linux-gnu-gcc" >> $GITHUB_ENV
|
||||
echo "CXX_aarch64_unknown_linux_gnu=aarch64-linux-gnu-g++" >> $GITHUB_ENV
|
||||
${{ matrix.config.buildCommand }} --release --bin wadm --target ${{ matrix.config.target }}
|
||||
|
||||
- name: Install latest Rust stable toolchain
|
||||
uses: dtolnay/rust-toolchain@stable
|
||||
if: matrix.config.arch == 'aarch64' && matrix.config.os == 'ubuntu-20.04'
|
||||
- uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2
|
||||
with:
|
||||
toolchain: stable
|
||||
components: clippy, rustfmt
|
||||
target: aarch64-unknown-linux-gnu
|
||||
|
||||
- name: build release (amd64 linux, macos, windows)
|
||||
if: matrix.config.arch != 'aarch64' || startsWith(matrix.config.os, 'macos')
|
||||
run: 'cargo build --release --bin wadm'
|
||||
|
||||
- name: build release (arm64 linux)
|
||||
if: matrix.config.arch == 'aarch64' && matrix.config.os == 'ubuntu-20.04'
|
||||
run: 'cargo build --release --bin wadm --target aarch64-unknown-linux-gnu'
|
||||
|
||||
- uses: actions/upload-artifact@v4
|
||||
with:
|
||||
name: wadm-${{ env.RELEASE_VERSION }}-${{ env.RUNNER_OS }}-${{ matrix.config.arch }}
|
||||
name: wadm-${{ env.RELEASE_VERSION }}-${{ matrix.config.uploadArtifactSuffix }}
|
||||
if-no-files-found: error
|
||||
path: |
|
||||
${{ matrix.config.targetPath }}wadm${{ matrix.config.extension }}
|
||||
${{ matrix.config.buildOutputPath }}
|
||||
|
||||
publish:
|
||||
if: ${{ startsWith(github.ref, 'refs/tags/v') }}
|
||||
name: publish release assets
|
||||
runs-on: ubuntu-20.04
|
||||
runs-on: ubuntu-latest
|
||||
needs: build
|
||||
if: startsWith(github.ref, 'refs/tags/v')
|
||||
permissions:
|
||||
contents: write
|
||||
env:
|
||||
RELEASE_VERSION: ${{ needs.build.outputs.version_output }}
|
||||
steps:
|
||||
- name: download release assets
|
||||
uses: actions/download-artifact@v4
|
||||
- name: Generate Checksums
|
||||
- name: Download release assets
|
||||
uses: actions/download-artifact@634f93cb2916e3fdff6788551b99b062d0335ce0 # v5.0.0
|
||||
|
||||
- name: Prepare release
|
||||
run: |
|
||||
for dir in */; do
|
||||
cd "$dir" || continue
|
||||
sum=$(sha256sum * | awk '{ print $1 }')
|
||||
echo "$dir:$sum" >> checksums-${{ env.RELEASE_VERSION }}.txt
|
||||
cd ..
|
||||
test -d "$dir" || continue
|
||||
tarball="${dir%/}.tar.gz"
|
||||
tar -czvf "${tarball}" "$dir"
|
||||
sha256sum "${tarball}" >> SHA256SUMS
|
||||
done
|
||||
- name: Package Binaries
|
||||
run: for dir in */; do tar -czvf "${dir%/}.tar.gz" "$dir"; done
|
||||
|
||||
- name: Create github release
|
||||
uses: softprops/action-gh-release@v2
|
||||
uses: softprops/action-gh-release@72f2c25fcb47643c292f7107632f7a47c1df5cd8 # v2.3.2
|
||||
with:
|
||||
token: ${{ secrets.GITHUB_TOKEN }}
|
||||
prerelease: false
|
||||
draft: false
|
||||
files: |
|
||||
checksums-${{ env.RELEASE_VERSION }}.txt
|
||||
SHA256SUMS
|
||||
wadm-${{ env.RELEASE_VERSION }}-linux-aarch64.tar.gz
|
||||
wadm-${{ env.RELEASE_VERSION }}-linux-amd64.tar.gz
|
||||
wadm-${{ env.RELEASE_VERSION }}-macos-aarch64.tar.gz
|
||||
|
@ -151,35 +142,38 @@ jobs:
|
|||
wadm-${{ env.RELEASE_VERSION }}-windows-amd64.tar.gz
|
||||
|
||||
crate:
|
||||
if: ${{ startsWith(github.ref, 'refs/tags/v') || startsWith(github.ref, 'refs/tags/types-v') || startsWith(github.ref, 'refs/tags/client-v') }}
|
||||
name: Publish crate
|
||||
runs-on: ubuntu-latest
|
||||
if: startsWith(github.ref, 'refs/tags/v') || startsWith(github.ref, 'refs/tags/types-v') || startsWith(github.ref, 'refs/tags/client-v')
|
||||
needs: build
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
- uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
|
||||
- name: Install latest Rust stable toolchain
|
||||
uses: dtolnay/rust-toolchain@stable
|
||||
uses: dtolnay/rust-toolchain@1ff72ee08e3cb84d84adba594e0a297990fc1ed3 # stable
|
||||
with:
|
||||
toolchain: stable
|
||||
|
||||
- name: Cargo login
|
||||
run: cargo login ${{ secrets.CRATES_TOKEN }}
|
||||
shell: bash
|
||||
run: |
|
||||
cargo login ${{ secrets.CRATES_TOKEN }}
|
||||
|
||||
- name: Cargo publish wadm-types
|
||||
if: startsWith(github.ref, 'refs/tags/types-v')
|
||||
run: cargo publish
|
||||
if: ${{ startsWith(github.ref, 'refs/tags/types-v') }}
|
||||
working-directory: ./crates/wadm-types
|
||||
shell: bash
|
||||
run: |
|
||||
cargo publish
|
||||
|
||||
- name: Cargo publish wadm lib
|
||||
if: startsWith(github.ref, 'refs/tags/v')
|
||||
run: cargo publish
|
||||
if: ${{ startsWith(github.ref, 'refs/tags/v') }}
|
||||
working-directory: ./crates/wadm
|
||||
shell: bash
|
||||
run: |
|
||||
cargo publish
|
||||
|
||||
- name: Cargo publish wadm-client
|
||||
if: startsWith(github.ref, 'refs/tags/client-v')
|
||||
run: cargo publish
|
||||
if: ${{ startsWith(github.ref, 'refs/tags/client-v') }}
|
||||
working-directory: ./crates/wadm-client
|
||||
shell: bash
|
||||
run: |
|
||||
cargo publish
|
||||
|
||||
docker-image:
|
||||
name: Build and push docker images
|
||||
|
@ -191,28 +185,32 @@ jobs:
|
|||
env:
|
||||
RELEASE_VERSION: ${{ needs.build.outputs.version_output }}
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
- uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
|
||||
|
||||
- name: Set up QEMU
|
||||
uses: docker/setup-qemu-action@v3
|
||||
uses: docker/setup-qemu-action@29109295f81e9208d7d86ff1c6c12d2833863392 # v3.6.0
|
||||
|
||||
- name: Set up Docker Buildx
|
||||
uses: docker/setup-buildx-action@v3
|
||||
uses: docker/setup-buildx-action@e468171a9de216ec08956ac3ada2f0791b6bd435 # v3.11.1
|
||||
|
||||
- uses: actions/download-artifact@v4
|
||||
- uses: actions/download-artifact@634f93cb2916e3fdff6788551b99b062d0335ce0 # v5.0.0
|
||||
with:
|
||||
name: wadm-${{ env.RELEASE_VERSION }}-linux-aarch64
|
||||
path: ./artifacts
|
||||
- run: mv ./artifacts/wadm ./artifacts/wadm-${{ env.RELEASE_VERSION }}-linux-aarch64 && chmod +x ./artifacts/wadm-${{ env.RELEASE_VERSION }}-linux-aarch64
|
||||
pattern: '*linux*'
|
||||
|
||||
- uses: actions/download-artifact@v4
|
||||
with:
|
||||
name: wadm-${{ env.RELEASE_VERSION }}-linux-amd64
|
||||
path: ./artifacts
|
||||
- run: mv ./artifacts/wadm ./artifacts/wadm-${{ env.RELEASE_VERSION }}-linux-amd64 && chmod +x ./artifacts/wadm-${{ env.RELEASE_VERSION }}-linux-amd64
|
||||
- name: Prepare container artifacts
|
||||
working-directory: ./artifacts
|
||||
run: |
|
||||
for dir in */; do
|
||||
name="${dir%/}"
|
||||
mv "${name}/wadm" wadm
|
||||
chmod +x wadm
|
||||
rmdir "${name}"
|
||||
mv wadm "${name}"
|
||||
done
|
||||
|
||||
- name: Login to GitHub Container Registry
|
||||
uses: docker/login-action@v3
|
||||
uses: docker/login-action@184bdaa0721073962dff0199f1fb9940f07167d1 # v3.5.0
|
||||
with:
|
||||
registry: ghcr.io
|
||||
username: ${{ github.repository_owner }}
|
||||
|
@ -222,9 +220,14 @@ jobs:
|
|||
run: |
|
||||
echo "OWNER=${GITHUB_REPOSITORY_OWNER,,}" >>$GITHUB_ENV
|
||||
|
||||
- name: Set the formatted release version for the docker tag
|
||||
if: ${{ startsWith(github.ref, 'refs/tags/v') }}
|
||||
run: |
|
||||
echo "RELEASE_VERSION_DOCKER_TAG=${RELEASE_VERSION#v}" >> $GITHUB_ENV
|
||||
|
||||
- name: Build and push (tag)
|
||||
uses: docker/build-push-action@v6
|
||||
if: startsWith(github.ref, 'refs/tags/v')
|
||||
uses: docker/build-push-action@263435318d21b8e681c14492fe198d362a7d2c83 # v6.18.0
|
||||
if: ${{ startsWith(github.ref, 'refs/tags/v') }}
|
||||
with:
|
||||
push: true
|
||||
platforms: linux/amd64,linux/arm64
|
||||
|
@ -232,11 +235,30 @@ jobs:
|
|||
build-args: |
|
||||
BIN_ARM64=./artifacts/wadm-${{ env.RELEASE_VERSION }}-linux-aarch64
|
||||
BIN_AMD64=./artifacts/wadm-${{ env.RELEASE_VERSION }}-linux-amd64
|
||||
tags: ghcr.io/${{ env.OWNER }}/wadm:latest,ghcr.io/${{ env.OWNER }}/wadm:${{ env.RELEASE_VERSION }}
|
||||
tags: |
|
||||
ghcr.io/${{ env.OWNER }}/wadm:latest
|
||||
ghcr.io/${{ env.OWNER }}/wadm:${{ env.RELEASE_VERSION }},
|
||||
ghcr.io/${{ env.OWNER }}/wadm:${{ env.RELEASE_VERSION_DOCKER_TAG }}
|
||||
|
||||
- name: Build and push wolfi (tag)
|
||||
uses: docker/build-push-action@263435318d21b8e681c14492fe198d362a7d2c83 # v6.18.0
|
||||
if: ${{ startsWith(github.ref, 'refs/tags/v') }}
|
||||
with:
|
||||
push: true
|
||||
platforms: linux/amd64,linux/arm64
|
||||
context: ./
|
||||
file: ./Dockerfile.wolfi
|
||||
build-args: |
|
||||
BIN_ARM64=./artifacts/wadm-${{ env.RELEASE_VERSION }}-linux-aarch64
|
||||
BIN_AMD64=./artifacts/wadm-${{ env.RELEASE_VERSION }}-linux-amd64
|
||||
tags: |
|
||||
ghcr.io/${{ env.OWNER }}/wadm:latest-wolfi
|
||||
ghcr.io/${{ env.OWNER }}/wadm:${{ env.RELEASE_VERSION }}-wolfi
|
||||
ghcr.io/${{ env.OWNER }}/wadm:${{ env.RELEASE_VERSION_DOCKER_TAG }}-wolfi
|
||||
|
||||
- name: Build and push (main)
|
||||
uses: docker/build-push-action@v6
|
||||
if: github.ref == 'refs/heads/main'
|
||||
uses: docker/build-push-action@263435318d21b8e681c14492fe198d362a7d2c83 # v6.18.0
|
||||
if: ${{ github.ref == 'refs/heads/main' }}
|
||||
with:
|
||||
push: true
|
||||
platforms: linux/amd64,linux/arm64
|
||||
|
@ -245,3 +267,16 @@ jobs:
|
|||
BIN_ARM64=./artifacts/wadm-${{ env.RELEASE_VERSION }}-linux-aarch64
|
||||
BIN_AMD64=./artifacts/wadm-${{ env.RELEASE_VERSION }}-linux-amd64
|
||||
tags: ghcr.io/${{ env.OWNER }}/wadm:canary
|
||||
|
||||
- name: Build and push (main)
|
||||
uses: docker/build-push-action@263435318d21b8e681c14492fe198d362a7d2c83 # v6.18.0
|
||||
if: ${{ github.ref == 'refs/heads/main' }}
|
||||
with:
|
||||
push: true
|
||||
platforms: linux/amd64,linux/arm64
|
||||
context: ./
|
||||
file: ./Dockerfile.wolfi
|
||||
build-args: |
|
||||
BIN_ARM64=./artifacts/wadm-${{ env.RELEASE_VERSION }}-linux-aarch64
|
||||
BIN_AMD64=./artifacts/wadm-${{ env.RELEASE_VERSION }}-linux-amd64
|
||||
tags: ghcr.io/${{ env.OWNER }}/wadm:canary-wolfi
|
|
@ -0,0 +1,73 @@
|
|||
# This workflow uses actions that are not certified by GitHub. They are provided
|
||||
# by a third-party and are governed by separate terms of service, privacy
|
||||
# policy, and support documentation.
|
||||
|
||||
name: Scorecard supply-chain security
|
||||
on:
|
||||
# For Branch-Protection check. Only the default branch is supported. See
|
||||
# https://github.com/ossf/scorecard/blob/main/docs/checks.md#branch-protection
|
||||
branch_protection_rule:
|
||||
# To guarantee Maintained check is occasionally updated. See
|
||||
# https://github.com/ossf/scorecard/blob/main/docs/checks.md#maintained
|
||||
schedule:
|
||||
- cron: '28 13 * * 3'
|
||||
push:
|
||||
branches: [ "main" ]
|
||||
|
||||
# Declare default permissions as read only.
|
||||
permissions: read-all
|
||||
|
||||
jobs:
|
||||
analysis:
|
||||
name: Scorecard analysis
|
||||
runs-on: ubuntu-latest
|
||||
permissions:
|
||||
# Needed to upload the results to code-scanning dashboard.
|
||||
security-events: write
|
||||
# Needed to publish results and get a badge (see publish_results below).
|
||||
id-token: write
|
||||
# Uncomment the permissions below if installing in a private repository.
|
||||
# contents: read
|
||||
# actions: read
|
||||
|
||||
steps:
|
||||
- name: "Checkout code"
|
||||
uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
|
||||
with:
|
||||
persist-credentials: false
|
||||
|
||||
- name: "Run analysis"
|
||||
uses: ossf/scorecard-action@05b42c624433fc40578a4040d5cf5e36ddca8cde # v2.4.2
|
||||
with:
|
||||
results_file: results.sarif
|
||||
results_format: sarif
|
||||
# (Optional) "write" PAT token. Uncomment the `repo_token` line below if:
|
||||
# - you want to enable the Branch-Protection check on a *public* repository, or
|
||||
# - you are installing Scorecard on a *private* repository
|
||||
# To create the PAT, follow the steps in https://github.com/ossf/scorecard-action?tab=readme-ov-file#authentication-with-fine-grained-pat-optional.
|
||||
# repo_token: ${{ secrets.SCORECARD_TOKEN }}
|
||||
|
||||
# Public repositories:
|
||||
# - Publish results to OpenSSF REST API for easy access by consumers
|
||||
# - Allows the repository to include the Scorecard badge.
|
||||
# - See https://github.com/ossf/scorecard-action#publishing-results.
|
||||
# For private repositories:
|
||||
# - `publish_results` will always be set to `false`, regardless
|
||||
# of the value entered here.
|
||||
publish_results: true
|
||||
|
||||
# Upload the results as artifacts (optional). Commenting out will disable uploads of run results in SARIF
|
||||
# format to the repository Actions tab.
|
||||
- name: "Upload artifact"
|
||||
uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v3.pre.node20
|
||||
with:
|
||||
name: SARIF file
|
||||
path: results.sarif
|
||||
retention-days: 5
|
||||
|
||||
# Upload the results to GitHub's code scanning dashboard (optional).
|
||||
# Commenting out will disable upload of results to your repo's Code Scanning dashboard
|
||||
- name: "Upload to code-scanning"
|
||||
uses: github/codeql-action/upload-sarif@d6bbdef45e766d081b84a2def353b0055f728d3e # v3.29.3
|
||||
with:
|
||||
sarif_file: results.sarif
|
|
@ -5,6 +5,9 @@ on:
|
|||
branches:
|
||||
- main
|
||||
|
||||
permissions:
|
||||
contents: read
|
||||
|
||||
jobs:
|
||||
test:
|
||||
name: Test
|
||||
|
@ -12,19 +15,19 @@ jobs:
|
|||
strategy:
|
||||
matrix:
|
||||
os: [ubuntu-22.04]
|
||||
nats_version: [2.10.7]
|
||||
nats_version: [2.10.22]
|
||||
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
- uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
|
||||
|
||||
- name: Install latest Rust stable toolchain
|
||||
uses: dtolnay/rust-toolchain@stable
|
||||
uses: dtolnay/rust-toolchain@1ff72ee08e3cb84d84adba594e0a297990fc1ed3 # stable
|
||||
with:
|
||||
toolchain: stable
|
||||
components: clippy, rustfmt
|
||||
|
||||
# Cache: rust
|
||||
- uses: Swatinem/rust-cache@v2
|
||||
- uses: Swatinem/rust-cache@98c8021b550208e191a6a3145459bfc9fb29c4c0 # v2.8.0
|
||||
with:
|
||||
key: '${{ matrix.os }}-rust-cache'
|
||||
|
||||
|
@ -36,8 +39,11 @@ jobs:
|
|||
echo 'Wadm JSON Schema is out of date. Please run `cargo run --bin wadm-schema` and commit the changes.'
|
||||
exit 1
|
||||
fi
|
||||
- name: Install wash
|
||||
uses: wasmCloud/common-actions/install-wash@main
|
||||
|
||||
- name: install wash
|
||||
uses: taiki-e/install-action@2c73a741d1544cc346e9b0af11868feba03eb69d # v2.58.9
|
||||
with:
|
||||
tool: wash@0.38.0
|
||||
|
||||
# GH Actions doesn't currently support passing args to service containers and there is no way
|
||||
# to use an environment variable to turn on jetstream for nats, so we manually start it here
|
||||
|
@ -48,6 +54,15 @@ jobs:
|
|||
run: |
|
||||
cargo build --all-features --all-targets --workspace
|
||||
|
||||
# Make sure the wadm crate works well with feature combinations
|
||||
# The above command builds the workspace and tests with no features
|
||||
- name: Check wadm crate with features
|
||||
run: |
|
||||
cargo check -p wadm --no-default-features
|
||||
cargo check -p wadm --features cli
|
||||
cargo check -p wadm --features http_admin
|
||||
cargo check -p wadm --features cli,http_admin
|
||||
|
||||
# Run all tests
|
||||
- name: Run tests
|
||||
run: |
|
||||
|
|
|
@ -3,28 +3,45 @@ name: wit-wasmcloud-wadm-publish
|
|||
on:
|
||||
push:
|
||||
tags:
|
||||
- 'wit-wasmcloud-wadm-v*'
|
||||
- "wit-wasmcloud-wadm-v*"
|
||||
|
||||
permissions:
|
||||
contents: read
|
||||
|
||||
jobs:
|
||||
build:
|
||||
runs-on: ubuntu-latest
|
||||
permissions:
|
||||
contents: write
|
||||
packages: write
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
with:
|
||||
sparse-checkout: |
|
||||
wit
|
||||
- name: Extract tag context
|
||||
id: ctx
|
||||
run: |
|
||||
- uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
|
||||
with:
|
||||
sparse-checkout: |
|
||||
wit
|
||||
.github
|
||||
- name: Extract tag context
|
||||
id: ctx
|
||||
run: |
|
||||
version=${GITHUB_REF_NAME#wit-wasmcloud-wadm-v}
|
||||
echo "version=${version}" >> "$GITHUB_OUTPUT"
|
||||
echo "tarball=wit-wasmcloud-wadm-${version}.tar.gz" >> "$GITHUB_OUTPUT"
|
||||
echo "version is ${version}"
|
||||
- name: Build
|
||||
run: |
|
||||
tar cvzf ${{ steps.ctx.outputs.tarball }} -C wit wadm/wit
|
||||
- name: Release
|
||||
uses: softprops/action-gh-release@v2
|
||||
with:
|
||||
files: ${{ steps.ctx.outputs.tarball }}
|
||||
make_latest: "false"
|
||||
- uses: ./.github/actions/configure-wkg
|
||||
with:
|
||||
oci-username: ${{ github.repository_owner }}
|
||||
oci-password: ${{ secrets.GITHUB_TOKEN }}
|
||||
- name: Build
|
||||
run: wkg wit build --wit-dir wit/wadm -o package.wasm
|
||||
- name: Push version-tagged WebAssembly binary to GHCR
|
||||
run: wkg publish package.wasm
|
||||
- name: Package tarball for release
|
||||
run: |
|
||||
mkdir -p release/wit
|
||||
cp wit/wadm/*.wit release/wit/
|
||||
tar cvzf ${{ steps.ctx.outputs.tarball }} -C release wit
|
||||
- name: Release
|
||||
uses: softprops/action-gh-release@72f2c25fcb47643c292f7107632f7a47c1df5cd8 # v2.3.2
|
||||
with:
|
||||
files: ${{ steps.ctx.outputs.tarball }}
|
||||
make_latest: "false"
|
||||
|
|
|
@ -8,4 +8,7 @@ tests/e2e_log/

# Ignore IDE specific files
.idea/
.vscode/
.vscode/

.direnv/
result

File diff suppressed because it is too large
60 Cargo.toml
|
@ -1,13 +1,17 @@
|
|||
[package]
|
||||
name = "wadm-cli"
|
||||
description = "wasmCloud Application Deployment Manager: A tool for running Wasm applications in wasmCloud"
|
||||
version = "0.14.0"
|
||||
version.workspace = true
|
||||
edition = "2021"
|
||||
authors = ["wasmCloud Team"]
|
||||
keywords = ["webassembly", "wasmcloud", "wadm"]
|
||||
license = "Apache-2.0"
|
||||
readme = "README.md"
|
||||
repository = "https://github.com/wasmcloud/wadm"
|
||||
default-run = "wadm"
|
||||
|
||||
[workspace.package]
|
||||
version = "0.21.0"
|
||||
|
||||
[features]
|
||||
default = []
|
||||
|
@ -19,11 +23,7 @@ members = ["crates/*"]
|
|||
|
||||
[dependencies]
|
||||
anyhow = { workspace = true }
|
||||
async-nats = { workspace = true }
|
||||
async-trait = { workspace = true }
|
||||
clap = { workspace = true, features = ["derive", "cargo", "env"] }
|
||||
futures = { workspace = true }
|
||||
nkeys = { workspace = true }
|
||||
# One version back to avoid clashes with 0.10 of otlp
|
||||
opentelemetry = { workspace = true, features = ["rt-tokio"] }
|
||||
# 0.10 to avoid protoc dep
|
||||
|
@ -35,27 +35,28 @@ schemars = { workspace = true }
|
|||
serde_json = { workspace = true }
|
||||
tokio = { workspace = true, features = ["full"] }
|
||||
tracing = { workspace = true, features = ["log"] }
|
||||
tracing-futures = { workspace = true }
|
||||
tracing-opentelemetry = { workspace = true }
|
||||
tracing-subscriber = { workspace = true, features = ["env-filter", "json"] }
|
||||
wasmcloud-control-interface = { workspace = true }
|
||||
wadm = { workspace = true }
|
||||
wadm = { workspace = true, features = ["cli", "http_admin"] }
|
||||
wadm-types = { workspace = true }
|
||||
|
||||
[workspace.dependencies]
|
||||
anyhow = "1"
|
||||
async-nats = "0.33"
|
||||
async-nats = "0.39"
|
||||
async-trait = "0.1"
|
||||
base64 = "0.22.1"
|
||||
bytes = "1"
|
||||
chrono = "0.4"
|
||||
clap = { version = "4", features = ["derive", "cargo", "env"] }
|
||||
cloudevents-sdk = "0.7"
|
||||
cloudevents-sdk = "0.8"
|
||||
futures = "0.3"
|
||||
http = { version = "1", default-features = false }
|
||||
http-body-util = { version = "0.1", default-features = false }
|
||||
hyper = { version = "1", default-features = false }
|
||||
hyper-util = { version = "0.1", default-features = false }
|
||||
indexmap = { version = "2", features = ["serde"] }
|
||||
jsonschema = "0.17"
|
||||
jsonschema = "0.29"
|
||||
lazy_static = "1"
|
||||
nkeys = "0.4.3"
|
||||
nkeys = "0.4.5"
|
||||
# One version back to avoid clashes with 0.10 of otlp
|
||||
opentelemetry = { version = "0.17", features = ["rt-tokio"] }
|
||||
# 0.10 to avoid protoc dep
|
||||
|
@ -63,40 +64,45 @@ opentelemetry-otlp = { version = "0.10", features = [
|
|||
"http-proto",
|
||||
"reqwest-client",
|
||||
] }
|
||||
rand = { version = "0.8", features = ["small_rng"] }
|
||||
regex = "1.10.6"
|
||||
rand = { version = "0.9", features = ["small_rng"] }
|
||||
# NOTE(thomastaylor312): Pinning this temporarily to 1.10 due to transitive dependency with oci
|
||||
# crates that are pinned to 1.10
|
||||
regex = "~1.10"
|
||||
schemars = "0.8"
|
||||
semver = { version = "1.0.16", features = ["serde"] }
|
||||
semver = { version = "1.0.25", features = ["serde"] }
|
||||
serde = "1"
|
||||
serde_json = "1"
|
||||
serde_yaml = "0.9"
|
||||
sha2 = "0.10.2"
|
||||
thiserror = "1"
|
||||
sha2 = "0.10.9"
|
||||
thiserror = "2"
|
||||
tokio = { version = "1", default-features = false }
|
||||
tracing = { version = "0.1", features = ["log"] }
|
||||
tracing-futures = "0.2"
|
||||
tracing-opentelemetry = { version = "0.17" }
|
||||
tracing-subscriber = { version = "0.3.7", features = ["env-filter", "json"] }
|
||||
ulid = { version = "1", features = ["serde"] }
|
||||
utoipa = "4"
|
||||
utoipa = "5"
|
||||
uuid = "1"
|
||||
wadm = { version = "0.14.0", path = "./crates/wadm" }
|
||||
wadm-client = { version = "0.3.0", path = "./crates/wadm-client" }
|
||||
wadm-types = { version = "0.3.0", path = "./crates/wadm-types" }
|
||||
wasmcloud-control-interface = "1.0.0"
|
||||
wasmcloud-secrets-types = "0.2.0"
|
||||
wit-bindgen-wrpc = { version = "0.3.7", default-features = false }
|
||||
wadm = { version = "0.21", path = "./crates/wadm" }
|
||||
wadm-client = { version = "0.10", path = "./crates/wadm-client" }
|
||||
wadm-types = { version = "0.8", path = "./crates/wadm-types" }
|
||||
wasmcloud-control-interface = "2.4.0"
|
||||
wasmcloud-secrets-types = "0.5.0"
|
||||
wit-bindgen-wrpc = { version = "0.9", default-features = false }
|
||||
wit-bindgen = { version = "0.36.0", default-features = false }
|
||||
|
||||
[dev-dependencies]
|
||||
base64 = { workspace = true }
|
||||
async-nats = { workspace = true }
|
||||
chrono = { workspace = true }
|
||||
futures = { workspace = true }
|
||||
serde = { workspace = true }
|
||||
serde_json = { workspace = true }
|
||||
serde_yaml = { workspace = true }
|
||||
serial_test = "3"
|
||||
wadm-client = { workspace = true }
|
||||
wadm-types = { workspace = true }
|
||||
testcontainers = "0.21"
|
||||
wasmcloud-control-interface = { workspace = true }
|
||||
testcontainers = "0.25"
|
||||
|
||||
[build-dependencies]
|
||||
schemars = { workspace = true }
|
||||
|
|
|
@ -0,0 +1,17 @@
FROM cgr.dev/chainguard/wolfi-base:latest AS base

FROM base AS base-amd64
ARG BIN_AMD64
ARG BIN=$BIN_AMD64

FROM base AS base-arm64
ARG BIN_ARM64
ARG BIN=$BIN_ARM64

FROM base-$TARGETARCH

# Copy application binary from disk
COPY ${BIN} /usr/local/bin/wadm

# Run the application
ENTRYPOINT ["/usr/local/bin/wadm"]

@ -0,0 +1,25 @@
# MAINTAINERS

The following individuals are responsible for reviewing code, managing issues, and ensuring the overall quality of `wadm`.

## @wasmCloud/wadm-maintainers

Name: Joonas Bergius
GitHub: @joonas
Organization: Cosmonic

Name: Dan Norris
GitHub: @protochron
Organization: Cosmonic

Name: Taylor Thomas
GitHub: @thomastaylor312
Organization: Cosmonic

Name: Ahmed Tadde
GitHub: @ahmedtadde
Organization: PreciseTarget

Name: Brooks Townsend
GitHub: @brooksmtownsend
Organization: Cosmonic

@ -0,0 +1,3 @@
# Reporting a security issue

Please refer to the [wasmCloud Security Process and Policy](https://github.com/wasmCloud/wasmCloud/blob/main/SECURITY.md) for details on how to report security issues and vulnerabilities.

@ -15,10 +15,10 @@ type: application
# This is the chart version. This version number should be incremented each time you make changes
# to the chart and its templates, including the app version.
# Versions are expected to follow Semantic Versioning (https://semver.org/)
version: "0.2.5"
version: '0.2.10'

# This is the version number of the application being deployed. This version number should be
# incremented each time you make changes to the application. Versions are not expected to
# follow Semantic Versioning. They should reflect the version the application is using.
# It is recommended to use it with quotes.
appVersion: "v0.13.1"
appVersion: 'v0.21.0'

@ -1,4 +0,0 @@
|
|||
wadm:
|
||||
config:
|
||||
nats:
|
||||
server: "nats.default.svc.cluster.local:4222"
|
|
@ -36,10 +36,15 @@ Common labels
|
|||
{{- define "wadm.labels" -}}
|
||||
helm.sh/chart: {{ include "wadm.chart" . }}
|
||||
{{ include "wadm.selectorLabels" . }}
|
||||
app.kubernetes.io/component: wadm
|
||||
{{- if .Chart.AppVersion }}
|
||||
app.kubernetes.io/version: {{ .Chart.AppVersion | quote }}
|
||||
{{- end }}
|
||||
app.kubernetes.io/managed-by: {{ .Release.Service }}
|
||||
app.kubernetes.io/part-of: wadm
|
||||
{{- with .Values.additionalLabels }}
|
||||
{{ . | toYaml }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
|
||||
{{/*
|
||||
|
@ -50,6 +55,15 @@ app.kubernetes.io/name: {{ include "wadm.name" . }}
|
|||
app.kubernetes.io/instance: {{ .Release.Name }}
|
||||
{{- end }}
|
||||
|
||||
{{- define "wadm.nats.server" -}}
|
||||
- name: WADM_NATS_SERVER
|
||||
{{- if .Values.wadm.config.nats.server }}
|
||||
value: {{ .Values.wadm.config.nats.server | quote }}
|
||||
{{- else }}
|
||||
value: nats-headless.{{ .Release.Namespace }}.svc.cluster.local
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
|
||||
{{- define "wadm.nats.auth" -}}
|
||||
{{- if .Values.wadm.config.nats.creds.secretName -}}
|
||||
- name: WADM_NATS_CREDS_FILE
|
||||
|
@ -89,4 +103,4 @@ volumes:
|
|||
path: "nats.creds"
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
|
|
|
@ -34,8 +34,7 @@ spec:
|
|||
image: "{{ .Values.wadm.image.repository }}:{{ .Values.wadm.image.tag | default .Chart.AppVersion }}"
|
||||
imagePullPolicy: {{ .Values.wadm.image.pullPolicy }}
|
||||
env:
|
||||
- name: WADM_NATS_SERVER
|
||||
value: {{ .Values.wadm.config.nats.server | quote }}
|
||||
{{- include "wadm.nats.server" . | nindent 12 }}
|
||||
{{- include "wadm.nats.auth" . | nindent 12 }}
|
||||
{{- if .Values.wadm.config.nats.tlsCaFile }}
|
||||
- name: WADM_NATS_TLS_CA_FILE
|
||||
|
@ -57,9 +56,9 @@ spec:
|
|||
- name: WADM_TRACING_ENDPOINT
|
||||
value: {{ .Values.wadm.config.tracingEndpoint | quote }}
|
||||
{{- end }}
|
||||
{{- if .Values.wadm.config.jetstreamDomain }}
|
||||
{{- if .Values.wadm.config.nats.jetstreamDomain }}
|
||||
- name: WADM_JETSTREAM_DOMAIN
|
||||
value: {{ .Values.wadm.config.jetstreamDomain | quote }}
|
||||
value: {{ .Values.wadm.config.nats.jetstreamDomain | quote }}
|
||||
{{- end }}
|
||||
{{- if .Values.wadm.config.maxJobs }}
|
||||
- name: WADM_MAX_JOBS
|
||||
|
|
|
@ -14,7 +14,7 @@ wadm:
|
|||
hostId: ""
|
||||
logLevel: ""
|
||||
nats:
|
||||
server: "127.0.0.1:4222"
|
||||
server: ""
|
||||
jetstreamDomain: ""
|
||||
tlsCaFile: ""
|
||||
creds:
|
||||
|
@ -34,6 +34,9 @@ imagePullSecrets: []
|
|||
nameOverride: ""
|
||||
fullnameOverride: ""
|
||||
|
||||
additionalLabels: {}
|
||||
# app: wadm
|
||||
|
||||
serviceAccount:
|
||||
# Specifies whether a service account should be created
|
||||
create: true
|
||||
|
|
|
@ -1,7 +1,7 @@
|
|||
[package]
|
||||
name = "wadm-client"
|
||||
description = "A client library for interacting with the wadm API"
|
||||
version = "0.3.0"
|
||||
version = "0.10.0"
|
||||
edition = "2021"
|
||||
authors = ["wasmCloud Team"]
|
||||
keywords = ["webassembly", "wasmcloud", "wadm"]
|
||||
|
@ -11,14 +11,10 @@ repository = "https://github.com/wasmcloud/wadm"
|
|||
[dependencies]
|
||||
anyhow = { workspace = true }
|
||||
async-nats = { workspace = true }
|
||||
bytes = { workspace = true }
|
||||
futures = { workspace = true }
|
||||
nkeys = { workspace = true }
|
||||
serde = { workspace = true }
|
||||
serde_json = { workspace = true }
|
||||
serde_yaml = { workspace = true }
|
||||
thiserror = { workspace = true }
|
||||
tokio = { workspace = true, features = ["full"] }
|
||||
tracing = { workspace = true, features = ["log"] }
|
||||
tracing-futures = { workspace = true }
|
||||
wadm-types = { workspace = true }
|
||||
|
|
|
@ -74,9 +74,8 @@ impl TopicGenerator {

/// Returns the full topic for WADM status subscriptions
pub fn wadm_status_topic(&self, app_name: &str) -> String {
format!(
"{}.{}.{}",
WADM_STATUS_API_PREFIX, self.topic_prefix, app_name
)
// Extract just the lattice name from topic_prefix
let lattice = self.topic_prefix.split('.').last().unwrap_or("default");
format!("{}.{}.{}", WADM_STATUS_API_PREFIX, lattice, app_name)
}
}

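A minimal sketch of the new status-topic behavior, assuming a topic prefix of the form `wadm.api.<lattice>` and a status prefix constant of `wadm.status` (both assumed here; the real constant and `TopicGenerator` live in the wadm-client crate):

```rust
// Hypothetical stand-in illustrating the change above; the real constant
// values and TopicGenerator are defined in crates/wadm-client.
const WADM_STATUS_API_PREFIX: &str = "wadm.status"; // assumed value

fn wadm_status_topic(topic_prefix: &str, app_name: &str) -> String {
    // The prefix is assumed to look like "wadm.api.<lattice>"; only the
    // trailing lattice id is wanted for status subscriptions.
    let lattice = topic_prefix.split('.').last().unwrap_or("default");
    format!("{}.{}.{}", WADM_STATUS_API_PREFIX, lattice, app_name)
}

fn main() {
    // Previously the whole prefix was interpolated into the topic; now only
    // the lattice id is used, yielding "wadm.status.<lattice>.<app>".
    assert_eq!(
        wadm_status_topic("wadm.api.default", "echo"),
        "wadm.status.default.echo"
    );
}
```
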
@ -1,7 +1,7 @@
|
|||
[package]
|
||||
name = "wadm-types"
|
||||
description = "Types and validators for the wadm API"
|
||||
version = "0.3.0"
|
||||
version = "0.8.3"
|
||||
edition = "2021"
|
||||
authors = ["wasmCloud Team"]
|
||||
keywords = ["webassembly", "wasmcloud", "wadm"]
|
||||
|
@ -9,36 +9,20 @@ license = "Apache-2.0"
|
|||
repository = "https://github.com/wasmcloud/wadm"
|
||||
|
||||
[features]
|
||||
default = []
|
||||
wit = ["wit-bindgen-wrpc"]
|
||||
wit = []
|
||||
|
||||
[dependencies]
|
||||
serde_yaml = { workspace = true }
|
||||
anyhow = { workspace = true }
|
||||
async-nats = { workspace = true }
|
||||
async-trait = { workspace = true }
|
||||
base64 = { workspace = true }
|
||||
bytes = { workspace = true }
|
||||
chrono = { workspace = true }
|
||||
cloudevents-sdk = { workspace = true }
|
||||
indexmap = { workspace = true, features = ["serde"] }
|
||||
jsonschema = { workspace = true }
|
||||
lazy_static = { workspace = true }
|
||||
nkeys = { workspace = true }
|
||||
rand = { workspace = true, features = ["small_rng"] }
|
||||
regex = { workspace = true }
|
||||
schemars = { workspace = true }
|
||||
semver = { workspace = true, features = ["serde"] }
|
||||
serde = { workspace = true }
|
||||
serde = { workspace = true, features = ["derive"] }
|
||||
serde_json = { workspace = true }
|
||||
sha2 = { workspace = true }
|
||||
thiserror = { workspace = true }
|
||||
tokio = { workspace = true, features = ["full"] }
|
||||
tracing = { workspace = true, features = ["log"] }
|
||||
tracing-futures = { workspace = true }
|
||||
ulid = { workspace = true, features = ["serde"] }
|
||||
serde_yaml = { workspace = true }
|
||||
utoipa = { workspace = true }
|
||||
uuid = { workspace = true }
|
||||
wasmcloud-control-interface = { workspace = true }
|
||||
wasmcloud-secrets-types = { workspace = true }
|
||||
wit-bindgen-wrpc = { workspace = true, optional = true }
|
||||
|
||||
[target.'cfg(not(target_family = "wasm"))'.dependencies]
|
||||
tokio = { workspace = true, features = ["full"] }
|
||||
wit-bindgen-wrpc = { workspace = true }
|
||||
|
||||
[target.'cfg(target_family = "wasm")'.dependencies]
|
||||
wit-bindgen = { workspace = true, features = ["macros"] }
|
||||
|
|
|
@ -23,6 +23,14 @@ pub struct GetModelResponse {
pub manifest: Option<Manifest>,
}

#[derive(Debug, Serialize, Deserialize)]
pub struct ListModelsResponse {
pub result: GetResult,
#[serde(default)]
pub message: String,
pub models: Vec<ModelSummary>,
}

/// Possible outcomes of a get request
#[derive(Debug, Serialize, Deserialize, PartialEq, Eq)]
#[serde(rename_all = "lowercase")]
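A small sketch of how a `ListModelsResponse`-shaped payload could be deserialized; the `GetResult` and `ModelSummary` definitions below are simplified stand-ins for illustration only, not the real wadm-types definitions:

```rust
use serde::Deserialize;

// Simplified stand-ins so the example is self-contained (assumptions, not
// the actual wadm-types declarations).
#[derive(Debug, Deserialize, PartialEq)]
#[serde(rename_all = "lowercase")]
enum GetResult {
    Success,
    Error,
    NotFound,
}

#[allow(dead_code)]
#[derive(Debug, Deserialize)]
struct ModelSummary {
    name: String,
    version: String,
}

#[derive(Debug, Deserialize)]
struct ListModelsResponse {
    result: GetResult,
    // `message` may be omitted by the server; serde(default) fills in "".
    #[serde(default)]
    message: String,
    models: Vec<ModelSummary>,
}

fn main() {
    let raw = r#"{"result":"success","models":[{"name":"echo","version":"v0.0.1"}]}"#;
    let resp: ListModelsResponse = serde_json::from_str(raw).expect("valid response JSON");
    assert_eq!(resp.result, GetResult::Success);
    assert!(resp.message.is_empty());
    assert_eq!(resp.models.len(), 1);
}
```
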
|
@ -275,6 +283,13 @@ impl StatusInfo {
message: message.to_owned(),
}
}

pub fn unhealthy(message: &str) -> Self {
StatusInfo {
status_type: StatusType::Unhealthy,
message: message.to_owned(),
}
}
}

/// All possible status types

@ -289,6 +304,7 @@ pub enum StatusType {
#[serde(alias = "ready")]
Deployed,
Failed,
Unhealthy,
}

// Implementing add makes it easy for use to get an aggregate status by summing all of them together

@ -316,6 +332,8 @@ impl std::ops::Add for StatusType {
(_, Self::Waiting) => Self::Waiting,
(Self::Reconciling, _) => Self::Reconciling,
(_, Self::Reconciling) => Self::Reconciling,
(Self::Unhealthy, _) => Self::Unhealthy,
(_, Self::Unhealthy) => Self::Unhealthy,
// This is technically covered in the first comparison, but we'll be explicit
(Self::Deployed, Self::Deployed) => Self::Deployed,
}

@ -383,6 +401,20 @@ mod test {
StatusType::Failed
));

assert!(matches!(
[StatusType::Deployed, StatusType::Unhealthy]
.into_iter()
.sum(),
StatusType::Unhealthy
));

assert!(matches!(
[StatusType::Reconciling, StatusType::Unhealthy]
.into_iter()
.sum(),
StatusType::Reconciling
));

let empty: Vec<StatusType> = Vec::new();
assert!(matches!(empty.into_iter().sum(), StatusType::Undeployed));
}

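A compact sketch of the aggregation rule these tests exercise, using a rank-based stand-in instead of the real `Add`/`Sum` impls; the precedence (Failed over Waiting over Reconciling over Unhealthy over Deployed, with Undeployed as the empty default) is inferred from the hunks above:

```rust
// Illustrative stand-in only; the real enum and its Add/Sum impls live in
// crates/wadm-types/src/api.rs.
#[allow(dead_code)]
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
enum StatusType {
    Undeployed,
    Reconciling,
    Deployed,
    Failed,
    Waiting,
    Unhealthy,
}

impl StatusType {
    // Higher rank wins when two component statuses are combined (assumed
    // ordering, matching the match arms and tests shown above).
    fn rank(self) -> u8 {
        match self {
            Self::Undeployed => 0,
            Self::Deployed => 1,
            Self::Unhealthy => 2,
            Self::Reconciling => 3,
            Self::Waiting => 4,
            Self::Failed => 5,
        }
    }
}

// Aggregate component statuses into one application status, mirroring what
// summing StatusType values does in wadm.
fn aggregate(statuses: &[StatusType]) -> StatusType {
    statuses
        .iter()
        .copied()
        .max_by_key(|s| s.rank())
        .unwrap_or(StatusType::Undeployed)
}

fn main() {
    use StatusType::*;
    assert_eq!(aggregate(&[Deployed, Unhealthy]), Unhealthy);
    assert_eq!(aggregate(&[Reconciling, Unhealthy]), Reconciling);
    assert_eq!(aggregate(&[]), Undeployed);
}
```
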
@ -5,16 +5,37 @@ use crate::{
|
|||
},
|
||||
CapabilityProperties, Component, ComponentProperties, ConfigDefinition, ConfigProperty,
|
||||
LinkProperty, Manifest, Metadata, Policy, Properties, SecretProperty, SecretSourceProperty,
|
||||
Specification, Spread, SpreadScalerProperty, TargetConfig, Trait, TraitProperty,
|
||||
SharedApplicationComponentProperties, Specification, Spread, SpreadScalerProperty,
|
||||
TargetConfig, Trait, TraitProperty,
|
||||
};
|
||||
use wasmcloud::wadm;
|
||||
|
||||
#[cfg(all(feature = "wit", target_family = "wasm"))]
|
||||
wit_bindgen::generate!({
|
||||
path: "wit",
|
||||
additional_derives: [
|
||||
serde::Serialize,
|
||||
serde::Deserialize,
|
||||
],
|
||||
with: {
|
||||
"wasmcloud:wadm/types@0.2.0": generate,
|
||||
"wasmcloud:wadm/client@0.2.0": generate,
|
||||
"wasmcloud:wadm/handler@0.2.0": generate
|
||||
}
|
||||
});
|
||||
|
||||
#[cfg(all(feature = "wit", not(target_family = "wasm")))]
|
||||
wit_bindgen_wrpc::generate!({
|
||||
generate_unused_types: true,
|
||||
additional_derives: [
|
||||
serde::Serialize,
|
||||
serde::Deserialize,
|
||||
],
|
||||
with: {
|
||||
"wasmcloud:wadm/types@0.2.0": generate,
|
||||
"wasmcloud:wadm/client@0.2.0": generate,
|
||||
"wasmcloud:wadm/handler@0.2.0": generate
|
||||
}
|
||||
});
|
||||
|
||||
// Trait implementations for converting types in the API module to the generated types
|
||||
|
@ -65,7 +86,7 @@ impl From<Policy> for wadm::types::Policy {
|
|||
fn from(policy: Policy) -> Self {
|
||||
wadm::types::Policy {
|
||||
name: policy.name,
|
||||
properties: policy.properties.into_iter().map(|p| p.into()).collect(),
|
||||
properties: policy.properties.into_iter().collect(),
|
||||
type_: policy.policy_type,
|
||||
}
|
||||
}
|
||||
|
@ -87,6 +108,7 @@ impl From<Properties> for wadm::types::Properties {
|
|||
impl From<ComponentProperties> for wadm::types::ComponentProperties {
|
||||
fn from(properties: ComponentProperties) -> Self {
|
||||
wadm::types::ComponentProperties {
|
||||
application: properties.application.map(Into::into),
|
||||
image: properties.image,
|
||||
id: properties.id,
|
||||
config: properties.config.into_iter().map(|c| c.into()).collect(),
|
||||
|
@ -98,6 +120,7 @@ impl From<ComponentProperties> for wadm::types::ComponentProperties {
|
|||
impl From<CapabilityProperties> for wadm::types::CapabilityProperties {
|
||||
fn from(properties: CapabilityProperties) -> Self {
|
||||
wadm::types::CapabilityProperties {
|
||||
application: properties.application.map(Into::into),
|
||||
image: properties.image,
|
||||
id: properties.id,
|
||||
config: properties.config.into_iter().map(|c| c.into()).collect(),
|
||||
|
@ -135,6 +158,17 @@ impl From<SecretSourceProperty> for wadm::types::SecretSourceProperty {
|
|||
}
|
||||
}
|
||||
|
||||
impl From<SharedApplicationComponentProperties>
|
||||
for wadm::types::SharedApplicationComponentProperties
|
||||
{
|
||||
fn from(properties: SharedApplicationComponentProperties) -> Self {
|
||||
wadm::types::SharedApplicationComponentProperties {
|
||||
name: properties.name,
|
||||
component: properties.component,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl From<Trait> for wadm::types::Trait {
|
||||
fn from(trait_: Trait) -> Self {
|
||||
wadm::types::Trait {
|
||||
|
@ -258,6 +292,7 @@ impl From<StatusType> for wadm::types::StatusType {
|
|||
StatusType::Deployed => wadm::types::StatusType::Deployed,
|
||||
StatusType::Failed => wadm::types::StatusType::Failed,
|
||||
StatusType::Waiting => wadm::types::StatusType::Waiting,
|
||||
StatusType::Unhealthy => wadm::types::StatusType::Unhealthy,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -272,6 +307,7 @@ impl From<wadm::types::StatusType> for StatusType {
|
|||
wadm::types::StatusType::Deployed => StatusType::Deployed,
|
||||
wadm::types::StatusType::Failed => StatusType::Failed,
|
||||
wadm::types::StatusType::Waiting => StatusType::Waiting,
|
||||
wadm::types::StatusType::Unhealthy => StatusType::Unhealthy,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -368,7 +404,7 @@ impl From<wadm::types::Policy> for Policy {
|
|||
fn from(policy: wadm::types::Policy) -> Self {
|
||||
Policy {
|
||||
name: policy.name,
|
||||
properties: policy.properties.into_iter().map(|p| p.into()).collect(),
|
||||
properties: policy.properties.into_iter().collect(),
|
||||
policy_type: policy.type_,
|
||||
}
|
||||
}
|
||||
|
@ -391,6 +427,7 @@ impl From<wadm::types::ComponentProperties> for ComponentProperties {
|
|||
fn from(properties: wadm::types::ComponentProperties) -> Self {
|
||||
ComponentProperties {
|
||||
image: properties.image,
|
||||
application: properties.application.map(Into::into),
|
||||
id: properties.id,
|
||||
config: properties.config.into_iter().map(|c| c.into()).collect(),
|
||||
secrets: properties.secrets.into_iter().map(|c| c.into()).collect(),
|
||||
|
@ -402,6 +439,7 @@ impl From<wadm::types::CapabilityProperties> for CapabilityProperties {
|
|||
fn from(properties: wadm::types::CapabilityProperties) -> Self {
|
||||
CapabilityProperties {
|
||||
image: properties.image,
|
||||
application: properties.application.map(Into::into),
|
||||
id: properties.id,
|
||||
config: properties.config.into_iter().map(|c| c.into()).collect(),
|
||||
secrets: properties.secrets.into_iter().map(|c| c.into()).collect(),
|
||||
|
@ -438,6 +476,17 @@ impl From<wadm::types::SecretSourceProperty> for SecretSourceProperty {
|
|||
}
|
||||
}
|
||||
|
||||
impl From<wadm::types::SharedApplicationComponentProperties>
|
||||
for SharedApplicationComponentProperties
|
||||
{
|
||||
fn from(properties: wadm::types::SharedApplicationComponentProperties) -> Self {
|
||||
SharedApplicationComponentProperties {
|
||||
name: properties.name,
|
||||
component: properties.component,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl From<wadm::types::Trait> for Trait {
|
||||
fn from(trait_: wadm::types::Trait) -> Self {
|
||||
Trait {
|
||||
|
|
|
@ -2,6 +2,7 @@ use std::collections::{BTreeMap, HashMap};
|
|||
|
||||
use schemars::JsonSchema;
|
||||
use serde::{de, Deserialize, Serialize};
|
||||
use utoipa::ToSchema;
|
||||
|
||||
pub mod api;
|
||||
#[cfg(feature = "wit")]
|
||||
|
@ -24,6 +25,8 @@ pub const VERSION_ANNOTATION_KEY: &str = "version";
|
|||
/// The description key, as predefined by the [OAM
|
||||
/// spec](https://github.com/oam-dev/spec/blob/master/metadata.md#annotations-format)
|
||||
pub const DESCRIPTION_ANNOTATION_KEY: &str = "description";
|
||||
/// The annotation key for shared applications
|
||||
pub const SHARED_ANNOTATION_KEY: &str = "experimental.wasmcloud.dev/shared";
|
||||
/// The identifier for the builtin spreadscaler trait type
|
||||
pub const SPREADSCALER_TRAIT: &str = "spreadscaler";
|
||||
/// The identifier for the builtin daemonscaler trait type
|
||||
|
@ -33,9 +36,11 @@ pub const LINK_TRAIT: &str = "link";
|
|||
/// The string used for indicating a latest version. It is explicitly forbidden to use as a version
|
||||
/// for a manifest
|
||||
pub const LATEST_VERSION: &str = "latest";
|
||||
/// The default link name
|
||||
pub const DEFAULT_LINK_NAME: &str = "default";
|
||||
|
||||
/// An OAM manifest
|
||||
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Eq, utoipa::ToSchema, JsonSchema)]
|
||||
/// Manifest file based on the Open Application Model (OAM) specification for declaratively managing wasmCloud applications
|
||||
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Eq, ToSchema, JsonSchema)]
|
||||
#[serde(deny_unknown_fields)]
|
||||
pub struct Manifest {
|
||||
/// The OAM version of the manifest
|
||||
|
@ -67,11 +72,65 @@ impl Manifest {
|
|||
.map(|v| v.as_str())
|
||||
}
|
||||
|
||||
/// Indicates if the manifest is shared, meaning it can be used by multiple applications
|
||||
pub fn shared(&self) -> bool {
|
||||
self.metadata
|
||||
.annotations
|
||||
.get(SHARED_ANNOTATION_KEY)
|
||||
.is_some_and(|v| v.parse::<bool>().unwrap_or(false))
|
||||
}
|
||||
|
||||
/// Returns the components in the manifest
|
||||
pub fn components(&self) -> impl Iterator<Item = &Component> {
|
||||
self.spec.components.iter()
|
||||
}
|
||||
|
||||
/// Helper function to find shared components that are missing from the given list of
|
||||
/// deployed applications
|
||||
pub fn missing_shared_components(&self, deployed_apps: &[&Manifest]) -> Vec<&Component> {
|
||||
self.spec
|
||||
.components
|
||||
.iter()
|
||||
.filter(|shared_component| {
|
||||
match &shared_component.properties {
|
||||
Properties::Capability {
|
||||
properties:
|
||||
CapabilityProperties {
|
||||
image: None,
|
||||
application: Some(shared_app),
|
||||
..
|
||||
},
|
||||
}
|
||||
| Properties::Component {
|
||||
properties:
|
||||
ComponentProperties {
|
||||
image: None,
|
||||
application: Some(shared_app),
|
||||
..
|
||||
},
|
||||
} => {
|
||||
if deployed_apps.iter().filter(|a| a.shared()).any(|m| {
|
||||
m.metadata.name == shared_app.name
|
||||
&& m.components().any(|c| {
|
||||
c.name == shared_app.component
|
||||
// This compares just the enum variant, not the actual properties
|
||||
// For example, if we reference a shared component that's a capability,
|
||||
// we want to make sure the deployed component is a capability.
|
||||
&& std::mem::discriminant(&c.properties)
|
||||
== std::mem::discriminant(&shared_component.properties)
|
||||
})
|
||||
}) {
|
||||
false
|
||||
} else {
|
||||
true
|
||||
}
|
||||
}
|
||||
_ => false,
|
||||
}
|
||||
})
|
||||
.collect()
|
||||
}
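A brief illustration of how the two helpers above fit together (hypothetical caller, not part of this change; only the methods shown above are assumed, and public field visibility on `Component` is an assumption):

```rust
// Hypothetical helper, not part of this diff: given a candidate manifest and the
// manifests currently deployed on the lattice, report shared-component references
// that no deployed, shared application satisfies.
use wadm_types::Manifest;

fn report_missing_shared(candidate: &Manifest, deployed: &[&Manifest]) {
    for component in candidate.missing_shared_components(deployed) {
        eprintln!("unsatisfied shared component reference: {}", component.name);
    }
}
```

Note that `missing_shared_components` already filters the deployed list down to manifests whose `shared()` annotation is set, so a caller does not need to pre-filter.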
|
||||
|
||||
/// Returns only the WebAssembly components in the manifest
|
||||
pub fn wasm_components(&self) -> impl Iterator<Item = &Component> {
|
||||
self.components()
|
||||
|
@ -115,7 +174,7 @@ impl Manifest {
|
|||
}
|
||||
|
||||
/// The metadata describing the manifest
|
||||
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Eq, JsonSchema)]
|
||||
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Eq, ToSchema, JsonSchema)]
|
||||
pub struct Metadata {
|
||||
/// The name of the manifest. This must be unique per lattice
|
||||
pub name: String,
|
||||
|
@ -128,7 +187,7 @@ pub struct Metadata {
|
|||
}
|
||||
|
||||
/// A representation of an OAM specification
|
||||
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Eq, JsonSchema)]
|
||||
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Eq, ToSchema, JsonSchema)]
|
||||
pub struct Specification {
|
||||
/// The list of components for describing an application
|
||||
pub components: Vec<Component>,
|
||||
|
@ -141,7 +200,7 @@ pub struct Specification {
|
|||
}
|
||||
|
||||
/// A policy definition
|
||||
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Eq, JsonSchema)]
|
||||
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Eq, ToSchema, JsonSchema)]
|
||||
pub struct Policy {
|
||||
/// The name of this policy
|
||||
pub name: String,
|
||||
|
@ -153,9 +212,9 @@ pub struct Policy {
|
|||
}
|
||||
|
||||
/// A component definition
|
||||
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Eq, JsonSchema)]
|
||||
// TODO: for some reason this works fine for capabilities but not components
|
||||
//#[serde(deny_unknown_fields)]
|
||||
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Eq, ToSchema, JsonSchema)]
|
||||
// TODO: figure out why this can't be uncommented
|
||||
// #[serde(deny_unknown_fields)]
|
||||
pub struct Component {
|
||||
/// The name of this component
|
||||
pub name: String,
|
||||
|
@ -199,10 +258,15 @@ impl Component {
|
|||
};
|
||||
secrets
|
||||
}
|
||||
|
||||
/// Returns only links in the component
|
||||
fn links(&self) -> impl Iterator<Item = &Trait> {
|
||||
self.traits.iter().flatten().filter(|t| t.is_link())
|
||||
}
|
||||
}
|
||||
|
||||
/// Properties that can be defined for a component
|
||||
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Eq, JsonSchema)]
|
||||
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Eq, ToSchema, JsonSchema)]
|
||||
#[serde(tag = "type")]
|
||||
pub enum Properties {
|
||||
#[serde(rename = "component", alias = "actor")]
|
||||
|
@ -211,11 +275,17 @@ pub enum Properties {
|
|||
Capability { properties: CapabilityProperties },
|
||||
}
|
||||
|
||||
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Eq, JsonSchema)]
|
||||
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Eq, ToSchema, JsonSchema)]
|
||||
#[serde(deny_unknown_fields)]
|
||||
pub struct ComponentProperties {
|
||||
/// The image reference to use
|
||||
pub image: String,
|
||||
/// The image reference to use. Required unless the component is a shared component
|
||||
/// that is defined in another shared application.
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub image: Option<String>,
|
||||
/// Information to locate a component within a shared application. Cannot be specified
|
||||
/// if the image is specified.
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub application: Option<SharedApplicationComponentProperties>,
|
||||
/// The component ID to use for this component. If not supplied, it will be generated
|
||||
/// as a combination of the [Metadata::name] and the image reference.
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
|
@ -230,7 +300,7 @@ pub struct ComponentProperties {
|
|||
pub secrets: Vec<SecretProperty>,
|
||||
}
|
||||
|
||||
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Eq, Default, JsonSchema)]
|
||||
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Eq, Default, ToSchema, JsonSchema)]
|
||||
pub struct ConfigDefinition {
|
||||
#[serde(default, skip_serializing_if = "Vec::is_empty")]
|
||||
pub config: Vec<ConfigProperty>,
|
||||
|
@ -238,7 +308,7 @@ pub struct ConfigDefinition {
|
|||
pub secrets: Vec<SecretProperty>,
|
||||
}
|
||||
|
||||
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Eq, Hash, JsonSchema)]
|
||||
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Eq, Hash, ToSchema, JsonSchema)]
|
||||
pub struct SecretProperty {
|
||||
/// The name of the secret. This is used as a reference by the component or capability to
|
||||
/// get the secret value as a resource.
|
||||
|
@ -248,7 +318,7 @@ pub struct SecretProperty {
|
|||
pub properties: SecretSourceProperty,
|
||||
}
|
||||
|
||||
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Eq, Hash, JsonSchema)]
|
||||
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Eq, Hash, ToSchema, JsonSchema)]
|
||||
pub struct SecretSourceProperty {
|
||||
/// The policy to use for retrieving the secret.
|
||||
pub policy: String,
|
||||
|
@ -263,11 +333,17 @@ pub struct SecretSourceProperty {
|
|||
pub version: Option<String>,
|
||||
}
|
||||
|
||||
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Eq, JsonSchema)]
|
||||
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Eq, ToSchema, JsonSchema)]
|
||||
#[serde(deny_unknown_fields)]
|
||||
pub struct CapabilityProperties {
|
||||
/// The image reference to use
|
||||
pub image: String,
|
||||
/// The image reference to use. Required unless the component is a shared component
|
||||
/// that is defined in another shared application.
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub image: Option<String>,
|
||||
/// Information to locate a component within a shared application. Cannot be specified
|
||||
/// if the image is specified.
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub application: Option<SharedApplicationComponentProperties>,
|
||||
/// The component ID to use for this provider. If not supplied, it will be generated
|
||||
/// as a combination of the [Metadata::name] and the image reference.
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
|
@ -282,7 +358,15 @@ pub struct CapabilityProperties {
|
|||
pub secrets: Vec<SecretProperty>,
|
||||
}
|
||||
|
||||
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Eq, JsonSchema)]
|
||||
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Eq, ToSchema, JsonSchema)]
|
||||
pub struct SharedApplicationComponentProperties {
|
||||
/// The name of the shared application
|
||||
pub name: String,
|
||||
/// The name of the component in the shared application
|
||||
pub component: String,
|
||||
}
|
||||
|
||||
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Eq, ToSchema, JsonSchema)]
|
||||
#[serde(deny_unknown_fields)]
|
||||
pub struct Trait {
|
||||
/// The type of trait specified. This should be a unique string for the type of scaler. As we
|
||||
|
@ -307,6 +391,11 @@ impl Trait {
|
|||
self.trait_type == LINK_TRAIT
|
||||
}
|
||||
|
||||
/// Check if a trait is a scaler
|
||||
pub fn is_scaler(&self) -> bool {
|
||||
self.trait_type == SPREADSCALER_TRAIT || self.trait_type == DAEMONSCALER_TRAIT
|
||||
}
|
||||
|
||||
/// Helper that creates a new spreadscaler type trait with the given properties
|
||||
pub fn new_spreadscaler(props: SpreadScalerProperty) -> Trait {
|
||||
Trait {
|
||||
|
@ -324,7 +413,7 @@ impl Trait {
|
|||
}
|
||||
|
||||
/// Properties for defining traits
|
||||
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Eq, JsonSchema)]
|
||||
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Eq, ToSchema, JsonSchema)]
|
||||
#[serde(untagged)]
|
||||
#[allow(clippy::large_enum_variant)]
|
||||
pub enum TraitProperty {
|
||||
|
@ -348,11 +437,11 @@ impl From<SpreadScalerProperty> for TraitProperty {
|
|||
}
|
||||
}
|
||||
|
||||
impl From<serde_json::Value> for TraitProperty {
|
||||
fn from(value: serde_json::Value) -> Self {
|
||||
Self::Custom(value)
|
||||
}
|
||||
}
|
||||
// impl From<serde_json::Value> for TraitProperty {
|
||||
// fn from(value: serde_json::Value) -> Self {
|
||||
// Self::Custom(value)
|
||||
// }
|
||||
// }
|
||||
|
||||
/// Properties for the config list associated with components, providers, and links
|
||||
///
|
||||
|
@ -368,7 +457,7 @@ impl From<serde_json::Value> for TraitProperty {
|
|||
///
|
||||
/// Will result in two config scalers being created, one with the name `basic-kv` and one with the
|
||||
/// name `default-port`. Wadm will not resolve collisions with configuration names between manifests.
|
||||
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Eq, JsonSchema)]
|
||||
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Eq, ToSchema, JsonSchema)]
|
||||
#[serde(deny_unknown_fields)]
|
||||
pub struct ConfigProperty {
|
||||
/// Name of the config to ensure exists
|
||||
|
@ -388,7 +477,7 @@ impl PartialEq<ConfigProperty> for String {
|
|||
}
|
||||
|
||||
/// Properties for links
|
||||
#[derive(Debug, Serialize, Clone, PartialEq, Eq, JsonSchema, Default)]
|
||||
#[derive(Debug, Serialize, Clone, PartialEq, Eq, ToSchema, JsonSchema, Default)]
|
||||
#[serde(deny_unknown_fields)]
|
||||
pub struct LinkProperty {
|
||||
/// WIT namespace for the link
|
||||
|
@ -488,7 +577,7 @@ impl<'de> Deserialize<'de> for LinkProperty {
|
|||
}
|
||||
}
|
||||
|
||||
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Eq, Default, JsonSchema)]
|
||||
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Eq, Default, ToSchema, JsonSchema)]
|
||||
pub struct TargetConfig {
|
||||
/// The target this link applies to. This should be the name of a component in the manifest
|
||||
pub name: String,
|
||||
|
@ -505,7 +594,7 @@ impl PartialEq<TargetConfig> for String {
|
|||
}
|
||||
|
||||
/// Properties for spread scalers
|
||||
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Eq, JsonSchema)]
|
||||
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Eq, ToSchema, JsonSchema)]
|
||||
#[serde(deny_unknown_fields)]
|
||||
pub struct SpreadScalerProperty {
|
||||
/// Number of instances to spread across matching requirements
|
||||
|
@ -517,7 +606,7 @@ pub struct SpreadScalerProperty {
|
|||
}
|
||||
|
||||
/// Configuration for various spreading requirements
|
||||
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Eq, JsonSchema)]
|
||||
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Eq, ToSchema, JsonSchema)]
|
||||
#[serde(deny_unknown_fields)]
|
||||
pub struct Spread {
|
||||
/// The name of this spread requirement
|
||||
|
@ -683,7 +772,7 @@ mod test {
|
|||
&component.properties,
|
||||
Properties::Capability {
|
||||
properties: CapabilityProperties { image, .. }
|
||||
} if image == "wasmcloud.azurecr.io/httpserver:0.13.1"
|
||||
} if image.clone().expect("image to be present") == "wasmcloud.azurecr.io/httpserver:0.13.1"
|
||||
)
|
||||
})
|
||||
.expect("Should find capability component")
|
||||
|
@ -751,7 +840,8 @@ mod test {
|
|||
name: "userinfo".to_string(),
|
||||
properties: Properties::Component {
|
||||
properties: ComponentProperties {
|
||||
image: "wasmcloud.azurecr.io/fake:1".to_string(),
|
||||
image: Some("wasmcloud.azurecr.io/fake:1".to_string()),
|
||||
application: None,
|
||||
id: None,
|
||||
config: vec![],
|
||||
secrets: vec![],
|
||||
|
@ -764,7 +854,8 @@ mod test {
|
|||
name: "webcap".to_string(),
|
||||
properties: Properties::Capability {
|
||||
properties: CapabilityProperties {
|
||||
image: "wasmcloud.azurecr.io/httpserver:0.13.1".to_string(),
|
||||
image: Some("wasmcloud.azurecr.io/httpserver:0.13.1".to_string()),
|
||||
application: None,
|
||||
id: None,
|
||||
config: vec![],
|
||||
secrets: vec![],
|
||||
|
@ -792,7 +883,8 @@ mod test {
|
|||
name: "ledblinky".to_string(),
|
||||
properties: Properties::Capability {
|
||||
properties: CapabilityProperties {
|
||||
image: "wasmcloud.azurecr.io/ledblinky:0.0.1".to_string(),
|
||||
image: Some("wasmcloud.azurecr.io/ledblinky:0.0.1".to_string()),
|
||||
application: None,
|
||||
id: None,
|
||||
config: vec![],
|
||||
secrets: vec![],
|
||||
|
|
|
@ -2,6 +2,7 @@
|
|||
//!
|
||||
|
||||
use std::collections::{HashMap, HashSet};
|
||||
#[cfg(not(target_family = "wasm"))]
|
||||
use std::path::Path;
|
||||
use std::sync::OnceLock;
|
||||
|
||||
|
@ -11,7 +12,7 @@ use serde::{Deserialize, Serialize};
|
|||
|
||||
use crate::{
|
||||
CapabilityProperties, ComponentProperties, LinkProperty, Manifest, Properties, Trait,
|
||||
TraitProperty, LATEST_VERSION,
|
||||
TraitProperty, DEFAULT_LINK_NAME, LATEST_VERSION,
|
||||
};
|
||||
|
||||
/// A namespace -> package -> interface lookup
|
||||
|
@ -60,7 +61,12 @@ fn get_known_interface_lookup() -> &'static KnownInterfaceLookup {
|
|||
("config".into(), HashMap::from([("runtime".into(), ())])),
|
||||
(
|
||||
"keyvalue".into(),
|
||||
HashMap::from([("atomics".into(), ()), ("store".into(), ())]),
|
||||
HashMap::from([
|
||||
("atomics".into(), ()),
|
||||
("store".into(), ()),
|
||||
("batch".into(), ()),
|
||||
("watch".into(), ()),
|
||||
]),
|
||||
),
|
||||
(
|
||||
"http".into(),
|
||||
|
@ -154,9 +160,10 @@ fn is_invalid_known_interface(
|
|||
};
|
||||
// Unknown interface inside known namespace and package is probably a bug
|
||||
if !iface_lookup.contains_key(interface) {
|
||||
// Unknown package inside a known interface we control is probably a bug
|
||||
// An unknown interface inside a namespace and package we control is probably a bug, but may be
|
||||
// a new interface we don't know about yet
|
||||
return vec![ValidationFailure::new(
|
||||
ValidationFailureLevel::Error,
|
||||
ValidationFailureLevel::Warning,
|
||||
format!("unrecognized interface [{namespace}:{package}/{interface}]"),
|
||||
)];
|
||||
}
|
||||
|
@ -268,6 +275,7 @@ impl ValidationOutput for Vec<ValidationFailure> {
|
|||
/// # Arguments
|
||||
///
|
||||
/// * `path` - Path to the Manifest that will be read into memory and validated
|
||||
#[cfg(not(target_family = "wasm"))]
|
||||
pub async fn validate_manifest_file(
|
||||
path: impl AsRef<Path>,
|
||||
) -> Result<(Manifest, Vec<ValidationFailure>)> {
|
||||
|
@ -291,9 +299,12 @@ pub async fn validate_manifest_file(
|
|||
pub async fn validate_manifest_bytes(
|
||||
content: impl AsRef<[u8]>,
|
||||
) -> Result<(Manifest, Vec<ValidationFailure>)> {
|
||||
let raw_yaml_content = content.as_ref();
|
||||
let manifest =
|
||||
serde_yaml::from_slice(content.as_ref()).context("failed to parse manifest content")?;
|
||||
let failures = validate_manifest(&manifest).await?;
|
||||
let mut failures = validate_manifest(&manifest).await?;
|
||||
let mut yaml_issues = validate_raw_yaml(raw_yaml_content)?;
|
||||
failures.append(&mut yaml_issues);
|
||||
Ok((manifest, failures))
|
||||
}
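With this change, callers of `validate_manifest_bytes` get the raw-YAML findings (such as the deprecated `source_config` warning further below) folded into the same list as the semantic checks. A minimal sketch of consuming the combined result; the module path and public `level`/`msg` field access on `ValidationFailure` are assumptions:

```rust
// Hypothetical caller, not part of this diff.
use wadm_types::validation::{validate_manifest_bytes, ValidationFailureLevel};

async fn check(bytes: &[u8]) -> anyhow::Result<()> {
    let (_manifest, failures) = validate_manifest_bytes(bytes).await?;
    for failure in &failures {
        // `level` and `msg` are assumed to be public fields here.
        match failure.level {
            ValidationFailureLevel::Warning => eprintln!("warning: {}", failure.msg),
            _ => eprintln!("error: {}", failure.msg),
        }
    }
    Ok(())
}
```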
|
||||
|
||||
|
@ -335,6 +346,18 @@ pub async fn validate_manifest(manifest: &Manifest) -> Result<Vec<ValidationFail
|
|||
failures.extend(check_misnamed_interfaces(manifest));
|
||||
failures.extend(check_dangling_links(manifest));
|
||||
failures.extend(validate_policies(manifest));
|
||||
failures.extend(ensure_no_custom_traits(manifest));
|
||||
failures.extend(validate_component_properties(manifest));
|
||||
failures.extend(check_duplicate_links(manifest));
|
||||
failures.extend(validate_link_configs(manifest));
|
||||
Ok(failures)
|
||||
}
|
||||
|
||||
pub fn validate_raw_yaml(content: &[u8]) -> Result<Vec<ValidationFailure>> {
|
||||
let mut failures = Vec::new();
|
||||
let raw_content: serde_yaml::Value =
|
||||
serde_yaml::from_slice(content).context("failed read raw yaml content")?;
|
||||
failures.extend(validate_components_configs(&raw_content));
|
||||
Ok(failures)
|
||||
}
|
||||
|
||||
|
@ -461,6 +484,32 @@ fn check_misnamed_interfaces(manifest: &Manifest) -> Vec<ValidationFailure> {
|
|||
failures
|
||||
}
|
||||
|
||||
/// This validation rule should eventually be removed, but at this time (as of wadm 0.14.0)
|
||||
/// custom traits are not supported. We technically deserialize the custom trait, but 99%
|
||||
/// of the time this is just a poorly formatted spread or link scaler which is incredibly
|
||||
/// frustrating to debug.
|
||||
fn ensure_no_custom_traits(manifest: &Manifest) -> Vec<ValidationFailure> {
|
||||
let mut failures = Vec::new();
|
||||
for component in manifest.components() {
|
||||
if let Some(traits) = &component.traits {
|
||||
for trait_item in traits {
|
||||
match &trait_item.properties {
|
||||
TraitProperty::Custom(trt) if trait_item.is_link() => failures.push(ValidationFailure::new(
|
||||
ValidationFailureLevel::Error,
|
||||
format!("Link trait deserialized as custom trait, ensure fields are correct: {}", trt),
|
||||
)),
|
||||
TraitProperty::Custom(trt) if trait_item.is_scaler() => failures.push(ValidationFailure::new(
|
||||
ValidationFailureLevel::Error,
|
||||
format!("Scaler trait deserialized as custom trait, ensure fields are correct: {}", trt),
|
||||
)),
|
||||
_ => (),
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
failures
|
||||
}
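The check above exists because `TraitProperty` is an untagged enum: a link or scaler whose properties are slightly malformed quietly deserializes into the `Custom` variant instead of failing. A standalone sketch of that fall-through behaviour (hypothetical types, assuming the `serde` and `serde_json` crates):

```rust
use serde::Deserialize;

// Hypothetical stand-in for an untagged trait-property enum.
#[derive(Debug, Deserialize)]
#[serde(untagged)]
enum Props {
    Spread { instances: u32 },
    Custom(serde_json::Value),
}

fn main() {
    // "instances" misspelled as "replicas": this no longer matches the Spread variant,
    // so serde silently falls through to the catch-all JSON variant.
    let parsed: Props = serde_json::from_str(r#"{ "replicas": 3 }"#).unwrap();
    assert!(matches!(parsed, Props::Custom(_)));
}
```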
|
||||
|
||||
/// Check for "dangling" links, which contain targets that are not specified elsewhere in the
|
||||
/// WADM manifest.
|
||||
///
|
||||
|
@ -564,6 +613,180 @@ fn validate_policies(manifest: &Manifest) -> Vec<ValidationFailure> {
|
|||
failures
|
||||
}
|
||||
|
||||
/// Ensure that all components in a manifest either specify an image reference or a shared
|
||||
/// component in a different manifest. Note that this does not validate that the image reference
|
||||
/// is valid or that the shared component is valid, only that one of the two properties is set.
|
||||
pub fn validate_component_properties(application: &Manifest) -> Vec<ValidationFailure> {
|
||||
let mut failures = Vec::new();
|
||||
for component in application.spec.components.iter() {
|
||||
match &component.properties {
|
||||
Properties::Component {
|
||||
properties:
|
||||
ComponentProperties {
|
||||
image,
|
||||
application,
|
||||
config,
|
||||
secrets,
|
||||
..
|
||||
},
|
||||
}
|
||||
| Properties::Capability {
|
||||
properties:
|
||||
CapabilityProperties {
|
||||
image,
|
||||
application,
|
||||
config,
|
||||
secrets,
|
||||
..
|
||||
},
|
||||
} => match (image, application) {
|
||||
(Some(_), Some(_)) => {
|
||||
failures.push(ValidationFailure::new(
|
||||
ValidationFailureLevel::Error,
|
||||
"Component cannot have both 'image' and 'application' properties".into(),
|
||||
));
|
||||
}
|
||||
(None, None) => {
|
||||
failures.push(ValidationFailure::new(
|
||||
ValidationFailureLevel::Error,
|
||||
"Component must have either 'image' or 'application' property".into(),
|
||||
));
|
||||
}
|
||||
// This is a problem because of our left-folding config implementation. A shared application
|
||||
// could specify additional config and actually overwrite the original manifest's config.
|
||||
(None, Some(shared_properties)) if !config.is_empty() => {
|
||||
failures.push(ValidationFailure::new(
|
||||
ValidationFailureLevel::Error,
|
||||
format!(
|
||||
"Shared component '{}' cannot specify additional 'config'",
|
||||
shared_properties.name
|
||||
),
|
||||
));
|
||||
}
|
||||
(None, Some(shared_properties)) if !secrets.is_empty() => {
|
||||
failures.push(ValidationFailure::new(
|
||||
ValidationFailureLevel::Error,
|
||||
format!(
|
||||
"Shared component '{}' cannot specify additional 'secrets'",
|
||||
shared_properties.name
|
||||
),
|
||||
));
|
||||
}
|
||||
// Shared application components already have scale properties defined in their original manifest
|
||||
(None, Some(shared_properties))
|
||||
if component
|
||||
.traits
|
||||
.as_ref()
|
||||
.is_some_and(|traits| traits.iter().any(|trt| trt.is_scaler())) =>
|
||||
{
|
||||
failures.push(ValidationFailure::new(
|
||||
ValidationFailureLevel::Error,
|
||||
format!(
|
||||
"Shared component '{}' cannot include a scaler trait",
|
||||
shared_properties.name
|
||||
),
|
||||
));
|
||||
}
|
||||
_ => {}
|
||||
},
|
||||
}
|
||||
}
|
||||
failures
|
||||
}
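The image/application rule reduces to a small truth table: exactly one of the two must be set, and a shared reference must not add config, secrets, or scaler traits of its own. A self-contained restatement of the core rule (illustrative only, not the wadm API):

```rust
// Illustrative only: the image/application decision from validate_component_properties,
// restated as a standalone check.
fn image_application_error(image: Option<&str>, application: Option<&str>) -> Option<&'static str> {
    match (image, application) {
        (Some(_), Some(_)) => Some("cannot set both 'image' and 'application'"),
        (None, None) => Some("must set either 'image' or 'application'"),
        _ => None, // exactly one of the two is set: valid
    }
}

fn main() {
    assert!(image_application_error(Some("ghcr.io/acme/http:1.0.0"), None).is_none());
    assert!(image_application_error(None, Some("shared-app")).is_none());
    assert!(image_application_error(None, None).is_some());
    assert!(image_application_error(Some("img"), Some("shared-app")).is_some());
}
```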
|
||||
|
||||
/// Validates link configs in a WADM application manifest.
|
||||
///
|
||||
/// At present this can check for:
|
||||
/// - all configs that declare `properties` have unique names
|
||||
/// (configs without properties refer to existing configs)
|
||||
///
|
||||
pub fn validate_link_configs(manifest: &Manifest) -> Vec<ValidationFailure> {
|
||||
let mut failures = Vec::new();
|
||||
let mut link_config_names = HashSet::new();
|
||||
for link_trait in manifest.links() {
|
||||
if let TraitProperty::Link(LinkProperty { target, source, .. }) = &link_trait.properties {
|
||||
for config in &target.config {
|
||||
// we only need to check for uniqueness of configs with properties
|
||||
if config.properties.is_none() {
|
||||
continue;
|
||||
}
|
||||
// Check if config name is unique
|
||||
if !link_config_names.insert(config.name.clone()) {
|
||||
failures.push(ValidationFailure::new(
|
||||
ValidationFailureLevel::Error,
|
||||
format!("Duplicate link config name found: '{}'", config.name),
|
||||
));
|
||||
}
|
||||
}
|
||||
|
||||
if let Some(source) = source {
|
||||
for config in &source.config {
|
||||
// we only need to check for uniqueness of configs with properties
|
||||
if config.properties.is_none() {
|
||||
continue;
|
||||
}
|
||||
// Check if config name is unique
|
||||
if !link_config_names.insert(config.name.clone()) {
|
||||
failures.push(ValidationFailure::new(
|
||||
ValidationFailureLevel::Error,
|
||||
format!("Duplicate link config name found: '{}'", config.name),
|
||||
));
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
failures
|
||||
}
|
||||
|
||||
/// Function to validate the component configs
|
||||
/// Since 0.13.0, source_config has been deprecated in favor of source:config:;
|
||||
/// this function validates the raw YAML to check for the deprecated source_config and target_config keys
|
||||
pub fn validate_components_configs(application: &serde_yaml::Value) -> Vec<ValidationFailure> {
|
||||
let mut failures = Vec::new();
|
||||
|
||||
if let Some(specs) = application.get("spec") {
|
||||
if let Some(components) = specs.get("components") {
|
||||
if let Some(components_sequence) = components.as_sequence() {
|
||||
for component in components_sequence.iter() {
|
||||
failures.extend(get_deprecated_configs(component));
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
failures
|
||||
}
|
||||
|
||||
fn get_deprecated_configs(component: &serde_yaml::Value) -> Vec<ValidationFailure> {
|
||||
let mut failures = vec![];
|
||||
if let Some(traits) = component.get("traits") {
|
||||
if let Some(traits_sequence) = traits.as_sequence() {
|
||||
for trait_ in traits_sequence.iter() {
|
||||
if let Some(trait_type) = trait_.get("type") {
|
||||
if trait_type.ne("link") {
|
||||
continue;
|
||||
}
|
||||
}
|
||||
if let Some(trait_properties) = trait_.get("properties") {
|
||||
if trait_properties.get("source_config").is_some() {
|
||||
failures.push(ValidationFailure {
|
||||
level: ValidationFailureLevel::Warning,
|
||||
msg: "one of the components' link trait contains a source_config key, please use source:config: rather".to_string(),
|
||||
});
|
||||
}
|
||||
if trait_properties.get("target_config").is_some() {
|
||||
failures.push(ValidationFailure {
|
||||
level: ValidationFailureLevel::Warning,
|
||||
msg: "one of the components' link trait contains a target_config key, please use target:config: rather".to_string(),
|
||||
});
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
failures
|
||||
}
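For reference, the deprecation the warnings above point at looks roughly like this in manifest YAML (both snippets are illustrative, embedded here as Rust string constants):

```rust
// Illustrative only: the deprecated flat key versus the nested form the warning suggests.
const DEPRECATED: &str = r#"
traits:
  - type: link
    properties:
      source_config:
        - name: basic-kv
"#;

const CURRENT: &str = r#"
traits:
  - type: link
    properties:
      source:
        config:
          - name: basic-kv
"#;

fn main() {
    println!("deprecated:{DEPRECATED}\ncurrent:{CURRENT}");
}
```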
|
||||
|
||||
/// This function validates that a key/value pair is a valid OAM label. It's using fairly
|
||||
/// basic validation rules to ensure that the manifest isn't doing anything horribly wrong. Keeping
|
||||
/// this function free of regex is intentional to keep this code functional but simple.
|
||||
|
@ -608,6 +831,51 @@ pub fn is_valid_label_name(name: &str) -> bool {
|
|||
.all(|c| c.is_ascii_alphanumeric() || c == '-' || c == '_' || c == '.')
|
||||
}
|
||||
|
||||
/// Checks whether a manifest contains "duplicate" links.
|
||||
///
|
||||
/// Multiple links from the same source with the same name, namespace, package and interface
|
||||
/// are considered duplicate links.
|
||||
fn check_duplicate_links(manifest: &Manifest) -> Vec<ValidationFailure> {
|
||||
let mut failures = Vec::new();
|
||||
for component in manifest.components() {
|
||||
let mut link_ids = HashSet::new();
|
||||
for link in component.links() {
|
||||
if let TraitProperty::Link(LinkProperty {
|
||||
name,
|
||||
namespace,
|
||||
package,
|
||||
interfaces,
|
||||
..
|
||||
}) = &link.properties
|
||||
{
|
||||
for interface in interfaces {
|
||||
if !link_ids.insert((
|
||||
name.clone()
|
||||
.unwrap_or_else(|| DEFAULT_LINK_NAME.to_string()),
|
||||
namespace,
|
||||
package,
|
||||
interface,
|
||||
)) {
|
||||
failures.push(ValidationFailure::new(
|
||||
ValidationFailureLevel::Error,
|
||||
format!(
|
||||
"Duplicate link found inside component '{}': {} ({}:{}/{})",
|
||||
component.name,
|
||||
name.clone()
|
||||
.unwrap_or_else(|| DEFAULT_LINK_NAME.to_string()),
|
||||
namespace,
|
||||
package,
|
||||
interface
|
||||
),
|
||||
));
|
||||
};
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
failures
|
||||
}
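Duplicate detection keys on the tuple of link name (defaulting to "default"), WIT namespace, package, and interface, scoped per component. A standalone sketch of that identity (illustrative only, not the wadm API):

```rust
// Illustrative only: two link traits on the same component collide when this whole
// tuple matches, with a missing link name treated as "default".
use std::collections::HashSet;

type LinkKey<'a> = (&'a str, &'a str, &'a str, &'a str); // (name, namespace, package, interface)

fn first_duplicate<'a>(links: &[LinkKey<'a>]) -> Option<LinkKey<'a>> {
    let mut seen = HashSet::new();
    links.iter().copied().find(|key| !seen.insert(*key))
}

fn main() {
    let links = [
        ("default", "wasi", "keyvalue", "store"),
        ("default", "wasi", "keyvalue", "atomics"), // different interface: allowed
        ("default", "wasi", "keyvalue", "store"),   // same tuple again: flagged
    ];
    assert_eq!(first_duplicate(&links), Some(("default", "wasi", "keyvalue", "store")));
}
```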
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::is_valid_manifest_name;
|
||||
|
|
|
@ -1,4 +1,4 @@
|
|||
[wadm]
|
||||
path = "../../../wit/wadm"
|
||||
sha256 = "30b945b53e5dc1220f25da83449571e119cfd4029647a1908e5658d72335424e"
|
||||
sha512 = "bbd7e5883dc4014ea246a33cf9386b11803cb330854e5691af526971c7131ad358eec9ad8f6dbf0ccd20efe0fedb43a3304f8e9538832d73cce7db09f82f1176"
|
||||
sha256 = "9795ab1a83023da07da2dc28d930004bd913b9dbf07d68d9ef9207a44348a169"
|
||||
sha512 = "9a94f33fd861912c81efd441cd19cc8066dbb2df5c2236d0472b66294bddc20ec5ad569484be18334d8c104ae9647b2c81c9878210ac35694ad8ba4a5b3780be"
|
||||
|
|
|
@ -73,6 +73,7 @@ interface types {
|
|||
deployed,
|
||||
failed,
|
||||
waiting,
|
||||
unhealthy
|
||||
}
|
||||
|
||||
enum deploy-result {
|
||||
|
@ -117,7 +118,8 @@ interface types {
|
|||
|
||||
// Properties for a component
|
||||
record component-properties {
|
||||
image: string,
|
||||
image: option<string>,
|
||||
application: option<shared-application-component-properties>,
|
||||
id: option<string>,
|
||||
config: list<config-property>,
|
||||
secrets: list<secret-property>,
|
||||
|
@ -125,7 +127,8 @@ interface types {
|
|||
|
||||
// Properties for a capability
|
||||
record capability-properties {
|
||||
image: string,
|
||||
image: option<string>,
|
||||
application: option<shared-application-component-properties>,
|
||||
id: option<string>,
|
||||
config: list<config-property>,
|
||||
secrets: list<secret-property>,
|
||||
|
@ -187,6 +190,12 @@ interface types {
|
|||
version: option<string>,
|
||||
}
|
||||
|
||||
// Shared application component properties
|
||||
record shared-application-component-properties {
|
||||
name: string,
|
||||
component: string
|
||||
}
|
||||
|
||||
// Target configuration
|
||||
record target-config {
|
||||
name: string,
|
||||
|
@ -206,4 +215,4 @@ interface types {
|
|||
requirements: list<tuple<string, string>>,
|
||||
weight: option<u32>,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
|
@ -1,7 +1,7 @@
|
|||
[package]
|
||||
name = "wadm"
|
||||
description = "wasmCloud Application Deployment Manager: A tool for running Wasm applications in wasmCloud"
|
||||
version = "0.14.0"
|
||||
version.workspace = true
|
||||
edition = "2021"
|
||||
authors = ["wasmCloud Team"]
|
||||
keywords = ["webassembly", "wasmcloud", "wadm"]
|
||||
|
@ -9,21 +9,29 @@ license = "Apache-2.0"
|
|||
readme = "../../README.md"
|
||||
repository = "https://github.com/wasmcloud/wadm"
|
||||
|
||||
[features]
|
||||
# Enables clap attributes on the wadm configuration struct
|
||||
cli = ["clap"]
|
||||
http_admin = ["http", "http-body-util", "hyper", "hyper-util"]
|
||||
default = []
|
||||
|
||||
[package.metadata.cargo-machete]
|
||||
ignored = ["cloudevents-sdk"]
|
||||
|
||||
[dependencies]
|
||||
anyhow = { workspace = true }
|
||||
async-nats = { workspace = true }
|
||||
async-trait = { workspace = true }
|
||||
base64 = { workspace = true }
|
||||
bytes = { workspace = true }
|
||||
chrono = { workspace = true }
|
||||
clap = { workspace = true, optional = true, features = ["derive", "cargo", "env"]}
|
||||
cloudevents-sdk = { workspace = true }
|
||||
http = { workspace = true, features = ["std"], optional = true }
|
||||
http-body-util = { workspace = true, optional = true }
|
||||
hyper = { workspace = true, optional = true }
|
||||
hyper-util = { workspace = true, features = ["server"], optional = true }
|
||||
futures = { workspace = true }
|
||||
indexmap = { workspace = true, features = ["serde"] }
|
||||
jsonschema = { workspace = true }
|
||||
lazy_static = { workspace = true }
|
||||
nkeys = { workspace = true }
|
||||
rand = { workspace = true, features = ["small_rng"] }
|
||||
regex = { workspace = true }
|
||||
semver = { workspace = true, features = ["serde"] }
|
||||
serde = { workspace = true }
|
||||
serde_json = { workspace = true }
|
||||
|
|
|
@ -2,11 +2,12 @@
|
|||
|
||||
use std::{
|
||||
collections::{BTreeMap, HashMap},
|
||||
error::Error,
|
||||
hash::{Hash, Hasher},
|
||||
};
|
||||
|
||||
use serde::{Deserialize, Serialize};
|
||||
use wasmcloud_control_interface::InterfaceLinkDefinition;
|
||||
use wasmcloud_control_interface::Link;
|
||||
|
||||
use crate::{
|
||||
events::{ComponentScaleFailed, ComponentScaled, Event, ProviderStartFailed, ProviderStarted},
|
||||
|
@ -235,18 +236,20 @@ pub struct PutLink {
|
|||
pub model_name: String,
|
||||
}
|
||||
|
||||
impl From<PutLink> for InterfaceLinkDefinition {
|
||||
fn from(value: PutLink) -> InterfaceLinkDefinition {
|
||||
InterfaceLinkDefinition {
|
||||
source_id: value.source_id,
|
||||
target: value.target,
|
||||
name: value.name,
|
||||
wit_namespace: value.wit_namespace,
|
||||
wit_package: value.wit_package,
|
||||
interfaces: value.interfaces,
|
||||
source_config: value.source_config,
|
||||
target_config: value.target_config,
|
||||
}
|
||||
impl TryFrom<PutLink> for Link {
|
||||
type Error = Box<dyn Error + Send + Sync>;
|
||||
|
||||
fn try_from(value: PutLink) -> Result<Link, Self::Error> {
|
||||
Link::builder()
|
||||
.source_id(&value.source_id)
|
||||
.target(&value.target)
|
||||
.name(&value.name)
|
||||
.wit_namespace(&value.wit_namespace)
|
||||
.wit_package(&value.wit_package)
|
||||
.interfaces(value.interfaces)
|
||||
.source_config(value.source_config)
|
||||
.target_config(value.target_config)
|
||||
.build()
|
||||
}
|
||||
}
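Because the `Link` builder validates its input, the conversion is now fallible, and callers move from `.into()` to `try_from`/`try_into` and must handle the error. A hypothetical call site (the module paths and the `anyhow` error mapping are assumptions):

```rust
// Hypothetical call site, not part of this diff.
use wasmcloud_control_interface::Link;

fn to_link(put: wadm::commands::PutLink) -> anyhow::Result<Link> {
    Link::try_from(put).map_err(|e| anyhow::anyhow!("invalid link definition: {e}"))
}
```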
|
||||
|
||||
|
|
|
@ -0,0 +1,306 @@
|
|||
#[cfg(feature = "http_admin")]
|
||||
use core::net::SocketAddr;
|
||||
use std::path::PathBuf;
|
||||
|
||||
#[cfg(feature = "cli")]
|
||||
use clap::Parser;
|
||||
use wadm_types::api::DEFAULT_WADM_TOPIC_PREFIX;
|
||||
|
||||
use crate::nats::StreamPersistence;
|
||||
|
||||
#[derive(Clone, Debug)]
|
||||
#[cfg_attr(feature = "cli", derive(Parser))]
|
||||
#[cfg_attr(feature = "cli", command(name = clap::crate_name!(), version = clap::crate_version!(), about = "wasmCloud Application Deployment Manager", long_about = None))]
|
||||
pub struct WadmConfig {
|
||||
/// The ID for this wadm process. Defaults to a random UUIDv4 if none is provided. This is used
|
||||
/// to help with debugging when identifying which process is doing the work
|
||||
#[cfg_attr(
|
||||
feature = "cli",
|
||||
arg(short = 'i', long = "host-id", env = "WADM_HOST_ID")
|
||||
)]
|
||||
pub host_id: Option<String>,
|
||||
|
||||
/// Whether or not to use structured log output (as JSON)
|
||||
#[cfg_attr(
|
||||
feature = "cli",
|
||||
arg(
|
||||
short = 'l',
|
||||
long = "structured-logging",
|
||||
default_value = "false",
|
||||
env = "WADM_STRUCTURED_LOGGING"
|
||||
)
|
||||
)]
|
||||
pub structured_logging: bool,
|
||||
|
||||
/// Whether or not to enable opentelemetry tracing
|
||||
#[cfg_attr(
|
||||
feature = "cli",
|
||||
arg(
|
||||
short = 't',
|
||||
long = "tracing",
|
||||
default_value = "false",
|
||||
env = "WADM_TRACING_ENABLED"
|
||||
)
|
||||
)]
|
||||
pub tracing_enabled: bool,
|
||||
|
||||
/// The endpoint to use for tracing. Setting this flag enables tracing, even if --tracing is set
|
||||
/// to false. Defaults to http://localhost:4318/v1/traces if not set and tracing is enabled
|
||||
#[cfg_attr(
|
||||
feature = "cli",
|
||||
arg(short = 'e', long = "tracing-endpoint", env = "WADM_TRACING_ENDPOINT")
|
||||
)]
|
||||
pub tracing_endpoint: Option<String>,
|
||||
|
||||
/// The NATS JetStream domain to connect to
|
||||
#[cfg_attr(feature = "cli", arg(short = 'd', env = "WADM_JETSTREAM_DOMAIN"))]
|
||||
pub domain: Option<String>,
|
||||
|
||||
/// (Advanced) Tweak the maximum number of jobs to run for handling events and commands. Be
|
||||
/// careful how you use this as it can affect performance
|
||||
#[cfg_attr(
|
||||
feature = "cli",
|
||||
arg(short = 'j', long = "max-jobs", env = "WADM_MAX_JOBS")
|
||||
)]
|
||||
pub max_jobs: Option<usize>,
|
||||
|
||||
/// The URL of the nats server you want to connect to
|
||||
#[cfg_attr(
|
||||
feature = "cli",
|
||||
arg(
|
||||
short = 's',
|
||||
long = "nats-server",
|
||||
env = "WADM_NATS_SERVER",
|
||||
default_value = "127.0.0.1:4222"
|
||||
)
|
||||
)]
|
||||
pub nats_server: String,
|
||||
|
||||
/// Use the specified nkey file or seed literal for authentication. Must be used in conjunction with --nats-jwt
|
||||
#[cfg_attr(
|
||||
feature = "cli",
|
||||
arg(
|
||||
long = "nats-seed",
|
||||
env = "WADM_NATS_NKEY",
|
||||
conflicts_with = "nats_creds",
|
||||
requires = "nats_jwt"
|
||||
)
|
||||
)]
|
||||
pub nats_seed: Option<String>,
|
||||
|
||||
/// Use the specified jwt file or literal for authentication. Must be used in conjunction with --nats-nkey
|
||||
#[cfg_attr(
|
||||
feature = "cli",
|
||||
arg(
|
||||
long = "nats-jwt",
|
||||
env = "WADM_NATS_JWT",
|
||||
conflicts_with = "nats_creds",
|
||||
requires = "nats_seed"
|
||||
)
|
||||
)]
|
||||
pub nats_jwt: Option<String>,
|
||||
|
||||
/// (Optional) NATS credential file to use when authenticating
|
||||
#[cfg_attr(
|
||||
feature = "cli", arg(
|
||||
long = "nats-creds-file",
|
||||
env = "WADM_NATS_CREDS_FILE",
|
||||
conflicts_with_all = ["nats_seed", "nats_jwt"],
|
||||
))]
|
||||
pub nats_creds: Option<PathBuf>,
|
||||
|
||||
/// (Optional) NATS TLS certificate file to use when authenticating
|
||||
#[cfg_attr(
|
||||
feature = "cli",
|
||||
arg(long = "nats-tls-ca-file", env = "WADM_NATS_TLS_CA_FILE")
|
||||
)]
|
||||
pub nats_tls_ca_file: Option<PathBuf>,
|
||||
|
||||
/// Name of the bucket used for storage of lattice state
|
||||
#[cfg_attr(
|
||||
feature = "cli",
|
||||
arg(
|
||||
long = "state-bucket-name",
|
||||
env = "WADM_STATE_BUCKET_NAME",
|
||||
default_value = "wadm_state"
|
||||
)
|
||||
)]
|
||||
pub state_bucket: String,
|
||||
|
||||
/// The amount of time in seconds to allow for missed host heartbeats before a host is removed from the
|
||||
/// store. By default, this is 70s because it is 2x the host heartbeat interval plus a little padding
|
||||
#[cfg_attr(
|
||||
feature = "cli",
|
||||
arg(
|
||||
long = "cleanup-interval",
|
||||
env = "WADM_CLEANUP_INTERVAL",
|
||||
default_value = "70"
|
||||
)
|
||||
)]
|
||||
pub cleanup_interval: u64,
|
||||
|
||||
/// The API topic prefix to use. This is an advanced setting that should only be used if you
|
||||
/// know what you are doing
|
||||
#[cfg_attr(
|
||||
feature = "cli", arg(
|
||||
long = "api-prefix",
|
||||
env = "WADM_API_PREFIX",
|
||||
default_value = DEFAULT_WADM_TOPIC_PREFIX
|
||||
))]
|
||||
pub api_prefix: String,
|
||||
|
||||
/// The prefix to use for the internal streams. When running in a multitenant environment,
|
||||
/// clients share the same JS domain (since messages need to come from lattices).
|
||||
/// Setting a stream prefix makes it possible to have a separate stream for different wadms running in a multitenant environment.
|
||||
/// This is an advanced setting that should only be used if you know what you are doing.
|
||||
#[cfg_attr(
|
||||
feature = "cli",
|
||||
arg(long = "stream-prefix", env = "WADM_STREAM_PREFIX")
|
||||
)]
|
||||
pub stream_prefix: Option<String>,
|
||||
|
||||
/// Name of the bucket used for storage of manifests
|
||||
#[cfg_attr(
|
||||
feature = "cli",
|
||||
arg(
|
||||
long = "manifest-bucket-name",
|
||||
env = "WADM_MANIFEST_BUCKET_NAME",
|
||||
default_value = "wadm_manifests"
|
||||
)
|
||||
)]
|
||||
pub manifest_bucket: String,
|
||||
|
||||
/// Run wadm in multitenant mode. This is for advanced multitenant use cases with segmented NATS
|
||||
/// account traffic and not simple cases where all lattices use credentials from the same
|
||||
/// account. See the deployment guide for more information
|
||||
#[cfg_attr(
|
||||
feature = "cli",
|
||||
arg(long = "multitenant", env = "WADM_MULTITENANT", hide = true)
|
||||
)]
|
||||
pub multitenant: bool,
|
||||
|
||||
//
|
||||
// Max bytes configuration for streams. Primarily configurable to enable deployment on NATS infra
|
||||
// with limited resources.
|
||||
//
|
||||
/// Maximum bytes to keep for the state bucket
|
||||
#[cfg_attr(
|
||||
feature = "cli", arg(
|
||||
long = "state-bucket-max-bytes",
|
||||
env = "WADM_STATE_BUCKET_MAX_BYTES",
|
||||
default_value_t = -1,
|
||||
hide = true
|
||||
))]
|
||||
pub max_state_bucket_bytes: i64,
|
||||
/// Maximum bytes to keep for the manifest bucket
|
||||
#[cfg_attr(
|
||||
feature = "cli", arg(
|
||||
long = "manifest-bucket-max-bytes",
|
||||
env = "WADM_MANIFEST_BUCKET_MAX_BYTES",
|
||||
default_value_t = -1,
|
||||
hide = true
|
||||
))]
|
||||
pub max_manifest_bucket_bytes: i64,
|
||||
/// Nats streams storage type
|
||||
#[cfg_attr(
|
||||
feature = "cli", arg(
|
||||
long = "stream-persistence",
|
||||
env = "WADM_STREAM_PERSISTENCE",
|
||||
default_value_t = StreamPersistence::File
|
||||
))]
|
||||
pub stream_persistence: StreamPersistence,
|
||||
/// Maximum bytes to keep for the command stream
|
||||
#[cfg_attr(
|
||||
feature = "cli", arg(
|
||||
long = "command-stream-max-bytes",
|
||||
env = "WADM_COMMAND_STREAM_MAX_BYTES",
|
||||
default_value_t = -1,
|
||||
hide = true
|
||||
))]
|
||||
pub max_command_stream_bytes: i64,
|
||||
/// Maximum bytes to keep for the event stream
|
||||
#[cfg_attr(
|
||||
feature = "cli", arg(
|
||||
long = "event-stream-max-bytes",
|
||||
env = "WADM_EVENT_STREAM_MAX_BYTES",
|
||||
default_value_t = -1,
|
||||
hide = true
|
||||
))]
|
||||
pub max_event_stream_bytes: i64,
|
||||
/// Maximum bytes to keep for the event consumer stream
|
||||
#[cfg_attr(
|
||||
feature = "cli", arg(
|
||||
long = "event-consumer-stream-max-bytes",
|
||||
env = "WADM_EVENT_CONSUMER_STREAM_MAX_BYTES",
|
||||
default_value_t = -1,
|
||||
hide = true
|
||||
))]
|
||||
pub max_event_consumer_stream_bytes: i64,
|
||||
/// Maximum bytes to keep for the status stream
|
||||
#[cfg_attr(
|
||||
feature = "cli", arg(
|
||||
long = "status-stream-max-bytes",
|
||||
env = "WADM_STATUS_STREAM_MAX_BYTES",
|
||||
default_value_t = -1,
|
||||
hide = true
|
||||
))]
|
||||
pub max_status_stream_bytes: i64,
|
||||
/// Maximum bytes to keep for the notify stream
|
||||
#[cfg_attr(
|
||||
feature = "cli", arg(
|
||||
long = "notify-stream-max-bytes",
|
||||
env = "WADM_NOTIFY_STREAM_MAX_BYTES",
|
||||
default_value_t = -1,
|
||||
hide = true
|
||||
))]
|
||||
pub max_notify_stream_bytes: i64,
|
||||
/// Maximum bytes to keep for the wasmbus event stream
|
||||
#[cfg_attr(
|
||||
feature = "cli", arg(
|
||||
long = "wasmbus-event-stream-max-bytes",
|
||||
env = "WADM_WASMBUS_EVENT_STREAM_MAX_BYTES",
|
||||
default_value_t = -1,
|
||||
hide = true
|
||||
))]
|
||||
pub max_wasmbus_event_stream_bytes: i64,
|
||||
|
||||
#[cfg(feature = "http_admin")]
|
||||
#[cfg_attr(feature = "cli", clap(long = "http-admin", env = "WADM_HTTP_ADMIN"))]
|
||||
/// HTTP administration endpoint address
|
||||
pub http_admin: Option<SocketAddr>,
|
||||
}
|
||||
|
||||
impl Default for WadmConfig {
|
||||
fn default() -> Self {
|
||||
Self {
|
||||
host_id: None,
|
||||
domain: None,
|
||||
max_jobs: None,
|
||||
nats_server: "127.0.0.1:4222".to_string(),
|
||||
nats_seed: None,
|
||||
nats_jwt: None,
|
||||
nats_creds: None,
|
||||
nats_tls_ca_file: None,
|
||||
state_bucket: "wadm_state".to_string(),
|
||||
cleanup_interval: 70,
|
||||
api_prefix: DEFAULT_WADM_TOPIC_PREFIX.to_string(),
|
||||
stream_prefix: None,
|
||||
manifest_bucket: "wadm_manifests".to_string(),
|
||||
multitenant: false,
|
||||
max_state_bucket_bytes: -1,
|
||||
max_manifest_bucket_bytes: -1,
|
||||
stream_persistence: StreamPersistence::File,
|
||||
max_command_stream_bytes: -1,
|
||||
max_event_stream_bytes: -1,
|
||||
max_event_consumer_stream_bytes: -1,
|
||||
max_status_stream_bytes: -1,
|
||||
max_notify_stream_bytes: -1,
|
||||
max_wasmbus_event_stream_bytes: -1,
|
||||
structured_logging: false,
|
||||
tracing_enabled: false,
|
||||
tracing_endpoint: None,
|
||||
#[cfg(feature = "http_admin")]
|
||||
http_admin: None,
|
||||
}
|
||||
}
|
||||
}
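Since `WadmConfig` implements `Default`, embedders can override only the fields they need and take the defaults for the rest; compare the `start_wadm` doc example further down, which uses `WadmConfig::default()` directly. A hedged sketch with illustrative values:

```rust
// Illustrative only: override a few fields and fall back to the defaults for the rest.
fn example_config() -> wadm::config::WadmConfig {
    wadm::config::WadmConfig {
        nats_server: "nats.example.internal:4222".to_string(),
        multitenant: true,
        max_jobs: Some(256),
        ..Default::default()
    }
}
```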
|
|
@ -11,9 +11,7 @@ use std::{
|
|||
use cloudevents::{AttributesReader, Data, Event as CloudEvent, EventBuilder, EventBuilderV10};
|
||||
use serde::{Deserialize, Serialize};
|
||||
use thiserror::Error;
|
||||
use wasmcloud_control_interface::{
|
||||
ComponentDescription, InterfaceLinkDefinition, ProviderDescription,
|
||||
};
|
||||
use wasmcloud_control_interface::{ComponentDescription, Link, ProviderDescription};
|
||||
|
||||
use wadm_types::Manifest;
|
||||
|
||||
|
@ -424,7 +422,7 @@ event_impl!(
|
|||
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Eq)]
|
||||
pub struct LinkdefSet {
|
||||
#[serde(flatten)]
|
||||
pub linkdef: InterfaceLinkDefinition,
|
||||
pub linkdef: Link,
|
||||
}
|
||||
|
||||
event_impl!(LinkdefSet, "com.wasmcloud.lattice.linkdef_set");
|
||||
|
|
|
@ -1,6 +1,38 @@
|
|||
use std::sync::Arc;
|
||||
use std::time::Duration;
|
||||
|
||||
use anyhow::Result;
|
||||
use async_nats::jetstream::{stream::Stream, Context};
|
||||
use config::WadmConfig;
|
||||
use tokio::{sync::Semaphore, task::JoinSet};
|
||||
use tracing::log::debug;
|
||||
|
||||
#[cfg(feature = "http_admin")]
|
||||
use anyhow::Context as _;
|
||||
#[cfg(feature = "http_admin")]
|
||||
use hyper::body::Bytes;
|
||||
#[cfg(feature = "http_admin")]
|
||||
use hyper_util::rt::{TokioExecutor, TokioIo};
|
||||
#[cfg(feature = "http_admin")]
|
||||
use tokio::net::TcpListener;
|
||||
|
||||
use crate::{
|
||||
connections::ControlClientConstructor,
|
||||
consumers::{
|
||||
manager::{ConsumerManager, WorkerCreator},
|
||||
*,
|
||||
},
|
||||
nats_utils::LatticeIdParser,
|
||||
scaler::manager::{ScalerManager, WADM_NOTIFY_PREFIX},
|
||||
server::{ManifestNotifier, Server},
|
||||
storage::{nats_kv::NatsKvStore, reaper::Reaper},
|
||||
workers::{CommandPublisher, CommandWorker, EventWorker, StatusPublisher},
|
||||
};
|
||||
|
||||
pub use nats::StreamPersistence;
|
||||
|
||||
pub mod commands;
|
||||
pub mod config;
|
||||
pub mod consumers;
|
||||
pub mod events;
|
||||
pub mod nats_utils;
|
||||
|
@ -10,7 +42,10 @@ pub mod server;
|
|||
pub mod storage;
|
||||
pub mod workers;
|
||||
|
||||
mod connections;
|
||||
pub(crate) mod model;
|
||||
mod nats;
|
||||
mod observer;
|
||||
#[cfg(test)]
|
||||
pub mod test_util;
|
||||
|
||||
|
@ -39,3 +74,406 @@ pub const APP_SPEC_ANNOTATION: &str = "wasmcloud.dev/appspec";
|
|||
pub const SCALER_KEY: &str = "wasmcloud.dev/scaler";
|
||||
/// The default link name. In the future, this will likely be pulled in from another crate
|
||||
pub const DEFAULT_LINK_NAME: &str = "default";
|
||||
/// Default stream name for wadm events
|
||||
pub const DEFAULT_WADM_EVENT_STREAM_NAME: &str = "wadm_events";
|
||||
/// Default stream name for wadm event consumer
|
||||
pub const DEFAULT_WADM_EVENT_CONSUMER_STREAM_NAME: &str = "wadm_event_consumer";
|
||||
/// Default stream name for wadm commands
|
||||
pub const DEFAULT_COMMAND_STREAM_NAME: &str = "wadm_commands";
|
||||
/// Default stream name for wadm status
|
||||
pub const DEFAULT_STATUS_STREAM_NAME: &str = "wadm_status";
|
||||
/// Default stream name for wadm notifications
|
||||
pub const DEFAULT_NOTIFY_STREAM_NAME: &str = "wadm_notify";
|
||||
/// Default stream name for wasmbus events
|
||||
pub const DEFAULT_WASMBUS_EVENT_STREAM_NAME: &str = "wasmbus_events";
|
||||
|
||||
/// Start wadm with the provided [WadmConfig], returning [JoinSet] with two tasks:
|
||||
/// 1. The server task that listens for API requests
|
||||
/// 2. The observer task that listens for events and commands
|
||||
///
|
||||
/// When embedding wadm in another application, this function should be called to start the wadm
|
||||
/// server and observer tasks.
|
||||
///
|
||||
/// # Usage
|
||||
///
|
||||
/// ```no_run
|
||||
/// async {
|
||||
/// let config = wadm::config::WadmConfig::default();
|
||||
/// let mut wadm = wadm::start_wadm(config).await.expect("should start wadm");
|
||||
/// tokio::select! {
|
||||
/// res = wadm.join_next() => {
|
||||
/// match res {
|
||||
/// Some(Ok(_)) => {
|
||||
/// tracing::info!("WADM has exited successfully");
|
||||
/// std::process::exit(0);
|
||||
/// }
|
||||
/// Some(Err(e)) => {
|
||||
/// tracing::error!("WADM has exited with an error: {:?}", e);
|
||||
/// std::process::exit(1);
|
||||
/// }
|
||||
/// None => {
|
||||
/// tracing::info!("WADM server did not start");
|
||||
/// std::process::exit(0);
|
||||
/// }
|
||||
/// }
|
||||
/// }
|
||||
/// _ = tokio::signal::ctrl_c() => {
|
||||
/// tracing::info!("Received Ctrl+C, shutting down");
|
||||
/// std::process::exit(0);
|
||||
/// }
|
||||
/// }
|
||||
/// };
|
||||
/// ```
|
||||
pub async fn start_wadm(config: WadmConfig) -> Result<JoinSet<Result<()>>> {
|
||||
// Build storage adapter for lattice state (on by default)
|
||||
let (client, context) = nats::get_client_and_context(
|
||||
config.nats_server.clone(),
|
||||
config.domain.clone(),
|
||||
config.nats_seed.clone(),
|
||||
config.nats_jwt.clone(),
|
||||
config.nats_creds.clone(),
|
||||
config.nats_tls_ca_file.clone(),
|
||||
)
|
||||
.await?;
|
||||
|
||||
// TODO: We will probably need to set up all the flags (like lattice prefix and topic prefix) down the line
|
||||
let connection_pool = ControlClientConstructor::new(client.clone(), None);
|
||||
|
||||
let trimmer: &[_] = &['.', '>', '*'];
|
||||
|
||||
let store = nats::ensure_kv_bucket(
|
||||
&context,
|
||||
config.state_bucket,
|
||||
1,
|
||||
config.max_state_bucket_bytes,
|
||||
config.stream_persistence.into(),
|
||||
)
|
||||
.await?;
|
||||
|
||||
let state_storage = NatsKvStore::new(store);
|
||||
|
||||
let manifest_storage = nats::ensure_kv_bucket(
|
||||
&context,
|
||||
config.manifest_bucket,
|
||||
1,
|
||||
config.max_manifest_bucket_bytes,
|
||||
config.stream_persistence.into(),
|
||||
)
|
||||
.await?;
|
||||
|
||||
let internal_stream_name = |stream_name: &str| -> String {
|
||||
match config.stream_prefix.clone() {
|
||||
Some(stream_prefix) => {
|
||||
format!(
|
||||
"{}.{}",
|
||||
stream_prefix.trim_end_matches(trimmer),
|
||||
stream_name
|
||||
)
|
||||
}
|
||||
None => stream_name.to_string(),
|
||||
}
|
||||
};
|
||||
|
||||
debug!("Ensuring wadm event stream");
|
||||
|
||||
let event_stream = nats::ensure_limits_stream(
|
||||
&context,
|
||||
internal_stream_name(DEFAULT_WADM_EVENT_STREAM_NAME),
|
||||
vec![DEFAULT_WADM_EVENTS_TOPIC.to_owned()],
|
||||
Some(
|
||||
"A stream that stores all events coming in on the wadm.evt subject in a cluster"
|
||||
.to_string(),
|
||||
),
|
||||
config.max_event_stream_bytes,
|
||||
config.stream_persistence.into(),
|
||||
)
|
||||
.await?;
|
||||
|
||||
debug!("Ensuring command stream");
|
||||
|
||||
let command_stream = nats::ensure_stream(
|
||||
&context,
|
||||
internal_stream_name(DEFAULT_COMMAND_STREAM_NAME),
|
||||
vec![DEFAULT_COMMANDS_TOPIC.to_owned()],
|
||||
Some("A stream that stores all commands for wadm".to_string()),
|
||||
config.max_command_stream_bytes,
|
||||
config.stream_persistence.into(),
|
||||
)
|
||||
.await?;
|
||||
|
||||
let status_stream = nats::ensure_status_stream(
|
||||
&context,
|
||||
internal_stream_name(DEFAULT_STATUS_STREAM_NAME),
|
||||
vec![DEFAULT_STATUS_TOPIC.to_owned()],
|
||||
config.max_status_stream_bytes,
|
||||
config.stream_persistence.into(),
|
||||
)
|
||||
.await?;
|
||||
|
||||
debug!("Ensuring wasmbus event stream");
|
||||
|
||||
// Remove the previous wadm_(multitenant)_mirror streams so that they don't
|
||||
// prevent us from creating the new wasmbus_(multitenant)_events stream
|
||||
// TODO(joonas): Remove this some time in the future once we're confident
|
||||
// enough that there are no more wadm_(multitenant)_mirror streams around.
|
||||
for mirror_stream_name in &["wadm_mirror", "wadm_multitenant_mirror"] {
|
||||
if (context.get_stream(mirror_stream_name).await).is_ok() {
|
||||
context.delete_stream(mirror_stream_name).await?;
|
||||
}
|
||||
}
|
||||
|
||||
let wasmbus_event_subjects = match config.multitenant {
|
||||
true => vec![DEFAULT_MULTITENANT_EVENTS_TOPIC.to_owned()],
|
||||
false => vec![DEFAULT_EVENTS_TOPIC.to_owned()],
|
||||
};
|
||||
|
||||
let wasmbus_event_stream = nats::ensure_limits_stream(
|
||||
&context,
|
||||
DEFAULT_WASMBUS_EVENT_STREAM_NAME.to_string(),
|
||||
wasmbus_event_subjects.clone(),
|
||||
Some(
|
||||
"A stream that stores all events coming in on the wasmbus.evt subject in a cluster"
|
||||
.to_string(),
|
||||
),
|
||||
config.max_wasmbus_event_stream_bytes,
|
||||
config.stream_persistence.into(),
|
||||
)
|
||||
.await?;
|
||||
|
||||
debug!("Ensuring notify stream");
|
||||
|
||||
let notify_stream = nats::ensure_notify_stream(
|
||||
&context,
|
||||
DEFAULT_NOTIFY_STREAM_NAME.to_owned(),
|
||||
vec![format!("{WADM_NOTIFY_PREFIX}.*")],
|
||||
config.max_notify_stream_bytes,
|
||||
config.stream_persistence.into(),
|
||||
)
|
||||
.await?;
|
||||
|
||||
debug!("Ensuring event consumer stream");
|
||||
|
||||
let event_consumer_stream = nats::ensure_event_consumer_stream(
|
||||
&context,
|
||||
DEFAULT_WADM_EVENT_CONSUMER_STREAM_NAME.to_owned(),
|
||||
DEFAULT_WADM_EVENT_CONSUMER_TOPIC.to_owned(),
|
||||
vec![&wasmbus_event_stream, &event_stream],
|
||||
Some(
|
||||
"A stream that sources from wadm_events and wasmbus_events for wadm event consumer's use"
|
||||
.to_string(),
|
||||
),
|
||||
config.max_event_consumer_stream_bytes,
|
||||
config.stream_persistence.into(),
|
||||
)
|
||||
.await?;
|
||||
|
||||
debug!("Creating event consumer manager");
|
||||
|
||||
let permit_pool = Arc::new(Semaphore::new(
|
||||
config.max_jobs.unwrap_or(Semaphore::MAX_PERMITS),
|
||||
));
|
||||
let event_worker_creator = EventWorkerCreator {
|
||||
state_store: state_storage.clone(),
|
||||
manifest_store: manifest_storage.clone(),
|
||||
pool: connection_pool.clone(),
|
||||
command_topic_prefix: DEFAULT_COMMANDS_TOPIC.trim_matches(trimmer).to_owned(),
|
||||
publisher: context.clone(),
|
||||
notify_stream,
|
||||
status_stream: status_stream.clone(),
|
||||
};
|
||||
let events_manager: ConsumerManager<EventConsumer> = ConsumerManager::new(
|
||||
permit_pool.clone(),
|
||||
event_consumer_stream,
|
||||
event_worker_creator.clone(),
|
||||
config.multitenant,
|
||||
)
|
||||
.await;
|
||||
|
||||
debug!("Creating command consumer manager");
|
||||
|
||||
let command_worker_creator = CommandWorkerCreator {
|
||||
pool: connection_pool,
|
||||
};
|
||||
let commands_manager: ConsumerManager<CommandConsumer> = ConsumerManager::new(
|
||||
permit_pool.clone(),
|
||||
command_stream,
|
||||
command_worker_creator.clone(),
|
||||
config.multitenant,
|
||||
)
|
||||
.await;
|
||||
|
||||
// TODO(thomastaylor312): We might want to figure out how not to run this globally. Doing a
|
||||
// synthetic event sent to the stream could be nice, but all the wadm processes would still fire
|
||||
// off that tick, resulting in multiple people handling. We could maybe get it to work with the
|
||||
// right duplicate window, but we have no idea when each process could fire a tick. Worst case
|
||||
// scenario right now is that multiple fire simultaneously and a few of them just delete nothing
|
||||
let reaper = Reaper::new(
|
||||
state_storage.clone(),
|
||||
Duration::from_secs(config.cleanup_interval / 2),
|
||||
[],
|
||||
);
|
||||
|
||||
let wadm_event_prefix = DEFAULT_WADM_EVENTS_TOPIC.trim_matches(trimmer);
|
||||
|
||||
debug!("Creating lattice observer");
|
||||
|
||||
let observer = observer::Observer {
|
||||
parser: LatticeIdParser::new("wasmbus", config.multitenant),
|
||||
command_manager: commands_manager,
|
||||
event_manager: events_manager,
|
||||
reaper,
|
||||
client: client.clone(),
|
||||
command_worker_creator,
|
||||
event_worker_creator,
|
||||
};
|
||||
|
||||
debug!("Subscribing to API topic");
|
||||
|
||||
let server = Server::new(
|
||||
manifest_storage,
|
||||
client,
|
||||
Some(&config.api_prefix),
|
||||
config.multitenant,
|
||||
status_stream,
|
||||
ManifestNotifier::new(wadm_event_prefix, context),
|
||||
)
|
||||
.await?;
|
||||
|
||||
let mut tasks = JoinSet::new();
|
||||
|
||||
#[cfg(feature = "http_admin")]
|
||||
if let Some(addr) = config.http_admin {
|
||||
debug!("Setting up HTTP administration endpoint");
|
||||
        let socket = TcpListener::bind(addr)
            .await
            .context("failed to bind on HTTP administration endpoint")?;
        let svc = hyper::service::service_fn(move |req| {
            const OK: &str = r#"{"status":"ok"}"#;
            async move {
                let (http::request::Parts { method, uri, .. }, _) = req.into_parts();
                match (method.as_str(), uri.path()) {
                    ("HEAD", "/livez") => Ok(http::Response::default()),
                    ("GET", "/livez") => Ok(http::Response::new(http_body_util::Full::new(
                        Bytes::from(OK),
                    ))),
                    (method, "/livez") => http::Response::builder()
                        .status(http::StatusCode::METHOD_NOT_ALLOWED)
                        .body(http_body_util::Full::new(Bytes::from(format!(
                            "method `{method}` not supported for path `/livez`"
                        )))),
                    ("HEAD", "/readyz") => Ok(http::Response::default()),
                    ("GET", "/readyz") => Ok(http::Response::new(http_body_util::Full::new(
                        Bytes::from(OK),
                    ))),
                    (method, "/readyz") => http::Response::builder()
                        .status(http::StatusCode::METHOD_NOT_ALLOWED)
                        .body(http_body_util::Full::new(Bytes::from(format!(
                            "method `{method}` not supported for path `/readyz`"
                        )))),
                    (.., path) => http::Response::builder()
                        .status(http::StatusCode::NOT_FOUND)
                        .body(http_body_util::Full::new(Bytes::from(format!(
                            "unknown endpoint `{path}`"
                        )))),
                }
            }
        });
        let srv = hyper_util::server::conn::auto::Builder::new(TokioExecutor::new());
        tasks.spawn(async move {
            loop {
                let stream = match socket.accept().await {
                    Ok((stream, _)) => stream,
                    Err(err) => {
                        tracing::error!(?err, "failed to accept HTTP administration connection");
                        continue;
                    }
                };
                if let Err(err) = srv.serve_connection(TokioIo::new(stream), svc).await {
                    tracing::error!(?err, "failed to serve HTTP administration connection");
                }
            }
        });
    }
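    // Illustrative aside (not part of this change): the /livez and /readyz handlers above can
    // be probed with a plain HTTP GET. A dependency-free sketch, assuming wadm was started
    // with a hypothetical admin address of 127.0.0.1:8080:
    //
    // use std::io::{Read, Write};
    // use std::net::TcpStream;
    //
    // fn check_readyz() -> std::io::Result<String> {
    //     let mut stream = TcpStream::connect("127.0.0.1:8080")?;
    //     stream.write_all(b"GET /readyz HTTP/1.1\r\nHost: localhost\r\nConnection: close\r\n\r\n")?;
    //     let mut response = String::new();
    //     stream.read_to_string(&mut response)?;
    //     // A ready server answers 200 OK with the body {"status":"ok"}
    //     Ok(response)
    // }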

    // Subscribe and handle API requests
    tasks.spawn(server.serve());
    // Observe and handle events
    tasks.spawn(observer.observe(wasmbus_event_subjects));

    Ok(tasks)
}

#[derive(Clone)]
struct CommandWorkerCreator {
    pool: ControlClientConstructor,
}

#[async_trait::async_trait]
impl WorkerCreator for CommandWorkerCreator {
    type Output = CommandWorker;

    async fn create(
        &self,
        lattice_id: &str,
        multitenant_prefix: Option<&str>,
    ) -> anyhow::Result<Self::Output> {
        let client = self.pool.get_connection(lattice_id, multitenant_prefix);

        Ok(CommandWorker::new(client))
    }
}

#[derive(Clone)]
struct EventWorkerCreator<StateStore> {
    state_store: StateStore,
    manifest_store: async_nats::jetstream::kv::Store,
    pool: ControlClientConstructor,
    command_topic_prefix: String,
    publisher: Context,
    notify_stream: Stream,
    status_stream: Stream,
}

#[async_trait::async_trait]
impl<StateStore> WorkerCreator for EventWorkerCreator<StateStore>
where
    StateStore: crate::storage::Store + Send + Sync + Clone + 'static,
{
    type Output = EventWorker<StateStore, wasmcloud_control_interface::Client, Context>;

    async fn create(
        &self,
        lattice_id: &str,
        multitenant_prefix: Option<&str>,
    ) -> anyhow::Result<Self::Output> {
        let client = self.pool.get_connection(lattice_id, multitenant_prefix);
        let command_publisher = CommandPublisher::new(
            self.publisher.clone(),
            &format!("{}.{lattice_id}", self.command_topic_prefix),
        );
        let status_publisher = StatusPublisher::new(
            self.publisher.clone(),
            Some(self.status_stream.clone()),
            &format!("wadm.status.{lattice_id}"),
        );
        let manager = ScalerManager::new(
            self.publisher.clone(),
            self.notify_stream.clone(),
            lattice_id,
            multitenant_prefix,
            self.state_store.clone(),
            self.manifest_store.clone(),
            command_publisher.clone(),
            status_publisher.clone(),
            client.clone(),
        )
        .await?;
        Ok(EventWorker::new(
            self.state_store.clone(),
            client,
            command_publisher,
            status_publisher,
            manager,
        ))
    }
}
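// Illustrative aside (not part of this change): the `permit_pool` built in the run function
// above is a shared tokio `Semaphore` that caps how many event/command jobs run at once. A
// minimal, self-contained sketch of the same pattern (the limit of 2 and the job loop are
// made up for the example; requires the tokio crate):
use std::sync::Arc;
use tokio::sync::Semaphore;

#[tokio::main]
async fn main() {
    let permit_pool = Arc::new(Semaphore::new(2));
    let mut handles = Vec::new();
    for job in 0..5 {
        let pool = permit_pool.clone();
        handles.push(tokio::spawn(async move {
            // Acquiring a permit waits once two jobs are already in flight
            let _permit = pool.acquire().await.expect("semaphore closed");
            println!("job {job} running");
        }));
    }
    for handle in handles {
        handle.await.expect("task panicked");
    }
}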
@@ -5,17 +5,52 @@ use async_nats::{
    jetstream::{
        self,
        kv::{Config as KvConfig, Store},
        stream::{Config as StreamConfig, Source, Stream, SubjectTransform},
        stream::{Config as StreamConfig, Source, StorageType, Stream, SubjectTransform},
        Context,
    },
    Client, ConnectOptions,
};

use tracing::warn;
use wadm::DEFAULT_EXPIRY_TIME;
use crate::DEFAULT_EXPIRY_TIME;
use tracing::{debug, warn};

#[derive(Debug, Clone, Copy, Default)]
pub enum StreamPersistence {
    #[default]
    File,
    Memory,
}

impl std::fmt::Display for StreamPersistence {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        match self {
            StreamPersistence::File => write!(f, "file"),
            StreamPersistence::Memory => write!(f, "memory"),
        }
    }
}

impl From<StreamPersistence> for StorageType {
    fn from(persistance: StreamPersistence) -> Self {
        match persistance {
            StreamPersistence::File => StorageType::File,
            StreamPersistence::Memory => StorageType::Memory,
        }
    }
}

impl From<&str> for StreamPersistence {
    fn from(persistance: &str) -> Self {
        match persistance {
            "file" => StreamPersistence::File,
            "memory" => StreamPersistence::Memory,
            _ => StreamPersistence::File,
        }
    }
}
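// Illustrative aside (not part of this change): how the conversions above behave. The
// `_persistence_examples` function is hypothetical and only exists to show expected values;
// unrecognized strings deliberately fall back to file-backed storage.
fn _persistence_examples() {
    assert!(matches!(
        StorageType::from(StreamPersistence::from("memory")),
        StorageType::Memory
    ));
    assert!(matches!(
        StorageType::from(StreamPersistence::from("anything-else")),
        StorageType::File
    ));
    assert_eq!(StreamPersistence::default().to_string(), "file");
}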

/// Creates a NATS client from the given options
pub async fn get_client_and_context(
pub(crate) async fn get_client_and_context(
    url: String,
    js_domain: Option<String>,
    seed: Option<String>,
@@ -120,7 +155,10 @@ pub async fn ensure_stream(
|
|||
name: String,
|
||||
subjects: Vec<String>,
|
||||
description: Option<String>,
|
||||
max_bytes: i64,
|
||||
storage: StorageType,
|
||||
) -> Result<Stream> {
|
||||
debug!("Ensuring stream {name} exists");
|
||||
let stream_config = StreamConfig {
|
||||
name: name.clone(),
|
||||
description,
|
||||
|
@ -128,8 +166,9 @@ pub async fn ensure_stream(
|
|||
retention: async_nats::jetstream::stream::RetentionPolicy::WorkQueue,
|
||||
subjects,
|
||||
max_age: DEFAULT_EXPIRY_TIME,
|
||||
storage: async_nats::jetstream::stream::StorageType::File,
|
||||
allow_rollup: false,
|
||||
max_bytes,
|
||||
storage,
|
||||
..Default::default()
|
||||
};
|
||||
|
||||
|
@ -157,7 +196,10 @@ pub async fn ensure_limits_stream(
|
|||
name: String,
|
||||
subjects: Vec<String>,
|
||||
description: Option<String>,
|
||||
max_bytes: i64,
|
||||
storage: StorageType,
|
||||
) -> Result<Stream> {
|
||||
debug!("Ensuring stream {name} exists");
|
||||
let stream_config = StreamConfig {
|
||||
name: name.clone(),
|
||||
description,
|
||||
|
@ -165,8 +207,9 @@ pub async fn ensure_limits_stream(
|
|||
retention: async_nats::jetstream::stream::RetentionPolicy::Limits,
|
||||
subjects,
|
||||
max_age: DEFAULT_EXPIRY_TIME,
|
||||
storage: async_nats::jetstream::stream::StorageType::File,
|
||||
allow_rollup: false,
|
||||
max_bytes,
|
||||
storage,
|
||||
..Default::default()
|
||||
};
|
||||
|
||||
|
@ -195,7 +238,10 @@ pub async fn ensure_event_consumer_stream(
|
|||
subject: String,
|
||||
streams: Vec<&Stream>,
|
||||
description: Option<String>,
|
||||
max_bytes: i64,
|
||||
storage: StorageType,
|
||||
) -> Result<Stream> {
|
||||
debug!("Ensuring stream {name} exists");
|
||||
// This maps the upstream (wasmbus.evt.*.> & wadm.evt.*.>) Streams into
|
||||
// a set of configuration for the downstream wadm event consumer Stream
|
||||
// that consolidates them into a single set of subjects (wadm_event_consumer.evt.*.>)
|
||||
|
@ -234,8 +280,9 @@ pub async fn ensure_event_consumer_stream(
|
|||
subjects: vec![],
|
||||
max_age: DEFAULT_EXPIRY_TIME,
|
||||
sources: Some(sources),
|
||||
storage: async_nats::jetstream::stream::StorageType::File,
|
||||
allow_rollup: false,
|
||||
max_bytes,
|
||||
storage,
|
||||
..Default::default()
|
||||
};
|
||||
|
||||
|
@ -258,7 +305,10 @@ pub async fn ensure_status_stream(
|
|||
context: &Context,
|
||||
name: String,
|
||||
subjects: Vec<String>,
|
||||
max_bytes: i64,
|
||||
storage: StorageType,
|
||||
) -> Result<Stream> {
|
||||
debug!("Ensuring stream {name} exists");
|
||||
context
|
||||
.get_or_create_stream(StreamConfig {
|
||||
name,
|
||||
|
@ -271,7 +321,8 @@ pub async fn ensure_status_stream(
|
|||
max_messages_per_subject: 10,
|
||||
subjects,
|
||||
max_age: std::time::Duration::from_nanos(0),
|
||||
storage: async_nats::jetstream::stream::StorageType::File,
|
||||
max_bytes,
|
||||
storage,
|
||||
..Default::default()
|
||||
})
|
||||
.await
|
||||
|
@ -283,7 +334,10 @@ pub async fn ensure_notify_stream(
|
|||
context: &Context,
|
||||
name: String,
|
||||
subjects: Vec<String>,
|
||||
max_bytes: i64,
|
||||
storage: StorageType,
|
||||
) -> Result<Stream> {
|
||||
debug!("Ensuring stream {name} exists");
|
||||
context
|
||||
.get_or_create_stream(StreamConfig {
|
||||
name,
|
||||
|
@ -292,7 +346,8 @@ pub async fn ensure_notify_stream(
|
|||
retention: async_nats::jetstream::stream::RetentionPolicy::Interest,
|
||||
subjects,
|
||||
max_age: DEFAULT_EXPIRY_TIME,
|
||||
storage: async_nats::jetstream::stream::StorageType::File,
|
||||
max_bytes,
|
||||
storage,
|
||||
..Default::default()
|
||||
})
|
||||
.await
|
||||
|
@ -305,7 +360,10 @@ pub async fn ensure_kv_bucket(
|
|||
context: &Context,
|
||||
name: String,
|
||||
history_to_keep: i64,
|
||||
max_bytes: i64,
|
||||
storage: StorageType,
|
||||
) -> Result<Store> {
|
||||
debug!("Ensuring kv bucket {name} exists");
|
||||
if let Ok(kv) = context.get_key_value(&name).await {
|
||||
Ok(kv)
|
||||
} else {
|
||||
|
@ -314,7 +372,8 @@ pub async fn ensure_kv_bucket(
|
|||
bucket: name,
|
||||
history: history_to_keep,
|
||||
num_replicas: 1,
|
||||
storage: jetstream::stream::StorageType::File,
|
||||
storage,
|
||||
max_bytes,
|
||||
..Default::default()
|
||||
})
|
||||
.await
|
||||
|
@ -330,7 +389,7 @@ mod test {
|
|||
#[tokio::test]
|
||||
async fn can_resolve_jwt_value_and_file() -> Result<()> {
|
||||
let my_jwt = "eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJ2aWRlb0lkIjoiUWpVaUxYSnVjMjl0IiwiaWF0IjoxNjIwNjAzNDY5fQ.2PKx6y2ym6IWbeM6zFgHOkDnZEtGTR3YgYlQ2_Jki5g";
|
||||
let jwt_path = "./tests/fixtures/nats.jwt";
|
||||
let jwt_path = "../../tests/fixtures/nats.jwt";
|
||||
let jwt_inside_file = "eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJzdHJpbmciOiAiQWNjb3JkIHRvIGFsbCBrbm93biBsb3dzIG9mIGF2aWF0aW9uLCB0aGVyZSBpcyBubyB3YXkgdGhhdCBhIGJlZSBhYmxlIHRvIGZseSJ9.GyU6pTRhflcOg6KBCU6wZedP8BQzLXbdgYIoU6KzzD8";
|
||||
|
||||
assert_eq!(
|
|
@@ -4,7 +4,7 @@ use async_nats::Subscriber;
|
|||
use futures::{stream::SelectAll, StreamExt, TryFutureExt};
|
||||
use tracing::{debug, error, instrument, trace, warn};
|
||||
|
||||
use wadm::{
|
||||
use crate::{
|
||||
consumers::{
|
||||
manager::{ConsumerManager, WorkerCreator},
|
||||
CommandConsumer, EventConsumer,
|
|
@@ -0,0 +1,780 @@
|
|||
//! Contains code for converting the list of [`Component`]s in an application into a list of [`Scaler`]s
|
||||
//! that are responsible for monitoring and enforcing the desired state of a lattice
|
||||
|
||||
use std::{collections::HashMap, time::Duration};
|
||||
|
||||
use anyhow::Result;
|
||||
use tracing::{error, warn};
|
||||
use wadm_types::{
|
||||
api::StatusInfo, CapabilityProperties, Component, ComponentProperties, ConfigProperty,
|
||||
LinkProperty, Policy, Properties, SecretProperty, SharedApplicationComponentProperties,
|
||||
SpreadScalerProperty, Trait, TraitProperty, DAEMONSCALER_TRAIT, LINK_TRAIT, SPREADSCALER_TRAIT,
|
||||
};
|
||||
use wasmcloud_secrets_types::SECRET_PREFIX;
|
||||
|
||||
use crate::{
|
||||
publisher::Publisher,
|
||||
scaler::{
|
||||
spreadscaler::{link::LINK_SCALER_KIND, ComponentSpreadScaler, SPREAD_SCALER_KIND},
|
||||
statusscaler::StatusScaler,
|
||||
Scaler,
|
||||
},
|
||||
storage::{snapshot::SnapshotStore, ReadStore},
|
||||
workers::{ConfigSource, LinkSource, SecretSource},
|
||||
DEFAULT_LINK_NAME,
|
||||
};
|
||||
|
||||
use super::{
|
||||
configscaler::ConfigScaler,
|
||||
daemonscaler::{provider::ProviderDaemonScaler, ComponentDaemonScaler},
|
||||
secretscaler::SecretScaler,
|
||||
spreadscaler::{
|
||||
link::{LinkScaler, LinkScalerConfig},
|
||||
provider::{ProviderSpreadConfig, ProviderSpreadScaler},
|
||||
},
|
||||
BackoffWrapper,
|
||||
};
|
||||
|
||||
pub(crate) type BoxedScaler = Box<dyn Scaler + Send + Sync + 'static>;
|
||||
pub(crate) type ScalerList = Vec<BoxedScaler>;
|
||||
|
||||
const EMPTY_TRAIT_VEC: Vec<Trait> = Vec::new();
|
||||
|
||||
/// Converts a list of manifest [`Component`]s into a [`ScalerList`], resolving shared application
|
||||
/// references, links, configuration and secrets as necessary.
|
||||
///
|
||||
/// # Arguments
|
||||
/// * `components` - The list of components to convert
|
||||
/// * `policies` - The policies to use when creating the scalers so they can access secrets
|
||||
/// * `lattice_id` - The lattice id the scalers operate on
|
||||
/// * `notifier` - The publisher to use when creating the scalers so they can report status
|
||||
/// * `name` - The name of the manifest that the scalers are being created for
|
||||
/// * `notifier_subject` - The subject to use when creating the scalers so they can report status
|
||||
/// * `snapshot_data` - The store to use when creating the scalers so they can access lattice state
|
||||
pub(crate) fn manifest_components_to_scalers<S, P, L>(
|
||||
components: &[Component],
|
||||
policies: &HashMap<&String, &Policy>,
|
||||
lattice_id: &str,
|
||||
manifest_name: &str,
|
||||
notifier_subject: &str,
|
||||
notifier: &P,
|
||||
snapshot_data: &SnapshotStore<S, L>,
|
||||
) -> ScalerList
|
||||
where
|
||||
S: ReadStore + Send + Sync + Clone + 'static,
|
||||
P: Publisher + Clone + Send + Sync + 'static,
|
||||
L: LinkSource + ConfigSource + SecretSource + Clone + Send + Sync + 'static,
|
||||
{
|
||||
let mut scalers: ScalerList = Vec::new();
|
||||
components
|
||||
.iter()
|
||||
.for_each(|component| match &component.properties {
|
||||
Properties::Component { properties } => {
|
||||
// Determine if this component is contained in this manifest or a shared application
|
||||
let (application_name, component_name) = match resolve_manifest_component(
|
||||
manifest_name,
|
||||
&component.name,
|
||||
properties.image.as_ref(),
|
||||
properties.application.as_ref(),
|
||||
) {
|
||||
Ok(names) => names,
|
||||
Err(err) => {
|
||||
error!(err);
|
||||
scalers.push(Box::new(StatusScaler::new(
|
||||
uuid::Uuid::new_v4().to_string(),
|
||||
SPREAD_SCALER_KIND,
|
||||
&component.name,
|
||||
StatusInfo::failed(err),
|
||||
)) as BoxedScaler);
|
||||
return;
|
||||
}
|
||||
};
|
||||
|
||||
component_scalers(
|
||||
&mut scalers,
|
||||
components,
|
||||
properties,
|
||||
component.traits.as_ref(),
|
||||
manifest_name,
|
||||
application_name,
|
||||
component_name,
|
||||
lattice_id,
|
||||
policies,
|
||||
notifier_subject,
|
||||
notifier,
|
||||
snapshot_data,
|
||||
)
|
||||
}
|
||||
Properties::Capability { properties } => {
|
||||
// Determine if this component is contained in this manifest or a shared application
|
||||
let (application_name, component_name) = match resolve_manifest_component(
|
||||
manifest_name,
|
||||
&component.name,
|
||||
properties.image.as_ref(),
|
||||
properties.application.as_ref(),
|
||||
) {
|
||||
Ok(names) => names,
|
||||
Err(err) => {
|
||||
error!(err);
|
||||
scalers.push(Box::new(StatusScaler::new(
|
||||
uuid::Uuid::new_v4().to_string(),
|
||||
SPREAD_SCALER_KIND,
|
||||
&component.name,
|
||||
StatusInfo::failed(err),
|
||||
)) as BoxedScaler);
|
||||
return;
|
||||
}
|
||||
};
|
||||
provider_scalers(
|
||||
&mut scalers,
|
||||
components,
|
||||
properties,
|
||||
component.traits.as_ref(),
|
||||
manifest_name,
|
||||
application_name,
|
||||
component_name,
|
||||
lattice_id,
|
||||
policies,
|
||||
notifier_subject,
|
||||
notifier,
|
||||
snapshot_data,
|
||||
)
|
||||
}
|
||||
});
|
||||
scalers
|
||||
}
|
||||
|
||||
/// Helper function, primarily to remove nesting, that extends a [`ScalerList`] with all scalers
|
||||
/// from a (Wasm) component [`Component`]
|
||||
///
|
||||
/// # Arguments
|
||||
/// * `scalers` - The list of scalers to extend
|
||||
/// * `components` - The list of components to convert
|
||||
/// * `properties` - The properties of the component to convert
|
||||
/// * `traits` - The traits of the component to convert
|
||||
/// * `manifest_name` - The name of the manifest that the scalers are being created for
|
||||
/// * `application_name` - The name of the application that the scalers are being created for
|
||||
/// * `component_name` - The name of the component to convert
|
||||
/// * The following arguments are required to create scalers and are passed directly through to them:
|
||||
/// * `lattice_id` - The lattice id the scalers operate on
|
||||
/// * `policies` - The policies to use when creating the scalers so they can access secrets
|
||||
/// * `notifier_subject` - The subject to use when creating the scalers so they can report status
|
||||
/// * `notifier` - The publisher to use when creating the scalers so they can report status
|
||||
/// * `snapshot_data` - The store to use when creating the scalers so they can access lattice state
|
||||
#[allow(clippy::too_many_arguments)]
|
||||
fn component_scalers<S, P, L>(
|
||||
scalers: &mut ScalerList,
|
||||
components: &[Component],
|
||||
properties: &ComponentProperties,
|
||||
traits: Option<&Vec<Trait>>,
|
||||
manifest_name: &str,
|
||||
application_name: &str,
|
||||
component_name: &str,
|
||||
lattice_id: &str,
|
||||
policies: &HashMap<&String, &Policy>,
|
||||
notifier_subject: &str,
|
||||
notifier: &P,
|
||||
snapshot_data: &SnapshotStore<S, L>,
|
||||
) where
|
||||
S: ReadStore + Send + Sync + Clone + 'static,
|
||||
P: Publisher + Clone + Send + Sync + 'static,
|
||||
L: LinkSource + ConfigSource + SecretSource + Clone + Send + Sync + 'static,
|
||||
{
|
||||
scalers.extend(traits.unwrap_or(&EMPTY_TRAIT_VEC).iter().filter_map(|trt| {
|
||||
// If an image is specified, then it's a component in the same manifest. Otherwise, it's a shared component
|
||||
let component_id = if properties.image.is_some() {
|
||||
compute_component_id(manifest_name, properties.id.as_ref(), component_name)
|
||||
} else {
|
||||
compute_component_id(application_name, properties.id.as_ref(), component_name)
|
||||
};
|
||||
let (config_scalers, mut config_names) =
|
||||
config_to_scalers(snapshot_data, manifest_name, &properties.config);
|
||||
let (secret_scalers, secret_names) = secrets_to_scalers(
|
||||
snapshot_data,
|
||||
manifest_name,
|
||||
&properties.secrets,
|
||||
policies,
|
||||
);
|
||||
|
||||
config_names.append(&mut secret_names.clone());
|
||||
// TODO(#451): Consider a way to report on status of a shared component
|
||||
match (trt.trait_type.as_str(), &trt.properties, &properties.image) {
|
||||
// Shared application components already have their own spread/daemon scalers, you
|
||||
// cannot modify them from another manifest
|
||||
(SPREADSCALER_TRAIT, TraitProperty::SpreadScaler(_), None) => {
|
||||
warn!(
|
||||
"Unsupported SpreadScaler trait specified for a shared component {component_name}"
|
||||
);
|
||||
None
|
||||
}
|
||||
(DAEMONSCALER_TRAIT, TraitProperty::SpreadScaler(_), None) => {
|
||||
warn!(
|
||||
"Unsupported DaemonScaler trait specified for a shared component {component_name}"
|
||||
);
|
||||
None
|
||||
}
|
||||
(SPREADSCALER_TRAIT, TraitProperty::SpreadScaler(p), Some(image_ref)) => {
|
||||
// If the image is not specified, then it's a reference to a shared provider
|
||||
// in a different manifest
|
||||
Some(Box::new(BackoffWrapper::new(
|
||||
ComponentSpreadScaler::new(
|
||||
snapshot_data.clone(),
|
||||
image_ref.clone(),
|
||||
component_id,
|
||||
lattice_id.to_owned(),
|
||||
application_name.to_owned(),
|
||||
p.to_owned(),
|
||||
component_name,
|
||||
config_names,
|
||||
),
|
||||
notifier.clone(),
|
||||
config_scalers,
|
||||
secret_scalers,
|
||||
notifier_subject,
|
||||
application_name,
|
||||
Some(Duration::from_secs(5)),
|
||||
)) as BoxedScaler)
|
||||
}
|
||||
(DAEMONSCALER_TRAIT, TraitProperty::SpreadScaler(p), Some(image_ref)) => {
|
||||
Some(Box::new(BackoffWrapper::new(
|
||||
ComponentDaemonScaler::new(
|
||||
snapshot_data.clone(),
|
||||
image_ref.to_owned(),
|
||||
component_id,
|
||||
lattice_id.to_owned(),
|
||||
application_name.to_owned(),
|
||||
p.to_owned(),
|
||||
component_name,
|
||||
config_names,
|
||||
),
|
||||
notifier.clone(),
|
||||
config_scalers,
|
||||
secret_scalers,
|
||||
notifier_subject,
|
||||
application_name,
|
||||
Some(Duration::from_secs(5)),
|
||||
)) as BoxedScaler)
|
||||
}
|
||||
(LINK_TRAIT, TraitProperty::Link(p), _) => {
|
||||
// Find the target component of the link and create a scaler for it
|
||||
components
|
||||
.iter()
|
||||
.find_map(|component| match &component.properties {
|
||||
Properties::Capability {
|
||||
properties:
|
||||
CapabilityProperties {
|
||||
id,
|
||||
application,
|
||||
image,
|
||||
..
|
||||
},
|
||||
}
|
||||
| Properties::Component {
|
||||
properties:
|
||||
ComponentProperties {
|
||||
id,
|
||||
application,
|
||||
image,
|
||||
..
|
||||
},
|
||||
} if component.name == p.target.name => Some(link_scaler(
|
||||
p,
|
||||
lattice_id,
|
||||
manifest_name,
|
||||
application_name,
|
||||
&component.name,
|
||||
component_id.to_string(),
|
||||
id.as_ref(),
|
||||
image.as_ref(),
|
||||
application.as_ref(),
|
||||
policies,
|
||||
notifier_subject,
|
||||
notifier,
|
||||
snapshot_data,
|
||||
)),
|
||||
_ => None,
|
||||
})
|
||||
}
|
||||
_ => None,
|
||||
}
|
||||
}));
|
||||
}
|
||||
|
||||
/// Helper function, primarily to remove nesting, that extends a [`ScalerList`] with all scalers
|
||||
/// from a capability provider [`Component`]
|
||||
///
/// # Arguments
|
||||
/// * `scalers` - The list of scalers to extend
|
||||
/// * `components` - The list of components to convert
|
||||
/// * `properties` - The properties of the capability provider to convert
|
||||
/// * `traits` - The traits of the component to convert
|
||||
/// * `manifest_name` - The name of the manifest that the scalers are being created for
|
||||
/// * `application_name` - The name of the application that the scalers are being created for
|
||||
/// * `component_name` - The name of the component to convert
|
||||
/// * The following arguments are required to create scalers and are passed directly through to them:
|
||||
/// * `lattice_id` - The lattice id the scalers operate on
|
||||
/// * `policies` - The policies to use when creating the scalers so they can access secrets
|
||||
/// * `notifier_subject` - The subject to use when creating the scalers so they can report status
|
||||
/// * `notifier` - The publisher to use when creating the scalers so they can report status
|
||||
/// * `snapshot_data` - The store to use when creating the scalers so they can access lattice state
|
||||
#[allow(clippy::too_many_arguments)]
|
||||
fn provider_scalers<S, P, L>(
|
||||
scalers: &mut ScalerList,
|
||||
components: &[Component],
|
||||
properties: &CapabilityProperties,
|
||||
traits: Option<&Vec<Trait>>,
|
||||
manifest_name: &str,
|
||||
application_name: &str,
|
||||
component_name: &str,
|
||||
lattice_id: &str,
|
||||
policies: &HashMap<&String, &Policy>,
|
||||
notifier_subject: &str,
|
||||
notifier: &P,
|
||||
snapshot_data: &SnapshotStore<S, L>,
|
||||
) where
|
||||
S: ReadStore + Send + Sync + Clone + 'static,
|
||||
P: Publisher + Clone + Send + Sync + 'static,
|
||||
L: LinkSource + ConfigSource + SecretSource + Clone + Send + Sync + 'static,
|
||||
{
|
||||
// If an image is specified, then it's a provider in the same manifest. Otherwise, it's a shared component
|
||||
let provider_id = if properties.image.is_some() {
|
||||
compute_component_id(manifest_name, properties.id.as_ref(), component_name)
|
||||
} else {
|
||||
compute_component_id(application_name, properties.id.as_ref(), component_name)
|
||||
};
|
||||
|
||||
let mut scaler_specified = false;
|
||||
scalers.extend(traits.unwrap_or(&EMPTY_TRAIT_VEC).iter().filter_map(|trt| {
|
||||
match (trt.trait_type.as_str(), &trt.properties, &properties.image) {
|
||||
// Shared application components already have their own spread/daemon scalers, you
|
||||
// cannot modify them from another manifest
|
||||
(SPREADSCALER_TRAIT, TraitProperty::SpreadScaler(_), None) => {
|
||||
warn!(
|
||||
"Unsupported SpreadScaler trait specified for a shared provider {component_name}"
|
||||
);
|
||||
None
|
||||
}
|
||||
(DAEMONSCALER_TRAIT, TraitProperty::SpreadScaler(_), None) => {
|
||||
warn!(
|
||||
"Unsupported DaemonScaler trait specified for a shared provider {component_name}"
|
||||
);
|
||||
None
|
||||
}
|
||||
(SPREADSCALER_TRAIT, TraitProperty::SpreadScaler(p), Some(image)) => {
|
||||
scaler_specified = true;
|
||||
let (config_scalers, mut config_names) =
|
||||
config_to_scalers(snapshot_data, application_name, &properties.config);
|
||||
let (secret_scalers, secret_names) = secrets_to_scalers(
|
||||
snapshot_data,
|
||||
application_name,
|
||||
&properties.secrets,
|
||||
policies,
|
||||
);
|
||||
config_names.append(&mut secret_names.clone());
|
||||
|
||||
Some(Box::new(BackoffWrapper::new(
|
||||
ProviderSpreadScaler::new(
|
||||
snapshot_data.clone(),
|
||||
ProviderSpreadConfig {
|
||||
lattice_id: lattice_id.to_owned(),
|
||||
provider_id: provider_id.to_owned(),
|
||||
provider_reference: image.to_owned(),
|
||||
spread_config: p.to_owned(),
|
||||
model_name: application_name.to_owned(),
|
||||
provider_config: config_names,
|
||||
},
|
||||
component_name,
|
||||
),
|
||||
notifier.clone(),
|
||||
config_scalers,
|
||||
secret_scalers,
|
||||
notifier_subject,
|
||||
application_name,
|
||||
// Providers are a bit longer because it can take a bit to download
|
||||
Some(Duration::from_secs(60)),
|
||||
)) as BoxedScaler)
|
||||
}
|
||||
(DAEMONSCALER_TRAIT, TraitProperty::SpreadScaler(p), Some(image)) => {
|
||||
scaler_specified = true;
|
||||
let (config_scalers, mut config_names) =
|
||||
config_to_scalers(snapshot_data, application_name, &properties.config);
|
||||
let (secret_scalers, secret_names) = secrets_to_scalers(
|
||||
snapshot_data,
|
||||
application_name,
|
||||
&properties.secrets,
|
||||
policies,
|
||||
);
|
||||
config_names.append(&mut secret_names.clone());
|
||||
Some(Box::new(BackoffWrapper::new(
|
||||
ProviderDaemonScaler::new(
|
||||
snapshot_data.clone(),
|
||||
ProviderSpreadConfig {
|
||||
lattice_id: lattice_id.to_owned(),
|
||||
provider_id: provider_id.to_owned(),
|
||||
provider_reference: image.to_owned(),
|
||||
spread_config: p.to_owned(),
|
||||
model_name: application_name.to_owned(),
|
||||
provider_config: config_names,
|
||||
},
|
||||
component_name,
|
||||
),
|
||||
notifier.clone(),
|
||||
config_scalers,
|
||||
secret_scalers,
|
||||
notifier_subject,
|
||||
application_name,
|
||||
// Providers are a bit longer because it can take a bit to download
|
||||
Some(Duration::from_secs(60)),
|
||||
)) as BoxedScaler)
|
||||
}
|
||||
// Find the target component of the link and create a scaler for it.
|
||||
(LINK_TRAIT, TraitProperty::Link(p), _) => {
|
||||
components
|
||||
.iter()
|
||||
.find_map(|component| match &component.properties {
|
||||
// Providers cannot link to other providers, only components
|
||||
Properties::Capability { .. } if component.name == p.target.name => {
|
||||
error!(
|
||||
"Provider {} cannot link to provider {}, only components",
|
||||
&component.name, p.target.name
|
||||
);
|
||||
None
|
||||
}
|
||||
Properties::Component {
|
||||
properties:
|
||||
ComponentProperties {
|
||||
image,
|
||||
application,
|
||||
id,
|
||||
..
|
||||
},
|
||||
} if component.name == p.target.name => Some(link_scaler(
|
||||
p,
|
||||
lattice_id,
|
||||
manifest_name,
|
||||
application_name,
|
||||
&component.name,
|
||||
provider_id.to_owned(),
|
||||
id.as_ref(),
|
||||
image.as_ref(),
|
||||
application.as_ref(),
|
||||
policies,
|
||||
notifier_subject,
|
||||
notifier,
|
||||
snapshot_data,
|
||||
)),
|
||||
_ => None,
|
||||
})
|
||||
}
|
||||
_ => None,
|
||||
}
|
||||
}));
|
||||
// Allow providers to omit the spreadscaler entirely for simplicity
|
||||
if !scaler_specified {
|
||||
if let Some(image) = &properties.image {
|
||||
let (config_scalers, mut config_names) =
|
||||
config_to_scalers(snapshot_data, application_name, &properties.config);
|
||||
|
||||
let (secret_scalers, mut secret_names) = secrets_to_scalers(
|
||||
snapshot_data,
|
||||
application_name,
|
||||
&properties.secrets,
|
||||
policies,
|
||||
);
|
||||
config_names.append(&mut secret_names);
|
||||
scalers.push(Box::new(BackoffWrapper::new(
|
||||
ProviderSpreadScaler::new(
|
||||
snapshot_data.clone(),
|
||||
ProviderSpreadConfig {
|
||||
lattice_id: lattice_id.to_owned(),
|
||||
provider_id,
|
||||
provider_reference: image.to_owned(),
|
||||
spread_config: SpreadScalerProperty {
|
||||
instances: 1,
|
||||
spread: vec![],
|
||||
},
|
||||
model_name: application_name.to_owned(),
|
||||
provider_config: config_names,
|
||||
},
|
||||
component_name,
|
||||
),
|
||||
notifier.clone(),
|
||||
config_scalers,
|
||||
secret_scalers,
|
||||
notifier_subject,
|
||||
application_name,
|
||||
// Providers are a bit longer because it can take a bit to download
|
||||
Some(Duration::from_secs(60)),
|
||||
)) as BoxedScaler)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Resolves configuration, secrets, and the target of a link to create a boxed [`LinkScaler`]
|
||||
///
|
||||
/// # Arguments
|
||||
/// * `link_property` - The properties of the link to convert
|
||||
/// * `lattice_id` - The lattice id the scalers operate on
|
||||
/// * `manifest_name` - The name of the manifest that the scalers are being created for
|
||||
/// * `application_name` - The name of the application that the scalers are being created for
|
||||
/// * `component_name` - The name of the component to convert
|
||||
/// * `source_id` - The ID of the source component
|
||||
/// * `target_id` - The optional ID of the target component
|
||||
/// * `image` - The optional image reference of the target component
|
||||
/// * `shared` - The optional shared application reference of the target component
|
||||
/// * `policies` - The policies to use when creating the scalers so they can access secrets
|
||||
/// * `notifier_subject` - The subject to use when creating the scalers so they can report status
|
||||
/// * `notifier` - The publisher to use when creating the scalers so they can report status
|
||||
/// * `snapshot_data` - The store to use when creating the scalers so they can access lattice state
|
||||
#[allow(clippy::too_many_arguments)]
|
||||
fn link_scaler<S, P, L>(
|
||||
link_property: &LinkProperty,
|
||||
lattice_id: &str,
|
||||
manifest_name: &str,
|
||||
application_name: &str,
|
||||
component_name: &str,
|
||||
source_id: String,
|
||||
target_id: Option<&String>,
|
||||
image: Option<&String>,
|
||||
shared: Option<&SharedApplicationComponentProperties>,
|
||||
policies: &HashMap<&String, &Policy>,
|
||||
notifier_subject: &str,
|
||||
notifier: &P,
|
||||
snapshot_data: &SnapshotStore<S, L>,
|
||||
) -> BoxedScaler
|
||||
where
|
||||
S: ReadStore + Send + Sync + Clone + 'static,
|
||||
P: Publisher + Clone + Send + Sync + 'static,
|
||||
L: LinkSource + ConfigSource + SecretSource + Clone + Send + Sync + 'static,
|
||||
{
|
||||
let (mut config_scalers, mut source_config) = config_to_scalers(
|
||||
snapshot_data,
|
||||
manifest_name,
|
||||
&link_property
|
||||
.source
|
||||
.as_ref()
|
||||
.unwrap_or(&Default::default())
|
||||
.config,
|
||||
);
|
||||
let (target_config_scalers, mut target_config) =
|
||||
config_to_scalers(snapshot_data, manifest_name, &link_property.target.config);
|
||||
let (target_secret_scalers, target_secrets) = secrets_to_scalers(
|
||||
snapshot_data,
|
||||
manifest_name,
|
||||
&link_property.target.secrets,
|
||||
policies,
|
||||
);
|
||||
let (mut source_secret_scalers, source_secrets) = secrets_to_scalers(
|
||||
snapshot_data,
|
||||
manifest_name,
|
||||
&link_property
|
||||
.source
|
||||
.as_ref()
|
||||
.unwrap_or(&Default::default())
|
||||
.secrets,
|
||||
policies,
|
||||
);
|
||||
config_scalers.extend(target_config_scalers);
|
||||
source_secret_scalers.extend(target_secret_scalers);
|
||||
target_config.extend(target_secrets);
|
||||
source_config.extend(source_secrets);
|
||||
|
||||
let (target_manifest_name, target_component_name) =
|
||||
match resolve_manifest_component(manifest_name, component_name, image, shared) {
|
||||
Ok(name) => name,
|
||||
Err(err) => {
|
||||
error!(err);
|
||||
return Box::new(StatusScaler::new(
|
||||
uuid::Uuid::new_v4().to_string(),
|
||||
LINK_SCALER_KIND,
|
||||
format!(
|
||||
"{} -({}:{})-> {}",
|
||||
component_name,
|
||||
link_property.namespace,
|
||||
link_property.package,
|
||||
link_property.target.name
|
||||
),
|
||||
StatusInfo::failed(err),
|
||||
)) as BoxedScaler;
|
||||
}
|
||||
};
|
||||
let target = compute_component_id(target_manifest_name, target_id, target_component_name);
|
||||
Box::new(BackoffWrapper::new(
|
||||
LinkScaler::new(
|
||||
snapshot_data.clone(),
|
||||
LinkScalerConfig {
|
||||
source_id,
|
||||
target,
|
||||
wit_namespace: link_property.namespace.to_owned(),
|
||||
wit_package: link_property.package.to_owned(),
|
||||
wit_interfaces: link_property.interfaces.to_owned(),
|
||||
name: link_property
|
||||
.name
|
||||
.to_owned()
|
||||
.unwrap_or_else(|| DEFAULT_LINK_NAME.to_string()),
|
||||
lattice_id: lattice_id.to_owned(),
|
||||
model_name: application_name.to_owned(),
|
||||
source_config,
|
||||
target_config,
|
||||
},
|
||||
snapshot_data.clone(),
|
||||
),
|
||||
notifier.clone(),
|
||||
config_scalers,
|
||||
source_secret_scalers,
|
||||
notifier_subject,
|
||||
application_name,
|
||||
Some(Duration::from_secs(5)),
|
||||
)) as BoxedScaler
|
||||
}
|
||||
|
||||
/// Returns a tuple which is a list of scalers and a list of the names of the configs that the
|
||||
/// scalers use.
|
||||
///
|
||||
/// Any input [ConfigProperty] that has a `properties` field will be converted into a [ConfigScaler], and
/// the name of the configuration will be modified to be unique to the model and component. If the
/// `properties` field is not present, the name is used as-is and the configuration is assumed to be
/// managed externally to wadm.
|
||||
fn config_to_scalers<C: ConfigSource + Send + Sync + Clone>(
|
||||
config_source: &C,
|
||||
manifest_name: &str,
|
||||
configs: &[ConfigProperty],
|
||||
) -> (Vec<ConfigScaler<C>>, Vec<String>) {
|
||||
configs
|
||||
.iter()
|
||||
.map(|config| {
|
||||
let name = if config.properties.is_some() {
|
||||
compute_component_id(manifest_name, None, &config.name)
|
||||
} else {
|
||||
config.name.clone()
|
||||
};
|
||||
(
|
||||
ConfigScaler::new(config_source.clone(), &name, config.properties.as_ref()),
|
||||
name,
|
||||
)
|
||||
})
|
||||
.unzip()
|
||||
}
|
||||
|
||||
fn secrets_to_scalers<S: SecretSource + Send + Sync + Clone>(
|
||||
secret_source: &S,
|
||||
manifest_name: &str,
|
||||
secrets: &[SecretProperty],
|
||||
policies: &HashMap<&String, &Policy>,
|
||||
) -> (Vec<SecretScaler<S>>, Vec<String>) {
|
||||
secrets
|
||||
.iter()
|
||||
.map(|s| {
|
||||
let name = compute_secret_id(manifest_name, None, &s.name);
|
||||
let policy = *policies.get(&s.properties.policy).unwrap();
|
||||
(
|
||||
SecretScaler::new(
|
||||
name.clone(),
|
||||
policy.clone(),
|
||||
s.clone(),
|
||||
secret_source.clone(),
|
||||
),
|
||||
name,
|
||||
)
|
||||
})
|
||||
.unzip()
|
||||
}
|
||||
|
||||
/// Based on the name of the model and the optionally provided ID, returns a unique ID for the
|
||||
/// component that is a sanitized version of the component reference and model name, separated
|
||||
/// by a dash.
|
||||
pub(crate) fn compute_component_id(
|
||||
manifest_name: &str,
|
||||
component_id: Option<&String>,
|
||||
component_name: &str,
|
||||
) -> String {
|
||||
if let Some(id) = component_id {
|
||||
id.to_owned()
|
||||
} else {
|
||||
format!(
|
||||
"{}-{}",
|
||||
manifest_name
|
||||
.to_lowercase()
|
||||
.replace(|c: char| !c.is_ascii_alphanumeric(), "_"),
|
||||
component_name
|
||||
.to_lowercase()
|
||||
.replace(|c: char| !c.is_ascii_alphanumeric(), "_")
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
pub(crate) fn compute_secret_id(
|
||||
manifest_name: &str,
|
||||
component_id: Option<&String>,
|
||||
component_name: &str,
|
||||
) -> String {
|
||||
let name = compute_component_id(manifest_name, component_id, component_name);
|
||||
format!("{SECRET_PREFIX}_{name}")
|
||||
}
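// Illustrative aside (not part of this change): the naming rules above in action. The
// `_example_ids` function is hypothetical and only exists to show expected values.
fn _example_ids() {
    // Manifest and component names are lowercased and non-alphanumeric characters become '_'
    assert_eq!(
        compute_component_id("my app", None, "http-component"),
        "my_app-http_component"
    );
    // Secret IDs are the same ID prefixed with SECRET_PREFIX from wasmcloud_secrets_types
    assert_eq!(
        compute_secret_id("my app", None, "api-key"),
        format!("{SECRET_PREFIX}_my_app-api_key")
    );
}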
|
||||
|
||||
/// Helper function to resolve a link to a manifest component, returning the name of the manifest
|
||||
/// and the name of the component where the target resides.
|
||||
///
|
||||
/// If the component resides in the same manifest, then the name of the manifest & the name of the
|
||||
/// component as specified will be returned. In the case that the component resides in a shared
|
||||
/// application, the name of the shared application & the name of the component in that application
|
||||
/// will be returned.
|
||||
///
|
||||
/// # Arguments
|
||||
/// * `application_name` - The name of the manifest that the scalers are being created for
|
||||
/// * `component_name` - The name of the component in the source manifest to target
|
||||
/// * `component_image_ref` - The image reference for the component
|
||||
/// * `shared_app_info` - The optional shared application reference for the component
|
||||
fn resolve_manifest_component<'a>(
|
||||
application_name: &'a str,
|
||||
component_name: &'a str,
|
||||
component_image_ref: Option<&'a String>,
|
||||
shared_app_info: Option<&'a SharedApplicationComponentProperties>,
|
||||
) -> Result<(&'a str, &'a str), &'a str> {
|
||||
match (component_image_ref, shared_app_info) {
|
||||
(Some(_), None) => Ok((application_name, component_name)),
|
||||
(None, Some(app)) => Ok((app.name.as_str(), app.component.as_str())),
|
||||
// These two cases should both be unreachable, since this is caught by manifest
// validation before the manifest is put into the store. Just in case, we log an error and ensure the status is failed
|
||||
(None, None) => Err("Application did not specify an image or shared application reference"),
|
||||
(Some(_image), Some(_app)) => {
|
||||
Err("Application specified both an image and a shared application reference")
|
||||
}
|
||||
}
|
||||
}
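// Illustrative aside (not part of this change): how the resolution rules above behave. The
// `_example_resolution` function and the image reference are hypothetical.
fn _example_resolution() {
    // An image reference means the component lives in this manifest under its own name
    assert_eq!(
        resolve_manifest_component(
            "my-app",
            "api",
            Some(&"ghcr.io/acme/api:0.1.0".to_string()),
            None
        ),
        Ok(("my-app", "api"))
    );
    // Specifying neither an image nor a shared application reference is rejected, and the
    // caller surfaces the error as a failed StatusScaler
    assert!(resolve_manifest_component("my-app", "api", None, None).is_err());
}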
|
||||
|
||||
#[cfg(test)]
|
||||
mod test {
|
||||
use super::compute_component_id;
|
||||
|
||||
#[test]
|
||||
fn compute_proper_component_id() {
|
||||
// User supplied ID always takes precedence
|
||||
assert_eq!(
|
||||
compute_component_id("mymodel", Some(&"myid".to_string()), "echo"),
|
||||
"myid"
|
||||
);
|
||||
assert_eq!(
|
||||
compute_component_id(
|
||||
"some model name with spaces cause yaml",
|
||||
Some(&"myid".to_string()),
|
||||
" echo "
|
||||
),
|
||||
"myid"
|
||||
);
|
||||
// Sanitize component reference
|
||||
assert_eq!(
|
||||
compute_component_id("mymodel", None, "echo-component"),
|
||||
"mymodel-echo_component"
|
||||
);
|
||||
// Ensure we can support spaces in the model name, because YAML strings
|
||||
assert_eq!(
|
||||
compute_component_id("some model name with spaces cause yaml", None, "echo"),
|
||||
"some_model_name_with_spaces_cause_yaml-echo"
|
||||
);
|
||||
|
||||
// Ensure we can support lowercasing the reference as well, just in case
|
||||
assert_eq!(
|
||||
compute_component_id("My ThInG", None, "thing.wasm"),
|
||||
"my_thing-thing_wasm"
|
||||
);
|
||||
}
|
||||
}
|
|
@@ -7,6 +7,7 @@ use tokio::sync::RwLock;
|
|||
use tracing::{instrument, trace};
|
||||
use wadm_types::{api::StatusInfo, Spread, SpreadScalerProperty, TraitProperty};
|
||||
|
||||
use crate::events::ConfigSet;
|
||||
use crate::scaler::spreadscaler::{
|
||||
compute_ineligible_hosts, eligible_hosts, spreadscaler_annotations,
|
||||
};
|
||||
|
@ -119,6 +120,9 @@ impl<S: ReadStore + Send + Sync + Clone> Scaler for ComponentDaemonScaler<S> {
|
|||
Ok(Vec::new())
|
||||
}
|
||||
}
|
||||
Event::ConfigSet(ConfigSet { config_name }) if self.config.contains(config_name) => {
|
||||
self.reconcile().await
|
||||
}
|
||||
// No other event impacts the job of this scaler so we can ignore it
|
||||
_ => Ok(Vec::new()),
|
||||
}
|
||||
|
@ -357,10 +361,10 @@ mod test {
|
|||
sync::Arc,
|
||||
};
|
||||
|
||||
use anyhow::Result;
|
||||
use anyhow::{anyhow, Result};
|
||||
use chrono::Utc;
|
||||
use wadm_types::{api::StatusType, Spread, SpreadScalerProperty};
|
||||
use wasmcloud_control_interface::{HostInventory, InterfaceLinkDefinition};
|
||||
use wasmcloud_control_interface::{HostInventory, Link};
|
||||
|
||||
use crate::{
|
||||
commands::Command,
|
||||
|
@ -794,20 +798,19 @@ mod test {
|
|||
// Inserting for heartbeat handling later
|
||||
lattice_source.inventory.write().await.insert(
|
||||
host_id_three.to_string(),
|
||||
HostInventory {
|
||||
components: vec![],
|
||||
friendly_name: "hey".to_string(),
|
||||
labels: HashMap::from_iter([
|
||||
HostInventory::builder()
|
||||
.friendly_name("hey".into())
|
||||
.labels(BTreeMap::from_iter([
|
||||
("cloud".to_string(), "purgatory".to_string()),
|
||||
("location".to_string(), "edge".to_string()),
|
||||
("region".to_string(), "us-brooks-1".to_string()),
|
||||
]),
|
||||
providers: vec![],
|
||||
host_id: host_id_three.to_string(),
|
||||
version: "1.0.0".to_string(),
|
||||
uptime_human: "what is time really anyway maaaan".to_string(),
|
||||
uptime_seconds: 42,
|
||||
},
|
||||
]))
|
||||
.host_id(host_id_three.into())
|
||||
.version("1.0.0".into())
|
||||
.uptime_human("what is time really anyway maaaan".into())
|
||||
.uptime_seconds(42)
|
||||
.build()
|
||||
.map_err(|e| anyhow!("failed to build host inventory: {e}"))?,
|
||||
);
|
||||
let command_publisher = CommandPublisher::new(NoopPublisher, "doesntmatter");
|
||||
let status_publisher = StatusPublisher::new(NoopPublisher, None, "doesntmatter");
|
||||
|
@ -957,7 +960,7 @@ mod test {
|
|||
.is_empty());
|
||||
assert!(blobby_daemonscaler
|
||||
.handle_event(&Event::LinkdefSet(LinkdefSet {
|
||||
linkdef: InterfaceLinkDefinition::default()
|
||||
linkdef: Link::default()
|
||||
}))
|
||||
.await?
|
||||
.is_empty());
|
||||
|
|
|
@@ -4,15 +4,20 @@ use anyhow::Result;
|
|||
use async_trait::async_trait;
|
||||
use tokio::sync::RwLock;
|
||||
use tracing::{instrument, trace};
|
||||
use wadm_types::api::StatusType;
|
||||
use wadm_types::{api::StatusInfo, Spread, SpreadScalerProperty, TraitProperty};
|
||||
|
||||
use crate::commands::StopProvider;
|
||||
use crate::events::{HostHeartbeat, ProviderInfo, ProviderStarted, ProviderStopped};
|
||||
use crate::events::{
|
||||
ConfigSet, HostHeartbeat, ProviderHealthCheckFailed, ProviderHealthCheckInfo,
|
||||
ProviderHealthCheckPassed, ProviderInfo, ProviderStarted, ProviderStopped,
|
||||
};
|
||||
use crate::scaler::compute_id_sha256;
|
||||
use crate::scaler::spreadscaler::{
|
||||
compute_ineligible_hosts, eligible_hosts, provider::ProviderSpreadConfig,
|
||||
spreadscaler_annotations,
|
||||
};
|
||||
use crate::storage::{Provider, ProviderStatus};
|
||||
use crate::SCALER_KEY;
|
||||
use crate::{
|
||||
commands::{Command, StartProvider},
|
||||
|
@ -97,6 +102,65 @@ impl<S: ReadStore + Send + Sync + Clone> Scaler for ProviderDaemonScaler<S> {
|
|||
{
|
||||
self.reconcile().await
|
||||
}
|
||||
// perform status updates for health check events
|
||||
Event::ProviderHealthCheckFailed(ProviderHealthCheckFailed {
|
||||
data: ProviderHealthCheckInfo { provider_id, .. },
|
||||
})
|
||||
| Event::ProviderHealthCheckPassed(ProviderHealthCheckPassed {
|
||||
data: ProviderHealthCheckInfo { provider_id, .. },
|
||||
}) if provider_id == &self.config.provider_id => {
|
||||
let provider = self
|
||||
.store
|
||||
.get::<Provider>(&self.config.lattice_id, &self.config.provider_id)
|
||||
.await?;
|
||||
|
||||
let unhealthy_providers = provider.map_or(0, |p| {
|
||||
p.hosts
|
||||
.values()
|
||||
.filter(|s| *s == &ProviderStatus::Failed)
|
||||
.count()
|
||||
});
|
||||
let status = self.status.read().await.to_owned();
|
||||
// update health status of scaler
|
||||
if let Some(status) = match (status, unhealthy_providers > 0) {
|
||||
// scaler is deployed but contains unhealthy providers
|
||||
(
|
||||
StatusInfo {
|
||||
status_type: StatusType::Deployed,
|
||||
..
|
||||
},
|
||||
true,
|
||||
) => Some(StatusInfo::failed(&format!(
|
||||
"Unhealthy provider on {} host(s)",
|
||||
unhealthy_providers
|
||||
))),
|
||||
// scaler can become unhealthy only if it was previously deployed
|
||||
// once scaler becomes healthy again revert back to deployed state
|
||||
// this is a workaround to detect unhealthy status until
|
||||
// StatusType::Unhealthy can be used
|
||||
(
|
||||
StatusInfo {
|
||||
status_type: StatusType::Failed,
|
||||
message,
|
||||
},
|
||||
false,
|
||||
) if message.starts_with("Unhealthy provider on") => {
|
||||
Some(StatusInfo::deployed(""))
|
||||
}
|
||||
// don't update status if scaler is not deployed
|
||||
_ => None,
|
||||
} {
|
||||
*self.status.write().await = status;
|
||||
}
|
||||
|
||||
// only status needs update no new commands required
|
||||
Ok(Vec::new())
|
||||
}
|
||||
Event::ConfigSet(ConfigSet { config_name })
|
||||
if self.config.provider_config.contains(config_name) =>
|
||||
{
|
||||
self.reconcile().await
|
||||
}
|
||||
// No other event impacts the job of this scaler so we can ignore it
|
||||
_ => Ok(Vec::new()),
|
||||
}
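// Illustrative aside (not part of this change): the health-status transition above, restated
// as a pure function over the current status and the number of unhealthy hosts. `None` means
// the stored status is left untouched. (Assumes wadm_types::api::{StatusInfo, StatusType}.)
//
// fn provider_health_status(current: StatusInfo, unhealthy_hosts: usize) -> Option<StatusInfo> {
//     match (current, unhealthy_hosts > 0) {
//         // A deployed scaler with at least one failing host flips to failed
//         (StatusInfo { status_type: StatusType::Deployed, .. }, true) => Some(StatusInfo::failed(
//             &format!("Unhealthy provider on {unhealthy_hosts} host(s)"),
//         )),
//         // A failure this scaler previously reported clears once every host is healthy again
//         (StatusInfo { status_type: StatusType::Failed, message }, false)
//             if message.starts_with("Unhealthy provider on") =>
//         {
//             Some(StatusInfo::deployed(""))
//         }
//         // Anything else (e.g. not yet deployed, or a failure with another cause) is left alone
//         _ => None,
//     }
// }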
|
||||
|
@ -105,7 +169,6 @@ impl<S: ReadStore + Send + Sync + Clone> Scaler for ProviderDaemonScaler<S> {
|
|||
#[instrument(level = "trace", skip_all, fields(name = %self.config.model_name, scaler_id = %self.id))]
|
||||
async fn reconcile(&self) -> Result<Vec<Command>> {
|
||||
let hosts = self.store.list::<Host>(&self.config.lattice_id).await?;
|
||||
|
||||
let provider_id = &self.config.provider_id;
|
||||
let provider_ref = &self.config.provider_reference;
|
||||
|
||||
|
@ -505,4 +568,274 @@ mod test {
|
|||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_healthy_providers_return_healthy_status() -> Result<()> {
|
||||
let lattice_id = "test_healthy_providers";
|
||||
let provider_ref = "fakecloud.azurecr.io/provider:3.2.1".to_string();
|
||||
let provider_id = "VASDASDIAMAREALPROVIDERPROVIDER";
|
||||
|
||||
let host_id_one = "NASDASDIMAREALHOSTONE";
|
||||
let host_id_two = "NASDASDIMAREALHOSTTWO";
|
||||
|
||||
let store = Arc::new(TestStore::default());
|
||||
|
||||
store
|
||||
.store(
|
||||
lattice_id,
|
||||
host_id_one.to_string(),
|
||||
Host {
|
||||
components: HashMap::new(),
|
||||
friendly_name: "hey".to_string(),
|
||||
labels: HashMap::from_iter([
|
||||
("inda".to_string(), "cloud".to_string()),
|
||||
("cloud".to_string(), "fake".to_string()),
|
||||
("region".to_string(), "us-noneofyourbusiness-1".to_string()),
|
||||
]),
|
||||
providers: HashSet::from_iter([ProviderInfo {
|
||||
provider_id: provider_id.to_string(),
|
||||
provider_ref: provider_ref.to_string(),
|
||||
annotations: BTreeMap::default(),
|
||||
}]),
|
||||
uptime_seconds: 123,
|
||||
version: None,
|
||||
id: host_id_one.to_string(),
|
||||
last_seen: Utc::now(),
|
||||
},
|
||||
)
|
||||
.await?;
|
||||
|
||||
store
|
||||
.store(
|
||||
lattice_id,
|
||||
host_id_two.to_string(),
|
||||
Host {
|
||||
components: HashMap::new(),
|
||||
friendly_name: "hey".to_string(),
|
||||
labels: HashMap::from_iter([
|
||||
("inda".to_string(), "cloud".to_string()),
|
||||
("cloud".to_string(), "real".to_string()),
|
||||
("region".to_string(), "us-yourhouse-1".to_string()),
|
||||
]),
|
||||
providers: HashSet::from_iter([ProviderInfo {
|
||||
provider_id: provider_id.to_string(),
|
||||
provider_ref: provider_ref.to_string(),
|
||||
annotations: BTreeMap::default(),
|
||||
}]),
|
||||
uptime_seconds: 123,
|
||||
version: None,
|
||||
id: host_id_two.to_string(),
|
||||
last_seen: Utc::now(),
|
||||
},
|
||||
)
|
||||
.await?;
|
||||
|
||||
store
|
||||
.store(
|
||||
lattice_id,
|
||||
provider_id.to_string(),
|
||||
Provider {
|
||||
id: provider_id.to_string(),
|
||||
name: "provider".to_string(),
|
||||
issuer: "issuer".to_string(),
|
||||
reference: provider_ref.to_string(),
|
||||
hosts: HashMap::from([
|
||||
(host_id_one.to_string(), ProviderStatus::Failed),
|
||||
(host_id_two.to_string(), ProviderStatus::Running),
|
||||
]),
|
||||
},
|
||||
)
|
||||
.await?;
|
||||
|
||||
// Ensure we spread evenly with equal weights, clean division
|
||||
let multi_spread_even = SpreadScalerProperty {
|
||||
// instances are ignored so putting an absurd number
|
||||
instances: 2,
|
||||
spread: vec![Spread {
|
||||
name: "SimpleOne".to_string(),
|
||||
requirements: BTreeMap::from_iter([("inda".to_string(), "cloud".to_string())]),
|
||||
weight: Some(100),
|
||||
}],
|
||||
};
|
||||
|
||||
let spreadscaler = ProviderDaemonScaler::new(
|
||||
store.clone(),
|
||||
ProviderSpreadConfig {
|
||||
lattice_id: lattice_id.to_string(),
|
||||
provider_id: provider_id.to_string(),
|
||||
provider_reference: provider_ref.to_string(),
|
||||
spread_config: multi_spread_even,
|
||||
model_name: MODEL_NAME.to_string(),
|
||||
provider_config: vec!["foobar".to_string()],
|
||||
},
|
||||
"fake_component",
|
||||
);
|
||||
|
||||
spreadscaler.reconcile().await?;
|
||||
spreadscaler
|
||||
.handle_event(&Event::ProviderHealthCheckFailed(
|
||||
ProviderHealthCheckFailed {
|
||||
data: ProviderHealthCheckInfo {
|
||||
provider_id: provider_id.to_string(),
|
||||
host_id: host_id_one.to_string(),
|
||||
},
|
||||
},
|
||||
))
|
||||
.await?;
|
||||
|
||||
store
|
||||
.store(
|
||||
lattice_id,
|
||||
provider_id.to_string(),
|
||||
Provider {
|
||||
id: provider_id.to_string(),
|
||||
name: "provider".to_string(),
|
||||
issuer: "issuer".to_string(),
|
||||
reference: provider_ref.to_string(),
|
||||
hosts: HashMap::from([
|
||||
(host_id_one.to_string(), ProviderStatus::Pending),
|
||||
(host_id_two.to_string(), ProviderStatus::Running),
|
||||
]),
|
||||
},
|
||||
)
|
||||
.await?;
|
||||
|
||||
spreadscaler
|
||||
.handle_event(&Event::ProviderHealthCheckPassed(
|
||||
ProviderHealthCheckPassed {
|
||||
data: ProviderHealthCheckInfo {
|
||||
provider_id: provider_id.to_string(),
|
||||
host_id: host_id_two.to_string(),
|
||||
},
|
||||
},
|
||||
))
|
||||
.await?;
|
||||
|
||||
assert_eq!(
|
||||
spreadscaler.status.read().await.to_owned(),
|
||||
StatusInfo::deployed("")
|
||||
);
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_unhealthy_providers_return_unhealthy_status() -> Result<()> {
|
||||
let lattice_id = "test_unhealthy_providers";
|
||||
let provider_ref = "fakecloud.azurecr.io/provider:3.2.1".to_string();
|
||||
let provider_id = "VASDASDIAMAREALPROVIDERPROVIDER";
|
||||
|
||||
let host_id_one = "NASDASDIMAREALHOSTONE";
|
||||
let host_id_two = "NASDASDIMAREALHOSTTWO";
|
||||
|
||||
let store = Arc::new(TestStore::default());
|
||||
|
||||
store
|
||||
.store(
|
||||
lattice_id,
|
||||
host_id_one.to_string(),
|
||||
Host {
|
||||
components: HashMap::new(),
|
||||
friendly_name: "hey".to_string(),
|
||||
labels: HashMap::from_iter([
|
||||
("inda".to_string(), "cloud".to_string()),
|
||||
("cloud".to_string(), "fake".to_string()),
|
||||
("region".to_string(), "us-noneofyourbusiness-1".to_string()),
|
||||
]),
|
||||
providers: HashSet::from_iter([ProviderInfo {
|
||||
provider_id: provider_id.to_string(),
|
||||
provider_ref: provider_ref.to_string(),
|
||||
annotations: BTreeMap::default(),
|
||||
}]),
|
||||
uptime_seconds: 123,
|
||||
version: None,
|
||||
id: host_id_one.to_string(),
|
||||
last_seen: Utc::now(),
|
||||
},
|
||||
)
|
||||
.await?;
|
||||
|
||||
store
|
||||
.store(
|
||||
lattice_id,
|
||||
host_id_two.to_string(),
|
||||
Host {
|
||||
components: HashMap::new(),
|
||||
friendly_name: "hey".to_string(),
|
||||
labels: HashMap::from_iter([
|
||||
("inda".to_string(), "cloud".to_string()),
|
||||
("cloud".to_string(), "real".to_string()),
|
||||
("region".to_string(), "us-yourhouse-1".to_string()),
|
||||
]),
|
||||
providers: HashSet::from_iter([ProviderInfo {
|
||||
provider_id: provider_id.to_string(),
|
||||
provider_ref: provider_ref.to_string(),
|
||||
annotations: BTreeMap::default(),
|
||||
}]),
|
||||
uptime_seconds: 123,
|
||||
version: None,
|
||||
id: host_id_two.to_string(),
|
||||
last_seen: Utc::now(),
|
||||
},
|
||||
)
|
||||
.await?;
|
||||
|
||||
store
|
||||
.store(
|
||||
lattice_id,
|
||||
provider_id.to_string(),
|
||||
Provider {
|
||||
id: provider_id.to_string(),
|
||||
name: "provider".to_string(),
|
||||
issuer: "issuer".to_string(),
|
||||
reference: provider_ref.to_string(),
|
||||
hosts: HashMap::from([
|
||||
(host_id_one.to_string(), ProviderStatus::Failed),
|
||||
(host_id_two.to_string(), ProviderStatus::Running),
|
||||
]),
|
||||
},
|
||||
)
|
||||
.await?;
|
||||
|
||||
// Ensure we spread evenly with equal weights, clean division
|
||||
let multi_spread_even = SpreadScalerProperty {
|
||||
// instances are ignored so putting an absurd number
|
||||
instances: 2,
|
||||
spread: vec![Spread {
|
||||
name: "SimpleOne".to_string(),
|
||||
requirements: BTreeMap::from_iter([("inda".to_string(), "cloud".to_string())]),
|
||||
weight: Some(100),
|
||||
}],
|
||||
};
|
||||
|
||||
let spreadscaler = ProviderDaemonScaler::new(
|
||||
store.clone(),
|
||||
ProviderSpreadConfig {
|
||||
lattice_id: lattice_id.to_string(),
|
||||
provider_id: provider_id.to_string(),
|
||||
provider_reference: provider_ref.to_string(),
|
||||
spread_config: multi_spread_even,
|
||||
model_name: MODEL_NAME.to_string(),
|
||||
provider_config: vec!["foobar".to_string()],
|
||||
},
|
||||
"fake_component",
|
||||
);
|
||||
|
||||
spreadscaler.reconcile().await?;
|
||||
spreadscaler
|
||||
.handle_event(&Event::ProviderHealthCheckFailed(
|
||||
ProviderHealthCheckFailed {
|
||||
data: ProviderHealthCheckInfo {
|
||||
provider_id: provider_id.to_string(),
|
||||
host_id: host_id_one.to_string(),
|
||||
},
|
||||
},
|
||||
))
|
||||
.await?;
|
||||
|
||||
assert_eq!(
|
||||
spreadscaler.status.read().await.to_owned(),
|
||||
StatusInfo::failed("Unhealthy provider on 1 host(s)")
|
||||
);
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
|
|
@@ -1,6 +1,6 @@
|
|||
//! A struct that manages creating and removing scalers for all manifests
|
||||
|
||||
use std::{collections::HashMap, ops::Deref, sync::Arc, time::Duration};
|
||||
use std::{collections::HashMap, ops::Deref, sync::Arc};
|
||||
|
||||
use anyhow::Result;
|
||||
use async_nats::jetstream::{
|
||||
|
@ -19,31 +19,18 @@ use tokio::{
|
|||
use tracing::{debug, error, instrument, trace, warn};
|
||||
use wadm_types::{
|
||||
api::{Status, StatusInfo},
|
||||
CapabilityProperties, Component, ComponentProperties, ConfigProperty, Manifest, Policy,
|
||||
Properties, SecretProperty, SpreadScalerProperty, Trait, TraitProperty, DAEMONSCALER_TRAIT,
|
||||
LINK_TRAIT, SPREADSCALER_TRAIT,
|
||||
Manifest,
|
||||
};
|
||||
use wasmcloud_secrets_types::SECRET_PREFIX;
|
||||
|
||||
use crate::{
|
||||
events::Event,
|
||||
publisher::Publisher,
|
||||
scaler::{spreadscaler::ComponentSpreadScaler, Command, Scaler},
|
||||
scaler::{Command, Scaler},
|
||||
storage::{snapshot::SnapshotStore, ReadStore},
|
||||
workers::{CommandPublisher, ConfigSource, LinkSource, SecretSource, StatusPublisher},
|
||||
DEFAULT_LINK_NAME,
|
||||
};
|
||||
|
||||
use super::{
|
||||
configscaler::ConfigScaler,
|
||||
daemonscaler::{provider::ProviderDaemonScaler, ComponentDaemonScaler},
|
||||
secretscaler::SecretScaler,
|
||||
spreadscaler::{
|
||||
link::{LinkScaler, LinkScalerConfig},
|
||||
provider::{ProviderSpreadConfig, ProviderSpreadScaler},
|
||||
},
|
||||
BackoffWrapper,
|
||||
};
|
||||
use super::convert::manifest_components_to_scalers;
|
||||
|
||||
pub type BoxedScaler = Box<dyn Scaler + Send + Sync + 'static>;
|
||||
pub type ScalerList = Vec<BoxedScaler>;
|
||||
|
@@ -202,13 +189,13 @@ where
|
|||
.filter_map(|manifest| {
|
||||
let data = manifest.get_deployed()?;
|
||||
let name = manifest.name().to_owned();
|
||||
let scalers = components_to_scalers(
|
||||
let scalers = manifest_components_to_scalers(
|
||||
&data.spec.components,
|
||||
&data.policy_lookup(),
|
||||
lattice_id,
|
||||
&client,
|
||||
&name,
|
||||
&subject,
|
||||
&client,
|
||||
&snapshot_data,
|
||||
);
|
||||
Some((name, scalers))
|
||||
|
@@ -293,13 +280,13 @@ where
|
|||
}
|
||||
|
||||
pub fn scalers_for_manifest<'a>(&'a self, manifest: &'a Manifest) -> ScalerList {
|
||||
components_to_scalers(
|
||||
manifest_components_to_scalers(
|
||||
&manifest.spec.components,
|
||||
&manifest.policy_lookup(),
|
||||
&self.lattice_id,
|
||||
&self.client,
|
||||
&manifest.metadata.name,
|
||||
&self.subject,
|
||||
&self.client,
|
||||
&self.snapshot_data,
|
||||
)
|
||||
}
|
||||
|
@@ -443,13 +430,13 @@ where
|
|||
match notification {
|
||||
Notifications::CreateScalers(manifest) => {
|
||||
// We don't want to trigger the notification, so just create the scalers and then insert
|
||||
let scalers = components_to_scalers(
|
||||
let scalers = manifest_components_to_scalers(
|
||||
&manifest.spec.components,
|
||||
&manifest.policy_lookup(),
|
||||
&self.lattice_id,
|
||||
&self.client,
|
||||
&manifest.metadata.name,
|
||||
&self.subject,
|
||||
&self.client,
|
||||
&self.snapshot_data,
|
||||
);
|
||||
let num_scalers = scalers.len();
|
||||
|
@@ -566,471 +553,3 @@ where
|
|||
}
|
||||
}
|
||||
}
|
||||
|
||||
const EMPTY_TRAIT_VEC: Vec<Trait> = Vec::new();
|
||||
|
||||
/// Converts a list of components into a list of scalers
|
||||
///
|
||||
/// # Arguments
|
||||
/// * `components` - The list of components to convert
|
||||
/// * `store` - The store to use when creating the scalers so they can access lattice state
|
||||
/// * `lattice_id` - The lattice id the scalers operate on
|
||||
/// * `name` - The name of the manifest that the scalers are being created for
|
||||
pub(crate) fn components_to_scalers<S, P, L>(
|
||||
components: &[Component],
|
||||
policies: &HashMap<&String, &Policy>,
|
||||
lattice_id: &str,
|
||||
notifier: &P,
|
||||
name: &str,
|
||||
notifier_subject: &str,
|
||||
snapshot_data: &SnapshotStore<S, L>,
|
||||
) -> ScalerList
|
||||
where
|
||||
S: ReadStore + Send + Sync + Clone + 'static,
|
||||
P: Publisher + Clone + Send + Sync + 'static,
|
||||
L: LinkSource + ConfigSource + SecretSource + Clone + Send + Sync + 'static,
|
||||
{
|
||||
let mut scalers: ScalerList = Vec::new();
|
||||
for component in components.iter() {
|
||||
let traits = component.traits.as_ref();
|
||||
match &component.properties {
|
||||
Properties::Component { properties: props } => {
|
||||
scalers.extend(traits.unwrap_or(&EMPTY_TRAIT_VEC).iter().filter_map(|trt| {
|
||||
let component_id =
|
||||
compute_component_id(name, props.id.as_ref(), &component.name);
|
||||
let (config_scalers, mut config_names) =
|
||||
config_to_scalers(snapshot_data.clone(), name, &props.config);
|
||||
let (secret_scalers, secret_names) =
|
||||
secrets_to_scalers(snapshot_data.clone(), name, &props.secrets, policies);
|
||||
|
||||
config_names.append(&mut secret_names.clone());
|
||||
match (trt.trait_type.as_str(), &trt.properties) {
|
||||
(SPREADSCALER_TRAIT, TraitProperty::SpreadScaler(p)) => {
|
||||
Some(Box::new(BackoffWrapper::new(
|
||||
ComponentSpreadScaler::new(
|
||||
snapshot_data.clone(),
|
||||
props.image.to_owned(),
|
||||
component_id,
|
||||
lattice_id.to_owned(),
|
||||
name.to_owned(),
|
||||
p.to_owned(),
|
||||
&component.name,
|
||||
config_names,
|
||||
),
|
||||
notifier.to_owned(),
|
||||
config_scalers,
|
||||
secret_scalers,
|
||||
notifier_subject,
|
||||
name,
|
||||
Some(Duration::from_secs(5)),
|
||||
)) as BoxedScaler)
|
||||
}
|
||||
(DAEMONSCALER_TRAIT, TraitProperty::SpreadScaler(p)) => {
|
||||
Some(Box::new(BackoffWrapper::new(
|
||||
ComponentDaemonScaler::new(
|
||||
snapshot_data.clone(),
|
||||
props.image.to_owned(),
|
||||
component_id,
|
||||
lattice_id.to_owned(),
|
||||
name.to_owned(),
|
||||
p.to_owned(),
|
||||
&component.name,
|
||||
config_names,
|
||||
),
|
||||
notifier.to_owned(),
|
||||
config_scalers,
|
||||
secret_scalers,
|
||||
notifier_subject,
|
||||
name,
|
||||
Some(Duration::from_secs(5)),
|
||||
)) as BoxedScaler)
|
||||
}
|
||||
(LINK_TRAIT, TraitProperty::Link(p)) => {
|
||||
components.iter().find_map(|component| {
|
||||
let (mut config_scalers, mut source_config) = config_to_scalers(
|
||||
snapshot_data.clone(),
|
||||
name,
|
||||
&p.source.as_ref().unwrap_or(&Default::default()).config,
|
||||
);
|
||||
let (target_config_scalers, mut target_config) = config_to_scalers(
|
||||
snapshot_data.clone(),
|
||||
name,
|
||||
&p.target.config,
|
||||
);
|
||||
|
||||
let (target_secret_scalers, target_secrets) = secrets_to_scalers(
|
||||
snapshot_data.clone(),
|
||||
name,
|
||||
&p.target.secrets,
|
||||
policies,
|
||||
);
|
||||
let (mut source_secret_scalers, source_secrets) =
|
||||
secrets_to_scalers(
|
||||
snapshot_data.clone(),
|
||||
name,
|
||||
&p.source.as_ref().unwrap_or(&Default::default()).secrets,
|
||||
policies,
|
||||
);
|
||||
config_scalers.extend(target_config_scalers);
|
||||
source_secret_scalers.extend(target_secret_scalers);
|
||||
target_config.extend(target_secrets);
|
||||
source_config.extend(source_secrets);
|
||||
match &component.properties {
|
||||
Properties::Capability {
|
||||
properties: CapabilityProperties { id, .. },
|
||||
}
|
||||
| Properties::Component {
|
||||
properties: ComponentProperties { id, .. },
|
||||
} if component.name == p.target.name => {
|
||||
Some(Box::new(BackoffWrapper::new(
|
||||
LinkScaler::new(
|
||||
snapshot_data.clone(),
|
||||
LinkScalerConfig {
|
||||
source_id: component_id.to_string(),
|
||||
target: compute_component_id(
|
||||
name,
|
||||
id.as_ref(),
|
||||
&component.name,
|
||||
),
|
||||
wit_namespace: p.namespace.to_owned(),
|
||||
wit_package: p.package.to_owned(),
|
||||
wit_interfaces: p.interfaces.to_owned(),
|
||||
name: p.name.to_owned().unwrap_or_else(|| {
|
||||
DEFAULT_LINK_NAME.to_string()
|
||||
}),
|
||||
lattice_id: lattice_id.to_owned(),
|
||||
model_name: name.to_owned(),
|
||||
source_config,
|
||||
target_config,
|
||||
},
|
||||
snapshot_data.clone(),
|
||||
),
|
||||
notifier.to_owned(),
|
||||
config_scalers,
|
||||
source_secret_scalers,
|
||||
notifier_subject,
|
||||
name,
|
||||
Some(Duration::from_secs(5)),
|
||||
))
|
||||
as BoxedScaler)
|
||||
}
|
||||
_ => None,
|
||||
}
|
||||
})
|
||||
}
|
||||
_ => None,
|
||||
}
|
||||
}))
|
||||
}
|
||||
Properties::Capability { properties: props } => {
|
||||
let provider_id = compute_component_id(name, props.id.as_ref(), &component.name);
|
||||
let mut scaler_specified = false;
|
||||
if let Some(traits) = traits {
|
||||
scalers.extend(traits.iter().filter_map(|trt| {
|
||||
match (trt.trait_type.as_str(), &trt.properties) {
|
||||
(SPREADSCALER_TRAIT, TraitProperty::SpreadScaler(p)) => {
|
||||
scaler_specified = true;
|
||||
let (config_scalers, mut config_names) =
|
||||
config_to_scalers(snapshot_data.clone(), name, &props.config);
|
||||
let (secret_scalers, secret_names) = secrets_to_scalers(
|
||||
snapshot_data.clone(),
|
||||
name,
|
||||
&props.secrets,
|
||||
policies,
|
||||
);
|
||||
config_names.append(&mut secret_names.clone());
|
||||
|
||||
Some(Box::new(BackoffWrapper::new(
|
||||
ProviderSpreadScaler::new(
|
||||
snapshot_data.clone(),
|
||||
ProviderSpreadConfig {
|
||||
lattice_id: lattice_id.to_owned(),
|
||||
provider_id: provider_id.to_owned(),
|
||||
provider_reference: props.image.to_owned(),
|
||||
spread_config: p.to_owned(),
|
||||
model_name: name.to_owned(),
|
||||
provider_config: config_names,
|
||||
},
|
||||
&component.name,
|
||||
),
|
||||
notifier.to_owned(),
|
||||
config_scalers,
|
||||
secret_scalers,
|
||||
notifier_subject,
|
||||
name,
|
||||
// Providers get a longer backoff because it can take a while to download provider images
|
||||
Some(Duration::from_secs(60)),
|
||||
)) as BoxedScaler)
|
||||
}
|
||||
(DAEMONSCALER_TRAIT, TraitProperty::SpreadScaler(p)) => {
|
||||
scaler_specified = true;
|
||||
let (config_scalers, mut config_names) =
|
||||
config_to_scalers(snapshot_data.clone(), name, &props.config);
|
||||
let (secret_scalers, secret_names) = secrets_to_scalers(
|
||||
snapshot_data.clone(),
|
||||
name,
|
||||
&props.secrets,
|
||||
policies,
|
||||
);
|
||||
config_names.append(&mut secret_names.clone());
|
||||
Some(Box::new(BackoffWrapper::new(
|
||||
ProviderDaemonScaler::new(
|
||||
snapshot_data.clone(),
|
||||
ProviderSpreadConfig {
|
||||
lattice_id: lattice_id.to_owned(),
|
||||
provider_id: provider_id.to_owned(),
|
||||
provider_reference: props.image.to_owned(),
|
||||
spread_config: p.to_owned(),
|
||||
model_name: name.to_owned(),
|
||||
provider_config: config_names,
|
||||
},
|
||||
&component.name,
|
||||
),
|
||||
notifier.to_owned(),
|
||||
config_scalers,
|
||||
secret_scalers,
|
||||
notifier_subject,
|
||||
name,
|
||||
// Providers get a longer backoff because it can take a while to download provider images
|
||||
Some(Duration::from_secs(60)),
|
||||
)) as BoxedScaler)
|
||||
}
|
||||
(LINK_TRAIT, TraitProperty::Link(p)) => {
|
||||
components.iter().find_map(|component| {
|
||||
let (mut config_scalers, mut source_config) = config_to_scalers(
|
||||
snapshot_data.clone(),
|
||||
name,
|
||||
&p.source.as_ref().unwrap_or(&Default::default()).config,
|
||||
);
|
||||
let (target_config_scalers, mut target_config) =
|
||||
config_to_scalers(
|
||||
snapshot_data.clone(),
|
||||
name,
|
||||
&p.target.config,
|
||||
);
|
||||
let (target_secret_scalers, target_secrets) =
|
||||
secrets_to_scalers(
|
||||
snapshot_data.clone(),
|
||||
name,
|
||||
&p.target.secrets,
|
||||
policies,
|
||||
);
|
||||
let (mut source_secret_scalers, source_secrets) =
|
||||
secrets_to_scalers(
|
||||
snapshot_data.clone(),
|
||||
name,
|
||||
&p.source
|
||||
.as_ref()
|
||||
.unwrap_or(&Default::default())
|
||||
.secrets,
|
||||
policies,
|
||||
);
|
||||
config_scalers.extend(target_config_scalers);
|
||||
source_secret_scalers.extend(target_secret_scalers);
|
||||
|
||||
target_config.extend(target_secrets);
|
||||
source_config.extend(source_secrets);
|
||||
match &component.properties {
|
||||
Properties::Component { properties: cappy }
|
||||
if component.name == p.target.name =>
|
||||
{
|
||||
Some(Box::new(BackoffWrapper::new(
|
||||
LinkScaler::new(
|
||||
snapshot_data.clone(),
|
||||
LinkScalerConfig {
|
||||
source_id: provider_id.to_string(),
|
||||
target: compute_component_id(
|
||||
name,
|
||||
cappy.id.as_ref(),
|
||||
&component.name,
|
||||
),
|
||||
wit_namespace: p.namespace.to_owned(),
|
||||
wit_package: p.package.to_owned(),
|
||||
wit_interfaces: p.interfaces.to_owned(),
|
||||
name: p.name.to_owned().unwrap_or_else(
|
||||
|| DEFAULT_LINK_NAME.to_string(),
|
||||
),
|
||||
lattice_id: lattice_id.to_owned(),
|
||||
model_name: name.to_owned(),
|
||||
source_config,
|
||||
target_config,
|
||||
},
|
||||
snapshot_data.clone(),
|
||||
),
|
||||
notifier.to_owned(),
|
||||
config_scalers,
|
||||
source_secret_scalers,
|
||||
notifier_subject,
|
||||
name,
|
||||
Some(Duration::from_secs(5)),
|
||||
))
|
||||
as BoxedScaler)
|
||||
}
|
||||
_ => None,
|
||||
}
|
||||
})
|
||||
}
|
||||
_ => None,
|
||||
}
|
||||
}))
|
||||
}
|
||||
// Allow providers to omit the scaler entirely for simplicity
|
||||
if !scaler_specified {
|
||||
let (config_scalers, mut config_names) =
|
||||
config_to_scalers(snapshot_data.clone(), name, &props.config);
|
||||
|
||||
let (secret_scalers, secret_names) =
|
||||
secrets_to_scalers(snapshot_data.clone(), name, &props.secrets, policies);
|
||||
config_names.append(&mut secret_names.clone());
|
||||
|
||||
scalers.push(Box::new(BackoffWrapper::new(
|
||||
ProviderSpreadScaler::new(
|
||||
snapshot_data.clone(),
|
||||
ProviderSpreadConfig {
|
||||
lattice_id: lattice_id.to_owned(),
|
||||
provider_id,
|
||||
provider_reference: props.image.to_owned(),
|
||||
spread_config: SpreadScalerProperty {
|
||||
instances: 1,
|
||||
spread: vec![],
|
||||
},
|
||||
model_name: name.to_owned(),
|
||||
provider_config: config_names,
|
||||
},
|
||||
&component.name,
|
||||
),
|
||||
notifier.to_owned(),
|
||||
config_scalers,
|
||||
secret_scalers,
|
||||
notifier_subject,
|
||||
name,
|
||||
// Providers get a longer backoff because it can take a while to download provider images
|
||||
Some(Duration::from_secs(60)),
|
||||
)) as BoxedScaler)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
scalers
|
||||
}
|
||||
|
||||
/// Returns a tuple which is a list of scalers and a list of the names of the configs that the
|
||||
/// scalers use.
|
||||
///
|
||||
/// Any input [ConfigProperty] that has a `properties` field will be converted into a [ConfigScaler], and
|
||||
/// the name of the configuration will be modified to be unique to the model and component. If the properties
|
||||
/// field is not present, the name will be used as-is and assumed that it's managed externally to wadm.
|
||||
fn config_to_scalers<C: ConfigSource + Send + Sync + Clone>(
|
||||
config_source: C,
|
||||
model_name: &str,
|
||||
configs: &[ConfigProperty],
|
||||
) -> (Vec<ConfigScaler<C>>, Vec<String>) {
|
||||
configs
|
||||
.iter()
|
||||
.map(|config| {
|
||||
let name = if config.properties.is_some() {
|
||||
compute_component_id(model_name, None, &config.name)
|
||||
} else {
|
||||
config.name.clone()
|
||||
};
|
||||
(
|
||||
ConfigScaler::new(config_source.clone(), &name, config.properties.as_ref()),
|
||||
name,
|
||||
)
|
||||
})
|
||||
.unzip()
|
||||
}
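
// Illustrative note (example inputs assumed, not taken from a real manifest): for a model
// named "my model", a `ConfigProperty { name: "log-level", properties: Some(..) }` yields a
// ConfigScaler named "my_model-log_level" (inline config gets a name unique to this model),
// while `ConfigProperty { name: "log-level", properties: None }` keeps the name "log-level"
// and is assumed to be managed outside of wadm.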
|
||||
|
||||
fn secrets_to_scalers<S: SecretSource + Send + Sync + Clone>(
|
||||
secret_source: S,
|
||||
model_name: &str,
|
||||
secrets: &[SecretProperty],
|
||||
policies: &HashMap<&String, &Policy>,
|
||||
) -> (Vec<SecretScaler<S>>, Vec<String>) {
|
||||
secrets
|
||||
.iter()
|
||||
.map(|s| {
|
||||
let name = compute_secret_id(model_name, None, &s.name);
|
||||
let policy = *policies.get(&s.properties.policy).unwrap();
|
||||
(
|
||||
SecretScaler::new(
|
||||
name.clone(),
|
||||
policy.clone(),
|
||||
s.clone(),
|
||||
secret_source.clone(),
|
||||
),
|
||||
name,
|
||||
)
|
||||
})
|
||||
.unzip()
|
||||
}
|
||||
|
||||
/// Based on the name of the model and the optionally provided ID, returns a unique ID for the
|
||||
/// component that is a sanitized version of the component reference and model name, separated
|
||||
/// by a dash.
|
||||
pub(crate) fn compute_component_id(
|
||||
model_name: &str,
|
||||
component_id: Option<&String>,
|
||||
component_name: &str,
|
||||
) -> String {
|
||||
if let Some(id) = component_id {
|
||||
id.to_owned()
|
||||
} else {
|
||||
format!(
|
||||
"{}-{}",
|
||||
model_name
|
||||
.to_lowercase()
|
||||
.replace(|c: char| !c.is_ascii_alphanumeric(), "_"),
|
||||
component_name
|
||||
.to_lowercase()
|
||||
.replace(|c: char| !c.is_ascii_alphanumeric(), "_")
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
pub(crate) fn compute_secret_id(
|
||||
model_name: &str,
|
||||
component_id: Option<&String>,
|
||||
component_name: &str,
|
||||
) -> String {
|
||||
let name = compute_component_id(model_name, component_id, component_name);
|
||||
format!("{SECRET_PREFIX}_{name}")
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod test {
|
||||
use crate::scaler::manager::compute_component_id;
|
||||
|
||||
#[test]
|
||||
fn compute_proper_component_id() {
|
||||
// User supplied ID always takes precedence
|
||||
assert_eq!(
|
||||
compute_component_id("mymodel", Some(&"myid".to_string()), "echo"),
|
||||
"myid"
|
||||
);
|
||||
assert_eq!(
|
||||
compute_component_id(
|
||||
"some model name with spaces cause yaml",
|
||||
Some(&"myid".to_string()),
|
||||
" echo "
|
||||
),
|
||||
"myid"
|
||||
);
|
||||
// Sanitize component reference
|
||||
assert_eq!(
|
||||
compute_component_id("mymodel", None, "echo-component"),
|
||||
"mymodel-echo_component"
|
||||
);
|
||||
// Ensure we can support spaces in the model name, because YAML strings
|
||||
assert_eq!(
|
||||
compute_component_id("some model name with spaces cause yaml", None, "echo"),
|
||||
"some_model_name_with_spaces_cause_yaml-echo"
|
||||
);
|
||||
// Ensure we can support spaces in the model name, because YAML strings
|
||||
// Ensure we can support lowercasing the reference as well, just in case
|
||||
assert_eq!(
|
||||
compute_component_id("My ThInG", None, "thing.wasm"),
|
||||
"my_thing-thing_wasm"
|
||||
);
|
||||
}
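
    // Minimal illustrative sketch, assuming the sanitization behavior exercised in
    // compute_proper_component_id above; the input strings here are made up for the example.
    #[test]
    fn compute_proper_secret_id() {
        use crate::scaler::manager::compute_secret_id;
        use wasmcloud_secrets_types::SECRET_PREFIX;

        // "my model" is lowercased and non-alphanumeric characters become '_', then the
        // resulting component id is prefixed with SECRET_PREFIX and an underscore.
        assert_eq!(
            compute_secret_id("my model", None, "echo"),
            format!("{SECRET_PREFIX}_my_model-echo")
        );
    }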
|
||||
}
|
||||
|
|
|
@@ -18,10 +18,12 @@ use crate::{
|
|||
};
|
||||
|
||||
pub mod configscaler;
|
||||
mod convert;
|
||||
pub mod daemonscaler;
|
||||
pub mod manager;
|
||||
pub mod secretscaler;
|
||||
pub mod spreadscaler;
|
||||
pub mod statusscaler;
|
||||
|
||||
use manager::Notifications;
|
||||
|
||||
|
@@ -91,16 +93,16 @@ pub trait Scaler {
|
|||
/// necessary prerequisites to reconcile.
|
||||
///
|
||||
/// 1. `required_config` & `required_secrets`: With the introduction of configuration
|
||||
/// for wadm applications, the most necessary prerequisite for components, providers
|
||||
/// and links to start is that their configuration is available. Scalers will not be
|
||||
/// able to issue commands until the configuration exists.
|
||||
/// for wadm applications, the most necessary prerequisite for components, providers
|
||||
/// and links to start is that their configuration is available. Scalers will not be
|
||||
/// able to issue commands until the configuration exists.
|
||||
/// 2. `expected_events`: For scalers that issue commands that should result in events,
|
||||
/// the BackoffWrapper is responsible for ensuring that the scaler doesn't continually
|
||||
/// issue commands that it's already expecting events for. Commonly this will allow a host
|
||||
/// to download larger images from an OCI repository without being bombarded with repeat requests.
|
||||
/// the BackoffWrapper is responsible for ensuring that the scaler doesn't continually
|
||||
/// issue commands that it's already expecting events for. Commonly this will allow a host
|
||||
/// to download larger images from an OCI repository without being bombarded with repeat requests.
|
||||
/// 3. `backoff_status`: If a scaler receives an event that it was expecting, but it was a failure
|
||||
/// event, the scaler should back off exponentially while reporting that failure status. This both
|
||||
/// allows for diagnosing issues with reconciliation and prevents thrashing.
|
||||
/// event, the scaler should back off exponentially while reporting that failure status. This both
|
||||
/// allows for diagnosing issues with reconciliation and prevents thrashing.
|
||||
///
|
||||
/// All of the above effectively allows the inner Scaler to only worry about the logic around
|
||||
/// reconciling and handling events, rather than be concerned about whether or not
|
||||
|
@@ -237,11 +239,11 @@ where
|
|||
trace!(failed_event, "Scaler received event that it was expecting");
|
||||
if failed_event {
|
||||
let failed_message = match event {
|
||||
Event::ProviderStartFailed(evt) => &evt.error,
|
||||
Event::ComponentScaleFailed(evt) => &evt.error,
|
||||
_ => &format!("Received a failed event of type '{}'", event.raw_type()),
|
||||
Event::ProviderStartFailed(evt) => evt.error.clone(),
|
||||
Event::ComponentScaleFailed(evt) => evt.error.clone(),
|
||||
_ => format!("Received a failed event of type '{}'", event.raw_type()),
|
||||
};
|
||||
*self.backoff_status.write().await = Some(StatusInfo::failed(failed_message));
|
||||
*self.backoff_status.write().await = Some(StatusInfo::failed(&failed_message));
|
||||
// TODO(#253): Here we could refer to a stored previous duration and increase it
|
||||
self.set_timed_status_cleanup(std::time::Duration::from_secs(5))
|
||||
.await;
|
||||
|
|
|
@@ -122,9 +122,9 @@ where
|
|||
self.reconcile().await
|
||||
}
|
||||
Event::LinkdefSet(LinkdefSet { linkdef })
|
||||
if linkdef.source_id == self.config.source_id
|
||||
&& linkdef.target == self.config.target
|
||||
&& linkdef.name == self.config.name =>
|
||||
if linkdef.source_id() == self.config.source_id
|
||||
&& linkdef.target() == self.config.target
|
||||
&& linkdef.name() == self.config.name =>
|
||||
{
|
||||
*self.status.write().await = StatusInfo::deployed("");
|
||||
Ok(Vec::new())
|
||||
|
@@ -141,20 +141,23 @@ where
|
|||
let (exists, _config_different) = linkdefs
|
||||
.into_iter()
|
||||
.find(|linkdef| {
|
||||
&linkdef.source_id == source_id
|
||||
&& &linkdef.target == target
|
||||
&& linkdef.name == self.config.name
|
||||
linkdef.source_id() == source_id
|
||||
&& linkdef.target() == target
|
||||
&& linkdef.name() == self.config.name
|
||||
})
|
||||
.map(|linkdef| {
|
||||
(
|
||||
true,
|
||||
// TODO(#88): reverse compare too
|
||||
// Ensure all named configs are the same
|
||||
linkdef.source_config.iter().all(|config_name| {
|
||||
self.config.source_config.iter().any(|c| c == config_name)
|
||||
}) || linkdef.target_config.iter().all(|config_name| {
|
||||
self.config.target_config.iter().any(|c| c == config_name)
|
||||
}),
|
||||
// Ensure all supplied configs (both source and target) are the same
|
||||
linkdef
|
||||
.source_config()
|
||||
.iter()
|
||||
.eq(self.config.source_config.iter())
|
||||
&& linkdef
|
||||
.target_config()
|
||||
.iter()
|
||||
.eq(self.config.target_config.iter()),
|
||||
)
|
||||
})
|
||||
.unwrap_or((false, false));
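// Descriptive note: Iterator::eq is order-sensitive, so the supplied source and target
// config name lists only compare equal to the stored configuration when they match in
// both content and order.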
|
||||
|
@@ -275,7 +278,7 @@ mod test {
|
|||
vec,
|
||||
};
|
||||
|
||||
use wasmcloud_control_interface::InterfaceLinkDefinition;
|
||||
use wasmcloud_control_interface::Link;
|
||||
|
||||
use chrono::Utc;
|
||||
|
||||
|
@@ -427,26 +430,25 @@ mod test {
|
|||
let provider_ref = "provider_ref".to_string();
|
||||
let provider_id = "provider".to_string();
|
||||
|
||||
let linkdef = InterfaceLinkDefinition {
|
||||
source_id: component_id.to_string(),
|
||||
target: provider_id.to_string(),
|
||||
wit_namespace: "namespace".to_string(),
|
||||
wit_package: "package".to_string(),
|
||||
interfaces: vec!["interface".to_string()],
|
||||
name: "default".to_string(),
|
||||
source_config: vec![],
|
||||
target_config: vec![],
|
||||
};
|
||||
let linkdef = Link::builder()
|
||||
.source_id(&component_id)
|
||||
.target(&provider_id)
|
||||
.wit_namespace("namespace")
|
||||
.wit_package("package")
|
||||
.interfaces(vec!["interface".to_string()])
|
||||
.name("default")
|
||||
.build()
|
||||
.unwrap();
|
||||
|
||||
let scaler = LinkScaler::new(
|
||||
create_store(&lattice_id, &component_ref, &provider_ref).await,
|
||||
LinkScalerConfig {
|
||||
source_id: linkdef.source_id.clone(),
|
||||
target: linkdef.target.clone(),
|
||||
wit_namespace: linkdef.wit_namespace.clone(),
|
||||
wit_package: linkdef.wit_package.clone(),
|
||||
wit_interfaces: linkdef.interfaces.clone(),
|
||||
name: linkdef.name.clone(),
|
||||
source_id: linkdef.source_id().to_string(),
|
||||
target: linkdef.target().to_string(),
|
||||
wit_namespace: linkdef.wit_namespace().to_string(),
|
||||
wit_package: linkdef.wit_package().to_string(),
|
||||
wit_interfaces: linkdef.interfaces().clone(),
|
||||
name: linkdef.name().to_string(),
|
||||
source_config: vec![],
|
||||
target_config: vec![],
|
||||
lattice_id: lattice_id.clone(),
|
||||
|
@@ -580,17 +582,15 @@ mod test {
|
|||
|
||||
let commands = link_scaler
|
||||
.handle_event(&Event::LinkdefSet(LinkdefSet {
|
||||
linkdef: InterfaceLinkDefinition {
|
||||
linkdef: Link::builder()
|
||||
// NOTE: contract, link, and provider id matches but the component is different
|
||||
source_id: "nm0001772".to_string(),
|
||||
target: "VASDASD".to_string(),
|
||||
wit_namespace: "wasmcloud".to_string(),
|
||||
wit_package: "httpserver".to_string(),
|
||||
interfaces: vec![],
|
||||
name: "default".to_string(),
|
||||
source_config: vec![],
|
||||
target_config: vec![],
|
||||
},
|
||||
.source_id("nm0001772")
|
||||
.target("VASDASD")
|
||||
.wit_namespace("wasmcloud")
|
||||
.wit_package("httpserver")
|
||||
.name("default")
|
||||
.build()
|
||||
.unwrap(),
|
||||
}))
|
||||
.await
|
||||
.expect("");
|
||||
|
|
|
@@ -1,5 +1,6 @@
|
|||
use std::collections::{BTreeMap, HashSet};
|
||||
use std::{cmp::Ordering, cmp::Reverse, collections::HashMap};
|
||||
use std::{
|
||||
cmp::Ordering, cmp::Reverse, collections::BTreeMap, collections::HashMap, collections::HashSet,
|
||||
};
|
||||
|
||||
use anyhow::Result;
|
||||
use async_trait::async_trait;
|
||||
|
@@ -9,7 +10,7 @@ use wadm_types::{
|
|||
api::StatusInfo, Spread, SpreadScalerProperty, TraitProperty, DEFAULT_SPREAD_WEIGHT,
|
||||
};
|
||||
|
||||
use crate::events::HostHeartbeat;
|
||||
use crate::events::{ConfigSet, HostHeartbeat};
|
||||
use crate::{
|
||||
commands::{Command, ScaleComponent},
|
||||
events::{Event, HostStarted, HostStopped},
|
||||
|
@@ -113,6 +114,9 @@ impl<S: ReadStore + Send + Sync + Clone> Scaler for ComponentSpreadScaler<S> {
|
|||
Ok(Vec::new())
|
||||
}
|
||||
}
|
||||
Event::ConfigSet(ConfigSet { config_name }) if self.config.contains(config_name) => {
|
||||
self.reconcile().await
|
||||
}
|
||||
// No other event impacts the job of this scaler so we can ignore it
|
||||
_ => Ok(Vec::new()),
|
||||
}
|
||||
|
@@ -171,6 +175,7 @@ impl<S: ReadStore + Send + Sync + Clone> Scaler for ComponentSpreadScaler<S> {
|
|||
|
||||
let mut spread_status = vec![];
|
||||
trace!(spread_requirements = ?self.spread_requirements, ?component_id, "Computing commands");
|
||||
let mut component_instances_per_eligible_host: HashMap<&String, usize> = HashMap::new();
|
||||
let commands = self
|
||||
.spread_requirements
|
||||
.iter()
|
||||
|
@@ -207,6 +212,14 @@ impl<S: ReadStore + Send + Sync + Clone> Scaler for ComponentSpreadScaler<S> {
|
|||
}).collect()
|
||||
})
|
||||
.unwrap_or_default();
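// Descriptive note: fold this spread's per-host instance counts into the map shared
// across all spreads so detect_spread_requirement_conflicts below can compare projected
// totals against each spread's target.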
|
||||
running_components_per_host.iter().for_each(|(host_id, count)| {
|
||||
component_instances_per_eligible_host
|
||||
.entry(host_id)
|
||||
.and_modify(|e| *e += count)
|
||||
.or_insert(*count);
|
||||
});
|
||||
|
||||
|
||||
let current_count: usize = running_components_per_host.values().sum();
|
||||
trace!(current = %current_count, expected = %count, "Calculated running components, reconciling with expected count");
|
||||
// Here we'll generate commands for the proper host depending on where they are running
|
||||
|
@@ -266,6 +279,19 @@ impl<S: ReadStore + Send + Sync + Clone> Scaler for ComponentSpreadScaler<S> {
|
|||
.collect::<Vec<Command>>();
|
||||
trace!(?commands, "Calculated commands for component scaler");
|
||||
|
||||
// Detect spread requirement conflicts
|
||||
if let Some(message) = detect_spread_requirement_conflicts(
|
||||
&self.spread_requirements,
|
||||
&hosts,
|
||||
&component_instances_per_eligible_host,
|
||||
&commands,
|
||||
) {
|
||||
let status = StatusInfo::failed(&message);
|
||||
trace!(?status, "Updating scaler status");
|
||||
*self.status.write().await = status;
|
||||
return Ok(vec![]);
|
||||
}
|
||||
|
||||
let status = match (spread_status.is_empty(), commands.is_empty()) {
|
||||
// No failures, no commands, scaler satisfied
|
||||
(true, true) => StatusInfo::deployed(""),
|
||||
|
@@ -470,6 +496,94 @@ fn compute_spread(spread_config: &SpreadScalerProperty) -> Vec<(Spread, usize)>
|
|||
computed_spreads
|
||||
}
|
||||
|
||||
fn detect_spread_requirement_conflicts(
|
||||
spread_requirements: &[(Spread, usize)],
|
||||
hosts: &HashMap<String, Host>,
|
||||
running_instances_per_host: &HashMap<&String, usize>,
|
||||
commands: &[Command],
|
||||
) -> Option<String> {
|
||||
// Step 1: Determine the union of all eligible hosts for the configured spreads
|
||||
// and collect the current instance count for each eligible host
|
||||
let mut eligible_hosts_instances: HashMap<String, usize> = HashMap::new();
|
||||
for (spread, _) in spread_requirements {
|
||||
for (host_id, host) in hosts {
|
||||
if spread.requirements.iter().all(|(key, value)| {
|
||||
host.labels
|
||||
.get(key)
|
||||
.map(|val| val == value)
|
||||
.unwrap_or(false)
|
||||
}) {
|
||||
let count = running_instances_per_host
|
||||
.get(host_id)
|
||||
.cloned()
|
||||
.unwrap_or(0);
|
||||
eligible_hosts_instances.insert(host_id.clone(), count);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Step 2: derive changeset from commands (for commands that share the same host_id, select the command with the highest instance count & idx is used as a tiebreaker)
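// Worked example (values assumed for illustration): given commands
// [ScaleComponent { host_id: "h1", count: 3 }, ScaleComponent { host_id: "h1", count: 5 }],
// the changeset keeps (5, 1) for "h1"; if both counts were 5, the command with the lower
// index would win the tiebreak and (5, 0) would be kept instead.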
|
||||
let mut changeset: HashMap<String, (usize, usize)> = HashMap::new();
|
||||
for (idx, command) in commands.iter().enumerate() {
|
||||
if let Command::ScaleComponent(ScaleComponent { host_id, count, .. }) = command {
|
||||
let entry = changeset.entry(host_id.clone()).or_insert((0, usize::MAX));
|
||||
if *count as usize > entry.0 || (*count as usize == entry.0 && idx < entry.1) {
|
||||
*entry = (*count as usize, idx);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Apply changeset to the eligible_hosts_instances
|
||||
for (host_id, (count, _)) in changeset {
|
||||
if let Some(current_count) = eligible_hosts_instances.get_mut(&host_id) {
|
||||
*current_count = count;
|
||||
}
|
||||
}
|
||||
|
||||
// Step 3: Create a structure that maps a Spread to a tuple
|
||||
// (spread_eligible_hosts_total_instance_count_if_all_commands_are_applied, target_instance_count_based_on_spread_weight)
|
||||
let mut spread_instances: HashMap<String, (usize, usize)> = HashMap::new();
|
||||
for (spread, target_count) in spread_requirements {
|
||||
let projected_count: usize = eligible_hosts_instances
|
||||
.iter()
|
||||
.filter_map(|(host_id, count)| {
|
||||
if spread.requirements.iter().all(|(key, value)| {
|
||||
hosts
|
||||
.get(host_id)
|
||||
.unwrap()
|
||||
.labels
|
||||
.get(key)
|
||||
.map(|val| val == value)
|
||||
.unwrap_or(false)
|
||||
}) {
|
||||
Some(count)
|
||||
} else {
|
||||
None
|
||||
}
|
||||
})
|
||||
.sum();
|
||||
|
||||
spread_instances.insert(spread.name.clone(), (projected_count, *target_count));
|
||||
}
|
||||
|
||||
// Step 4: Compare the tuples' values to detect conflicts
|
||||
let mut conflicts = Vec::new();
|
||||
for (spread_name, (projected_count, target_count)) in spread_instances {
|
||||
if projected_count != target_count {
|
||||
conflicts.push(format!(
|
||||
"Spread requirement conflict: {} spread requires {} instances vs {} computed from reconciliation commands",
|
||||
spread_name, target_count, projected_count
|
||||
));
|
||||
}
|
||||
}
|
||||
|
||||
if conflicts.is_empty() {
|
||||
return None;
|
||||
}
|
||||
|
||||
Some(conflicts.join(", "))
|
||||
}
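
// Descriptive note on the conflict check above: spreads may select overlapping host sets
// (their label requirements are not required to be disjoint), so a spread's projected count
// also includes instances placed on shared hosts to satisfy other spreads. Whenever a
// projected total differs from the spread's weighted target, a message of the form
// "Spread requirement conflict: <name> spread requires <target> instances vs <projected>
// computed from reconciliation commands" is produced, and all such messages are joined
// with ", ".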
|
||||
|
||||
#[cfg(test)]
|
||||
mod test {
|
||||
use super::*;
|
||||
|
@@ -481,8 +595,8 @@ mod test {
|
|||
|
||||
use anyhow::Result;
|
||||
use chrono::Utc;
|
||||
use wadm_types::{Spread, SpreadScalerProperty};
|
||||
use wasmcloud_control_interface::InterfaceLinkDefinition;
|
||||
use wadm_types::{api::StatusType, Spread, SpreadScalerProperty};
|
||||
use wasmcloud_control_interface::Link;
|
||||
|
||||
use crate::{
|
||||
commands::Command,
|
||||
|
@@ -654,59 +768,78 @@ mod test {
|
|||
|
||||
#[tokio::test]
|
||||
async fn can_compute_spread_commands() -> Result<()> {
|
||||
let lattice_id = "hoohah_multi_stop_component";
|
||||
let lattice_id = "can_compute_spread_commands";
|
||||
let component_reference = "fakecloud.azurecr.io/echo:0.3.4".to_string();
|
||||
let component_id = "fakecloud_azurecr_io_echo_0_3_4".to_string();
|
||||
let host_id = "NASDASDIMAREALHOST";
|
||||
|
||||
let host_id1 = "HOST_ONE";
|
||||
let host_id2 = "HOST_TWO";
|
||||
let host_id3 = "HOST_THREE";
|
||||
|
||||
let store = Arc::new(TestStore::default());
|
||||
|
||||
// STATE SETUP BEGIN, ONE HOST
|
||||
store
|
||||
.store(
|
||||
lattice_id,
|
||||
host_id.to_string(),
|
||||
Host {
|
||||
components: HashMap::new(),
|
||||
friendly_name: "hey".to_string(),
|
||||
labels: HashMap::new(),
|
||||
providers: HashSet::new(),
|
||||
uptime_seconds: 123,
|
||||
version: None,
|
||||
id: host_id.to_string(),
|
||||
last_seen: Utc::now(),
|
||||
},
|
||||
)
|
||||
.await?;
|
||||
// Create three hosts with different labels
|
||||
let mut host1_labels = HashMap::new();
|
||||
host1_labels.insert("zone".to_string(), "east".to_string());
|
||||
|
||||
// Ensure we compute correctly if weights aren't specified
|
||||
let complex_spread = SpreadScalerProperty {
|
||||
let mut host2_labels = HashMap::new();
|
||||
host2_labels.insert("zone".to_string(), "west".to_string());
|
||||
|
||||
let mut host3_labels = HashMap::new();
|
||||
host3_labels.insert("zone".to_string(), "central".to_string());
|
||||
|
||||
// Store the hosts
|
||||
for (host_id, labels) in [
|
||||
(host_id1, host1_labels),
|
||||
(host_id2, host2_labels),
|
||||
(host_id3, host3_labels),
|
||||
] {
|
||||
store
|
||||
.store(
|
||||
lattice_id,
|
||||
host_id.to_string(),
|
||||
Host {
|
||||
components: HashMap::new(),
|
||||
friendly_name: format!("host_{}", host_id),
|
||||
labels,
|
||||
providers: HashSet::new(),
|
||||
uptime_seconds: 123,
|
||||
version: None,
|
||||
id: host_id.to_string(),
|
||||
last_seen: Utc::now(),
|
||||
},
|
||||
)
|
||||
.await?;
|
||||
}
|
||||
|
||||
// Create spread requirements that map to specific hosts
|
||||
let mut east_requirement = BTreeMap::new();
|
||||
east_requirement.insert("zone".to_string(), "east".to_string());
|
||||
|
||||
let mut west_requirement = BTreeMap::new();
|
||||
west_requirement.insert("zone".to_string(), "west".to_string());
|
||||
|
||||
let mut central_requirement = BTreeMap::new();
|
||||
central_requirement.insert("zone".to_string(), "central".to_string());
|
||||
|
||||
let spread_config = SpreadScalerProperty {
|
||||
instances: 103,
|
||||
spread: vec![
|
||||
Spread {
|
||||
// 9 + 1 (remainder trip)
|
||||
name: "ComplexOne".to_string(),
|
||||
requirements: BTreeMap::new(),
|
||||
name: "EastZone".to_string(),
|
||||
requirements: east_requirement, // Maps to host1
|
||||
weight: Some(42),
|
||||
},
|
||||
Spread {
|
||||
// 0
|
||||
name: "ComplexTwo".to_string(),
|
||||
requirements: BTreeMap::new(),
|
||||
name: "WestZone".to_string(),
|
||||
requirements: west_requirement, // Maps to host2
|
||||
weight: Some(3),
|
||||
},
|
||||
Spread {
|
||||
// 8
|
||||
name: "ComplexThree".to_string(),
|
||||
requirements: BTreeMap::new(),
|
||||
name: "CentralZone".to_string(),
|
||||
requirements: central_requirement, // Maps to host3
|
||||
weight: Some(37),
|
||||
},
|
||||
Spread {
|
||||
// 84 + 1 (remainder trip)
|
||||
name: "ComplexFour".to_string(),
|
||||
requirements: BTreeMap::new(),
|
||||
weight: Some(384),
|
||||
},
|
||||
],
|
||||
};
|
||||
|
||||
|
@@ -716,38 +849,46 @@ mod test {
|
|||
component_id.to_string(),
|
||||
lattice_id.to_string(),
|
||||
MODEL_NAME.to_string(),
|
||||
complex_spread,
|
||||
spread_config,
|
||||
"fake_component",
|
||||
vec![],
|
||||
);
|
||||
|
||||
let cmds = spreadscaler.reconcile().await?;
|
||||
assert_eq!(cmds.len(), 3);
|
||||
|
||||
// With weights 42:3:37 and total instances of 103
|
||||
// EastZone (east) should get (52 + 1) instances
|
||||
// WestZone (west) should get 3 instances
|
||||
// CentralZone (central) should get (46 + 1) instances
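// Arithmetic for the expected counts (total weight 42 + 3 + 37 = 82):
// 103 * 42 / 82 = 52, 103 * 3 / 82 = 3, 103 * 37 / 82 = 46, leaving a remainder of 2 that
// compute_spread distributes; per the assertions below it lands on EastZone and CentralZone.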
|
||||
|
||||
assert!(cmds.contains(&Command::ScaleComponent(ScaleComponent {
|
||||
component_id: component_id.to_string(),
|
||||
reference: component_reference.to_string(),
|
||||
host_id: host_id.to_string(),
|
||||
count: 10,
|
||||
host_id: host_id1.to_string(), // east zone
|
||||
count: 53,
|
||||
model_name: MODEL_NAME.to_string(),
|
||||
annotations: spreadscaler_annotations("ComplexOne", spreadscaler.id()),
|
||||
annotations: spreadscaler_annotations("EastZone", spreadscaler.id()),
|
||||
config: vec![]
|
||||
})));
|
||||
|
||||
assert!(cmds.contains(&Command::ScaleComponent(ScaleComponent {
|
||||
component_id: component_id.to_string(),
|
||||
reference: component_reference.to_string(),
|
||||
host_id: host_id.to_string(),
|
||||
count: 8,
|
||||
host_id: host_id2.to_string(), // west zone
|
||||
count: 3,
|
||||
model_name: MODEL_NAME.to_string(),
|
||||
annotations: spreadscaler_annotations("ComplexThree", spreadscaler.id()),
|
||||
annotations: spreadscaler_annotations("WestZone", spreadscaler.id()),
|
||||
config: vec![]
|
||||
})));
|
||||
|
||||
assert!(cmds.contains(&Command::ScaleComponent(ScaleComponent {
|
||||
component_id: component_id.to_string(),
|
||||
reference: component_reference.to_string(),
|
||||
host_id: host_id.to_string(),
|
||||
count: 85,
|
||||
host_id: host_id3.to_string(), // central zone
|
||||
count: 47,
|
||||
model_name: MODEL_NAME.to_string(),
|
||||
annotations: spreadscaler_annotations("ComplexFour", spreadscaler.id()),
|
||||
annotations: spreadscaler_annotations("CentralZone", spreadscaler.id()),
|
||||
config: vec![]
|
||||
})));
|
||||
|
||||
|
@@ -1052,114 +1193,6 @@ mod test {
|
|||
Ok(())
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn can_handle_multiple_spread_matches() -> Result<()> {
|
||||
let lattice_id = "multiple_spread_matches";
|
||||
let component_reference = "fakecloud.azurecr.io/echo:0.3.4".to_string();
|
||||
let component_id = "fakecloud_azurecr_io_echo_0_3_4".to_string();
|
||||
let host_id = "NASDASDIMAREALHOST";
|
||||
|
||||
let store = Arc::new(TestStore::default());
|
||||
|
||||
// Run 75% in east, 25% on resilient hosts
|
||||
let real_spread = SpreadScalerProperty {
|
||||
instances: 20,
|
||||
spread: vec![
|
||||
Spread {
|
||||
name: "SimpleOne".to_string(),
|
||||
requirements: BTreeMap::from_iter([("region".to_string(), "east".to_string())]),
|
||||
weight: Some(75),
|
||||
},
|
||||
Spread {
|
||||
name: "SimpleTwo".to_string(),
|
||||
requirements: BTreeMap::from_iter([(
|
||||
"resilient".to_string(),
|
||||
"true".to_string(),
|
||||
)]),
|
||||
weight: Some(25),
|
||||
},
|
||||
],
|
||||
};
|
||||
|
||||
let spreadscaler = ComponentSpreadScaler::new(
|
||||
store.clone(),
|
||||
component_reference.to_string(),
|
||||
component_id.to_string(),
|
||||
lattice_id.to_string(),
|
||||
MODEL_NAME.to_string(),
|
||||
real_spread,
|
||||
"fake_component",
|
||||
vec![],
|
||||
);
|
||||
|
||||
// STATE SETUP BEGIN, ONE HOST
|
||||
store
|
||||
.store(
|
||||
lattice_id,
|
||||
host_id.to_string(),
|
||||
Host {
|
||||
components: HashMap::from_iter([(component_id.to_string(), 10)]),
|
||||
friendly_name: "hey".to_string(),
|
||||
labels: HashMap::from_iter([
|
||||
("region".to_string(), "east".to_string()),
|
||||
("resilient".to_string(), "true".to_string()),
|
||||
]),
|
||||
providers: HashSet::new(),
|
||||
uptime_seconds: 123,
|
||||
version: None,
|
||||
id: host_id.to_string(),
|
||||
last_seen: Utc::now(),
|
||||
},
|
||||
)
|
||||
.await?;
|
||||
|
||||
store
|
||||
.store(
|
||||
lattice_id,
|
||||
component_id.to_string(),
|
||||
Component {
|
||||
id: component_id.to_string(),
|
||||
name: "Faketor".to_string(),
|
||||
issuer: "AASDASDASDASD".to_string(),
|
||||
instances: HashMap::from_iter([(
|
||||
host_id.to_string(),
|
||||
// 10 instances on this host under the first spread
|
||||
HashSet::from_iter([WadmComponentInfo {
|
||||
count: 10,
|
||||
annotations: spreadscaler_annotations("SimpleOne", spreadscaler.id()),
|
||||
}]),
|
||||
)]),
|
||||
reference: component_reference.to_string(),
|
||||
},
|
||||
)
|
||||
.await?;
|
||||
|
||||
let cmds = spreadscaler.reconcile().await?;
|
||||
assert_eq!(cmds.len(), 2);
|
||||
|
||||
// Should be enforcing 10 instances per spread
|
||||
assert!(cmds.contains(&Command::ScaleComponent(ScaleComponent {
|
||||
component_id: "fakecloud_azurecr_io_echo_0_3_4".to_string(),
|
||||
reference: component_reference.to_string(),
|
||||
host_id: host_id.to_string(),
|
||||
count: 15,
|
||||
model_name: MODEL_NAME.to_string(),
|
||||
annotations: spreadscaler_annotations("SimpleOne", spreadscaler.id()),
|
||||
config: vec![]
|
||||
})));
|
||||
assert!(cmds.contains(&Command::ScaleComponent(ScaleComponent {
|
||||
component_id: "fakecloud_azurecr_io_echo_0_3_4".to_string(),
|
||||
reference: component_reference.to_string(),
|
||||
host_id: host_id.to_string(),
|
||||
count: 5,
|
||||
model_name: MODEL_NAME.to_string(),
|
||||
annotations: spreadscaler_annotations("SimpleTwo", spreadscaler.id()),
|
||||
config: vec![]
|
||||
})));
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn calculates_proper_scale_commands() -> Result<()> {
|
||||
let lattice_id = "calculates_proper_scale_commands";
|
||||
|
@@ -1499,7 +1532,7 @@ mod test {
|
|||
.is_empty());
|
||||
assert!(blobby_spreadscaler
|
||||
.handle_event(&Event::LinkdefSet(LinkdefSet {
|
||||
linkdef: InterfaceLinkDefinition::default()
|
||||
linkdef: Link::default()
|
||||
}))
|
||||
.await?
|
||||
.is_empty());
|
||||
|
@@ -1676,4 +1709,358 @@ mod test {
|
|||
.iter()
|
||||
.any(|(id, _host)| *id == "NASDASDIMAREALHOST4"));
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn can_detect_spread_requirement_conflicts_1() -> Result<()> {
|
||||
let lattice_id = "spread_requirement_conflicts";
|
||||
let component_reference = "fakecloud.azurecr.io/echo:0.1.0".to_string();
|
||||
let component_id = "fakecloud_azurecr_io_echo_0_1_0".to_string();
|
||||
let component_name = "fakecomponent".to_string();
|
||||
|
||||
let store = Arc::new(TestStore::default());
|
||||
|
||||
let host_id_1 = "NASDASDIMAREALHOST1";
|
||||
let host_id_2 = "NASDASDIMAREALHOST2";
|
||||
let host_id_3 = "NASDASDIMAREALHOST3";
|
||||
let host_id_4 = "NASDASDIMAREALHOST4";
|
||||
|
||||
// Create hosts with the specified labels and add them to the store
|
||||
store
|
||||
.store(
|
||||
lattice_id,
|
||||
host_id_1.to_string(),
|
||||
Host {
|
||||
components: HashMap::new(),
|
||||
friendly_name: "node1".to_string(),
|
||||
labels: HashMap::from_iter([
|
||||
("region".to_string(), "us-east-1".to_string()),
|
||||
("cloud".to_string(), "real".to_string()),
|
||||
]),
|
||||
providers: HashSet::new(),
|
||||
uptime_seconds: 123,
|
||||
version: None,
|
||||
id: host_id_1.to_string(),
|
||||
last_seen: Utc::now(),
|
||||
},
|
||||
)
|
||||
.await?;
|
||||
|
||||
store
|
||||
.store(
|
||||
lattice_id,
|
||||
host_id_2.to_string(),
|
||||
Host {
|
||||
components: HashMap::new(),
|
||||
friendly_name: "node2".to_string(),
|
||||
labels: HashMap::from_iter([
|
||||
("region".to_string(), "us-east-2".to_string()),
|
||||
("cloud".to_string(), "real".to_string()),
|
||||
]),
|
||||
providers: HashSet::new(),
|
||||
uptime_seconds: 123,
|
||||
version: None,
|
||||
id: host_id_2.to_string(),
|
||||
last_seen: Utc::now(),
|
||||
},
|
||||
)
|
||||
.await?;
|
||||
|
||||
store
|
||||
.store(
|
||||
lattice_id,
|
||||
host_id_3.to_string(),
|
||||
Host {
|
||||
components: HashMap::new(),
|
||||
friendly_name: "node3".to_string(),
|
||||
labels: HashMap::from_iter([
|
||||
("region".to_string(), "us-west-1".to_string()),
|
||||
("cloud".to_string(), "real".to_string()),
|
||||
]),
|
||||
providers: HashSet::new(),
|
||||
uptime_seconds: 123,
|
||||
version: None,
|
||||
id: host_id_3.to_string(),
|
||||
last_seen: Utc::now(),
|
||||
},
|
||||
)
|
||||
.await?;
|
||||
|
||||
store
|
||||
.store(
|
||||
lattice_id,
|
||||
host_id_4.to_string(),
|
||||
Host {
|
||||
components: HashMap::new(),
|
||||
friendly_name: "node4".to_string(),
|
||||
labels: HashMap::from_iter([
|
||||
("region".to_string(), "us-west-2".to_string()),
|
||||
("cloud".to_string(), "real".to_string()),
|
||||
]),
|
||||
providers: HashSet::new(),
|
||||
uptime_seconds: 123,
|
||||
version: None,
|
||||
id: host_id_4.to_string(),
|
||||
last_seen: Utc::now(),
|
||||
},
|
||||
)
|
||||
.await?;
|
||||
|
||||
let spread_property = SpreadScalerProperty {
|
||||
instances: 12,
|
||||
spread: vec![
|
||||
Spread {
|
||||
name: "eastcoast".to_string(),
|
||||
requirements: BTreeMap::from([("region".to_string(), "us-east-1".to_string())]),
|
||||
weight: Some(25),
|
||||
},
|
||||
Spread {
|
||||
name: "westcoast".to_string(),
|
||||
requirements: BTreeMap::from([("region".to_string(), "us-west-1".to_string())]),
|
||||
weight: Some(25),
|
||||
},
|
||||
Spread {
|
||||
name: "realcloud".to_string(),
|
||||
requirements: BTreeMap::from([("cloud".to_string(), "real".to_string())]),
|
||||
weight: Some(50),
|
||||
},
|
||||
],
|
||||
};
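
// Note on the expected conflict (restating the setup above): every host carries cloud=real,
// so the "realcloud" spread (target 12 * 50% = 6) is eligible on all four hosts, while
// "eastcoast" and "westcoast" each match a single host. Because those host sets overlap,
// the projected total for "realcloud" also counts instances placed for the other spreads,
// which is the conflict the assertions below expect.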
|
||||
|
||||
let spreadscaler = ComponentSpreadScaler::new(
|
||||
store.clone(),
|
||||
component_reference.to_string(),
|
||||
component_id.to_string(),
|
||||
lattice_id.to_string(),
|
||||
MODEL_NAME.to_string(),
|
||||
spread_property,
|
||||
&component_name,
|
||||
vec![],
|
||||
);
|
||||
|
||||
spreadscaler.reconcile().await?;
|
||||
|
||||
// Check the status after reconciliation
|
||||
let status = spreadscaler.status().await;
|
||||
assert_eq!(status.status_type, StatusType::Failed);
|
||||
println!("{:?}", status);
|
||||
assert!(status.message.contains(&format!(
|
||||
"Spread requirement conflict: {} spread requires {} instances",
|
||||
"realcloud", 6
|
||||
)));
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn can_detect_spread_requirement_conflicts_2() -> Result<()> {
|
||||
let lattice_id = "spread_requirement_conflicts";
|
||||
let component_reference = "fakecloud.azurecr.io/echo:0.1.0";
|
||||
let component_id = "fakecloud_azurecr_io_echo_0_1_0";
|
||||
let component_name = "fakecomponent";
|
||||
|
||||
let store = Arc::new(TestStore::default());
|
||||
|
||||
let host_id_1 = "NASDASDIMAREALHOST1";
|
||||
let host_id_2 = "NASDASDIMAREALHOST2";
|
||||
let host_id_3 = "NASDASDIMAREALHOST3";
|
||||
let host_id_4 = "NASDASDIMAREALHOST4";
|
||||
|
||||
let spread_property = SpreadScalerProperty {
|
||||
instances: 12,
|
||||
spread: vec![
|
||||
Spread {
|
||||
name: "eastcoast".to_string(),
|
||||
requirements: BTreeMap::from([("region".to_string(), "us-east-1".to_string())]),
|
||||
weight: Some(25),
|
||||
},
|
||||
Spread {
|
||||
name: "westcoast".to_string(),
|
||||
requirements: BTreeMap::from([("region".to_string(), "us-west-1".to_string())]),
|
||||
weight: Some(25),
|
||||
},
|
||||
Spread {
|
||||
name: "realcloud".to_string(),
|
||||
requirements: BTreeMap::from([("cloud".to_string(), "real".to_string())]),
|
||||
weight: Some(50),
|
||||
},
|
||||
],
|
||||
};
|
||||
|
||||
let spreadscaler = ComponentSpreadScaler::new(
|
||||
store.clone(),
|
||||
component_reference.to_string(),
|
||||
component_id.to_string(),
|
||||
lattice_id.to_string(),
|
||||
MODEL_NAME.to_string(),
|
||||
spread_property,
|
||||
component_name,
|
||||
vec![],
|
||||
);
|
||||
|
||||
// Create components with the specified labels and add them to the store
|
||||
store
|
||||
.store(
|
||||
lattice_id,
|
||||
component_id.to_string(),
|
||||
Component {
|
||||
id: component_id.to_string(),
|
||||
name: component_name.to_string(),
|
||||
issuer: "AASDASDASDASD".to_string(),
|
||||
instances: HashMap::from_iter([
|
||||
(
|
||||
host_id_1.to_string(),
|
||||
// 1 (realcloud) + 11 (eastcoast) = 12 instances on this host
|
||||
HashSet::from_iter([
|
||||
WadmComponentInfo {
|
||||
count: 1,
|
||||
annotations: spreadscaler_annotations(
|
||||
"realcloud",
|
||||
spreadscaler.id(),
|
||||
),
|
||||
},
|
||||
WadmComponentInfo {
|
||||
count: 11,
|
||||
annotations: spreadscaler_annotations(
|
||||
"eastcoast",
|
||||
spreadscaler.id(),
|
||||
),
|
||||
},
|
||||
]),
|
||||
),
|
||||
(
|
||||
host_id_2.to_string(),
|
||||
// 0 instances on this host
|
||||
HashSet::from_iter([]),
|
||||
),
|
||||
(
|
||||
host_id_3.to_string(),
|
||||
// 2 (realcloud) + 33 (west) = 35 instances on this host
|
||||
HashSet::from_iter([
|
||||
WadmComponentInfo {
|
||||
count: 2,
|
||||
annotations: spreadscaler_annotations(
|
||||
"realcloud",
|
||||
spreadscaler.id(),
|
||||
),
|
||||
},
|
||||
WadmComponentInfo {
|
||||
count: 33,
|
||||
annotations: spreadscaler_annotations(
|
||||
"westcoast",
|
||||
spreadscaler.id(),
|
||||
),
|
||||
},
|
||||
]),
|
||||
),
|
||||
(
|
||||
host_id_4.to_string(),
|
||||
// 44 (realcloud) instances on this host
|
||||
HashSet::from_iter([WadmComponentInfo {
|
||||
count: 44,
|
||||
annotations: spreadscaler_annotations(
|
||||
"realcloud",
|
||||
spreadscaler.id(),
|
||||
),
|
||||
}]),
|
||||
),
|
||||
]),
|
||||
reference: component_reference.to_string(),
|
||||
},
|
||||
)
|
||||
.await?;
|
||||
|
||||
// Create hosts with the specified labels and add them to the store
|
||||
store
|
||||
.store(
|
||||
lattice_id,
|
||||
host_id_1.to_string(),
|
||||
Host {
|
||||
components: HashMap::from_iter([(component_id.to_string(), 12)]),
|
||||
friendly_name: "node1".to_string(),
|
||||
labels: HashMap::from_iter([
|
||||
("region".to_string(), "us-east-1".to_string()),
|
||||
("cloud".to_string(), "real".to_string()),
|
||||
]),
|
||||
providers: HashSet::new(),
|
||||
uptime_seconds: 123,
|
||||
version: None,
|
||||
id: host_id_1.to_string(),
|
||||
last_seen: Utc::now(),
|
||||
},
|
||||
)
|
||||
.await?;
|
||||
|
||||
store
|
||||
.store(
|
||||
lattice_id,
|
||||
host_id_2.to_string(),
|
||||
Host {
|
||||
components: HashMap::from_iter([(component_id.to_string(), 0)]),
|
||||
friendly_name: "node2".to_string(),
|
||||
labels: HashMap::from_iter([
|
||||
("region".to_string(), "us-east-2".to_string()),
|
||||
("cloud".to_string(), "real".to_string()),
|
||||
]),
|
||||
providers: HashSet::new(),
|
||||
uptime_seconds: 123,
|
||||
version: None,
|
||||
id: host_id_2.to_string(),
|
||||
last_seen: Utc::now(),
|
||||
},
|
||||
)
|
||||
.await?;
|
||||
|
||||
store
|
||||
.store(
|
||||
lattice_id,
|
||||
host_id_3.to_string(),
|
||||
Host {
|
||||
components: HashMap::from_iter([(component_id.to_string(), 35)]),
|
||||
friendly_name: "node3".to_string(),
|
||||
labels: HashMap::from_iter([
|
||||
("region".to_string(), "us-west-1".to_string()),
|
||||
("cloud".to_string(), "real".to_string()),
|
||||
]),
|
||||
providers: HashSet::new(),
|
||||
uptime_seconds: 123,
|
||||
version: None,
|
||||
id: host_id_3.to_string(),
|
||||
last_seen: Utc::now(),
|
||||
},
|
||||
)
|
||||
.await?;
|
||||
|
||||
store
|
||||
.store(
|
||||
lattice_id,
|
||||
host_id_4.to_string(),
|
||||
Host {
|
||||
components: HashMap::from_iter([(component_id.to_string(), 44)]),
|
||||
friendly_name: "node4".to_string(),
|
||||
labels: HashMap::from_iter([
|
||||
("region".to_string(), "us-west-2".to_string()),
|
||||
("cloud".to_string(), "real".to_string()),
|
||||
]),
|
||||
providers: HashSet::new(),
|
||||
uptime_seconds: 123,
|
||||
version: None,
|
||||
id: host_id_4.to_string(),
|
||||
last_seen: Utc::now(),
|
||||
},
|
||||
)
|
||||
.await?;
|
||||
|
||||
spreadscaler.reconcile().await?;
|
||||
|
||||
// Check the status after reconciliation
|
||||
let status = spreadscaler.status().await;
|
||||
assert_eq!(status.status_type, StatusType::Failed);
|
||||
println!("{:?}", status);
|
||||
assert!(status.message.contains(&format!(
|
||||
"Spread requirement conflict: {} spread requires {} instances",
|
||||
"realcloud", 6
|
||||
)));
|
||||
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
|
|
@@ -7,12 +7,16 @@ use anyhow::Result;
|
|||
use async_trait::async_trait;
|
||||
use tokio::sync::{OnceCell, RwLock};
|
||||
use tracing::{instrument, trace};
|
||||
use wadm_types::{api::StatusInfo, Spread, SpreadScalerProperty, TraitProperty};
|
||||
use wadm_types::{
|
||||
api::{StatusInfo, StatusType},
|
||||
Spread, SpreadScalerProperty, TraitProperty,
|
||||
};
|
||||
|
||||
use crate::{
|
||||
commands::{Command, StartProvider, StopProvider},
|
||||
events::{
|
||||
Event, HostHeartbeat, HostStarted, HostStopped, ProviderInfo, ProviderStarted,
|
||||
ConfigSet, Event, HostHeartbeat, HostStarted, HostStopped, ProviderHealthCheckFailed,
|
||||
ProviderHealthCheckInfo, ProviderHealthCheckPassed, ProviderInfo, ProviderStarted,
|
||||
ProviderStopped,
|
||||
},
|
||||
scaler::{
|
||||
|
@@ -22,7 +26,7 @@ use crate::{
|
|||
},
|
||||
Scaler,
|
||||
},
|
||||
storage::{Host, ReadStore},
|
||||
storage::{Host, Provider, ProviderStatus, ReadStore},
|
||||
SCALER_KEY,
|
||||
};
|
||||
|
||||
|
@@ -115,6 +119,65 @@ impl<S: ReadStore + Send + Sync + Clone> Scaler for ProviderSpreadScaler<S> {
|
|||
{
|
||||
self.reconcile().await
|
||||
}
|
||||
// perform status updates for health check events
|
||||
Event::ProviderHealthCheckFailed(ProviderHealthCheckFailed {
|
||||
data: ProviderHealthCheckInfo { provider_id, .. },
|
||||
})
|
||||
| Event::ProviderHealthCheckPassed(ProviderHealthCheckPassed {
|
||||
data: ProviderHealthCheckInfo { provider_id, .. },
|
||||
}) if provider_id == &self.config.provider_id => {
|
||||
let provider = self
|
||||
.store
|
||||
.get::<Provider>(&self.config.lattice_id, &self.config.provider_id)
|
||||
.await?;
|
||||
|
||||
let unhealthy_providers = provider.map_or(0, |p| {
|
||||
p.hosts
|
||||
.values()
|
||||
.filter(|s| *s == &ProviderStatus::Failed)
|
||||
.count()
|
||||
});
|
||||
let status = self.status.read().await.to_owned();
|
||||
// update health status of scaler
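// Transition summary for the match below: a Deployed status with at least one
// ProviderStatus::Failed host becomes Failed("Unhealthy provider on N host(s)");
// a Failed status carrying that prefix flips back to Deployed once no hosts report
// failed; every other combination leaves the stored status unchanged.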
|
||||
if let Some(status) = match (status, unhealthy_providers > 0) {
|
||||
// scaler is deployed but contains unhealthy providers
|
||||
(
|
||||
StatusInfo {
|
||||
status_type: StatusType::Deployed,
|
||||
..
|
||||
},
|
||||
true,
|
||||
) => Some(StatusInfo::failed(&format!(
|
||||
"Unhealthy provider on {} host(s)",
|
||||
unhealthy_providers
|
||||
))),
|
||||
// scaler can become unhealthy only if it was previously deployed
|
||||
// once scaler becomes healthy again revert back to deployed state
|
||||
// this is a workaround to detect unhealthy status until
|
||||
// StatusType::Unhealthy can be used
|
||||
(
|
||||
StatusInfo {
|
||||
status_type: StatusType::Failed,
|
||||
message,
|
||||
},
|
||||
false,
|
||||
) if message.starts_with("Unhealthy provider on") => {
|
||||
Some(StatusInfo::deployed(""))
|
||||
}
|
||||
// don't update status if scaler is not deployed
|
||||
_ => None,
|
||||
} {
|
||||
*self.status.write().await = status;
|
||||
}
|
||||
|
||||
// only status needs update no new commands required
|
||||
Ok(Vec::new())
|
||||
}
|
||||
Event::ConfigSet(ConfigSet { config_name })
|
||||
if self.config.provider_config.contains(config_name) =>
|
||||
{
|
||||
self.reconcile().await
|
||||
}
|
||||
// No other event impacts the job of this scaler so we can ignore it
|
||||
_ => Ok(Vec::new()),
|
||||
}
|
||||
|
@@ -1206,4 +1269,274 @@ mod test {
        Ok(())
    }

    #[tokio::test]
    async fn test_healthy_providers_return_healthy_status() -> Result<()> {
        let lattice_id = "test_healthy_providers";
        let provider_ref = "fakecloud.azurecr.io/provider:3.2.1".to_string();
        let provider_id = "VASDASDIAMAREALPROVIDERPROVIDER";

        let host_id_one = "NASDASDIMAREALHOSTONE";
        let host_id_two = "NASDASDIMAREALHOSTTWO";

        let store = Arc::new(TestStore::default());

        store
            .store(
                lattice_id,
                host_id_one.to_string(),
                Host {
                    components: HashMap::new(),
                    friendly_name: "hey".to_string(),
                    labels: HashMap::from_iter([
                        ("cloud".to_string(), "fake".to_string()),
                        ("region".to_string(), "us-noneofyourbusiness-1".to_string()),
                    ]),

                    providers: HashSet::from_iter([ProviderInfo {
                        provider_id: provider_id.to_string(),
                        provider_ref: provider_ref.to_string(),
                        annotations: BTreeMap::default(),
                    }]),
                    uptime_seconds: 123,
                    version: None,
                    id: host_id_one.to_string(),
                    last_seen: Utc::now(),
                },
            )
            .await?;

        // Ensure we spread evenly with equal weights, clean division
        let multi_spread_even = SpreadScalerProperty {
            instances: 2,
            spread: vec![Spread {
                name: "SimpleOne".to_string(),
                requirements: BTreeMap::from_iter([("cloud".to_string(), "fake".to_string())]),
                weight: Some(100),
            }],
        };

        let spreadscaler = ProviderSpreadScaler::new(
            store.clone(),
            ProviderSpreadConfig {
                lattice_id: lattice_id.to_string(),
                provider_reference: provider_ref.to_string(),
                provider_id: provider_id.to_string(),
                spread_config: multi_spread_even,
                model_name: MODEL_NAME.to_string(),
                provider_config: vec!["foobar".to_string()],
            },
            "fake_component",
        );

        store
            .store(
                lattice_id,
                host_id_two.to_string(),
                Host {
                    components: HashMap::new(),
                    friendly_name: "hey".to_string(),
                    labels: HashMap::from_iter([
                        ("cloud".to_string(), "fake".to_string()),
                        ("region".to_string(), "us-yourhouse-1".to_string()),
                    ]),

                    providers: HashSet::from_iter([ProviderInfo {
                        provider_id: provider_id.to_string(),
                        provider_ref: provider_ref.to_string(),
                        annotations: spreadscaler_annotations("SimpleOne", spreadscaler.id()),
                    }]),

                    uptime_seconds: 123,
                    version: None,
                    id: host_id_two.to_string(),
                    last_seen: Utc::now(),
                },
            )
            .await?;

        store
            .store(
                lattice_id,
                provider_id.to_string(),
                Provider {
                    id: provider_id.to_string(),
                    name: "provider".to_string(),
                    issuer: "issuer".to_string(),
                    reference: provider_ref.to_string(),
                    hosts: HashMap::from([
                        (host_id_one.to_string(), ProviderStatus::Failed),
                        (host_id_two.to_string(), ProviderStatus::Running),
                    ]),
                },
            )
            .await?;

        spreadscaler.reconcile().await?;
        spreadscaler
            .handle_event(&Event::ProviderHealthCheckFailed(
                ProviderHealthCheckFailed {
                    data: ProviderHealthCheckInfo {
                        provider_id: provider_id.to_string(),
                        host_id: host_id_one.to_string(),
                    },
                },
            ))
            .await?;

        store
            .store(
                lattice_id,
                provider_id.to_string(),
                Provider {
                    id: provider_id.to_string(),
                    name: "provider".to_string(),
                    issuer: "issuer".to_string(),
                    reference: provider_ref.to_string(),
                    hosts: HashMap::from([
                        (host_id_one.to_string(), ProviderStatus::Pending),
                        (host_id_two.to_string(), ProviderStatus::Running),
                    ]),
                },
            )
            .await?;

        spreadscaler
            .handle_event(&Event::ProviderHealthCheckPassed(
                ProviderHealthCheckPassed {
                    data: ProviderHealthCheckInfo {
                        provider_id: provider_id.to_string(),
                        host_id: host_id_two.to_string(),
                    },
                },
            ))
            .await?;

        assert_eq!(
            spreadscaler.status.read().await.to_owned(),
            StatusInfo::deployed("")
        );
        Ok(())
    }

    #[tokio::test]
    async fn test_unhealthy_providers_return_unhealthy_status() -> Result<()> {
        let lattice_id = "test_unhealthy_providers";
        let provider_ref = "fakecloud.azurecr.io/provider:3.2.1".to_string();
        let provider_id = "VASDASDIAMAREALPROVIDERPROVIDER";

        let host_id_one = "NASDASDIMAREALHOSTONE";
        let host_id_two = "NASDASDIMAREALHOSTTWO";

        let store = Arc::new(TestStore::default());

        store
            .store(
                lattice_id,
                host_id_one.to_string(),
                Host {
                    components: HashMap::new(),
                    friendly_name: "hey".to_string(),
                    labels: HashMap::from_iter([
                        ("cloud".to_string(), "fake".to_string()),
                        ("region".to_string(), "us-noneofyourbusiness-1".to_string()),
                    ]),

                    providers: HashSet::from_iter([ProviderInfo {
                        provider_id: provider_id.to_string(),
                        provider_ref: provider_ref.to_string(),
                        annotations: BTreeMap::default(),
                    }]),
                    uptime_seconds: 123,
                    version: None,
                    id: host_id_one.to_string(),
                    last_seen: Utc::now(),
                },
            )
            .await?;

        // Ensure we spread evenly with equal weights, clean division
        let multi_spread_even = SpreadScalerProperty {
            instances: 2,
            spread: vec![Spread {
                name: "SimpleOne".to_string(),
                requirements: BTreeMap::from_iter([("cloud".to_string(), "fake".to_string())]),
                weight: Some(100),
            }],
        };

        let spreadscaler = ProviderSpreadScaler::new(
            store.clone(),
            ProviderSpreadConfig {
                lattice_id: lattice_id.to_string(),
                provider_reference: provider_ref.to_string(),
                provider_id: provider_id.to_string(),
                spread_config: multi_spread_even,
                model_name: MODEL_NAME.to_string(),
                provider_config: vec!["foobar".to_string()],
            },
            "fake_component",
        );

        store
            .store(
                lattice_id,
                host_id_two.to_string(),
                Host {
                    components: HashMap::new(),
                    friendly_name: "hey".to_string(),
                    labels: HashMap::from_iter([
                        ("cloud".to_string(), "fake".to_string()),
                        ("region".to_string(), "us-yourhouse-1".to_string()),
                    ]),

                    providers: HashSet::from_iter([ProviderInfo {
                        provider_id: provider_id.to_string(),
                        provider_ref: provider_ref.to_string(),
                        annotations: spreadscaler_annotations("SimpleOne", spreadscaler.id()),
                    }]),

                    uptime_seconds: 123,
                    version: None,
                    id: host_id_two.to_string(),
                    last_seen: Utc::now(),
                },
            )
            .await?;

        store
            .store(
                lattice_id,
                provider_id.to_string(),
                Provider {
                    id: provider_id.to_string(),
                    name: "provider".to_string(),
                    issuer: "issuer".to_string(),
                    reference: provider_ref.to_string(),
                    hosts: HashMap::from([
                        (host_id_one.to_string(), ProviderStatus::Failed),
                        (host_id_two.to_string(), ProviderStatus::Running),
                    ]),
                },
            )
            .await?;

        spreadscaler.reconcile().await?;
        spreadscaler
            .handle_event(&Event::ProviderHealthCheckFailed(
                ProviderHealthCheckFailed {
                    data: ProviderHealthCheckInfo {
                        provider_id: provider_id.to_string(),
                        host_id: host_id_one.to_string(),
                    },
                },
            ))
            .await?;

        assert_eq!(
            spreadscaler.status.read().await.to_owned(),
            StatusInfo::failed("Unhealthy provider on 1 host(s)")
        );
        Ok(())
    }
}

@ -0,0 +1,66 @@
use anyhow::Result;
use async_trait::async_trait;
use wadm_types::{api::StatusInfo, TraitProperty};

use crate::{commands::Command, events::Event, scaler::Scaler};

/// The StatusScaler is a scaler that only reports a predefined status and does not perform any actions.
/// It's primarily used as a placeholder for a scaler that wadm failed to initialize for reasons that
/// couldn't be caught during deployment, and will not be fixed until a new version of the app is deployed.
pub struct StatusScaler {
    id: String,
    kind: String,
    name: String,
    status: StatusInfo,
}

#[async_trait]
impl Scaler for StatusScaler {
    fn id(&self) -> &str {
        &self.id
    }

    fn kind(&self) -> &str {
        &self.kind
    }

    fn name(&self) -> String {
        self.name.to_string()
    }

    async fn status(&self) -> StatusInfo {
        self.status.clone()
    }

    async fn update_config(&mut self, _config: TraitProperty) -> Result<Vec<Command>> {
        Ok(vec![])
    }

    async fn handle_event(&self, _event: &Event) -> Result<Vec<Command>> {
        Ok(Vec::with_capacity(0))
    }

    async fn reconcile(&self) -> Result<Vec<Command>> {
        Ok(Vec::with_capacity(0))
    }

    async fn cleanup(&self) -> Result<Vec<Command>> {
        Ok(Vec::with_capacity(0))
    }
}

impl StatusScaler {
    pub fn new(
        id: impl AsRef<str>,
        kind: impl AsRef<str>,
        name: impl AsRef<str>,
        status: StatusInfo,
    ) -> Self {
        StatusScaler {
            id: id.as_ref().to_string(),
            kind: kind.as_ref().to_string(),
            name: name.as_ref().to_string(),
            status,
        }
    }
}

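A brief usage sketch for the new scaler: the helper below is hypothetical (its name, the id/kind strings, and the error wording are not part of the diff) and only illustrates how a StatusScaler could stand in for a scaler that failed to initialize, reusing the StatusInfo::failed constructor seen earlier.

// Hypothetical helper, not part of the diff: fall back to a StatusScaler when a
// real scaler cannot be built for a component.
use wadm_types::api::StatusInfo;

fn placeholder_scaler(component_id: &str, err: &anyhow::Error) -> StatusScaler {
    StatusScaler::new(
        format!("statusscaler-{component_id}"), // illustrative id format
        "statusscaler",                         // illustrative kind label
        component_id,
        StatusInfo::failed(&format!("Failed to initialize scaler: {err}")),
    )
}
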
@ -2,7 +2,6 @@ use std::collections::HashMap;
|
|||
|
||||
use anyhow::anyhow;
|
||||
use async_nats::{jetstream::stream::Stream, Client, Message, Subject};
|
||||
use base64::{engine::general_purpose::STANDARD as B64decoder, Engine};
|
||||
use serde_json::json;
|
||||
use tracing::{debug, error, instrument, trace};
|
||||
use wadm_types::api::{ModelSummary, StatusInfo, StatusType};
|
||||
|
@ -11,8 +10,8 @@ use wadm_types::{
|
|||
api::{
|
||||
DeleteModelRequest, DeleteModelResponse, DeleteResult, DeployModelRequest,
|
||||
DeployModelResponse, DeployResult, GetModelRequest, GetModelResponse, GetResult,
|
||||
PutModelResponse, PutResult, Status, StatusResponse, StatusResult, UndeployModelRequest,
|
||||
VersionInfo, VersionResponse,
|
||||
ListModelsResponse, PutModelResponse, PutResult, Status, StatusResponse, StatusResult,
|
||||
UndeployModelRequest, VersionInfo, VersionResponse,
|
||||
},
|
||||
CapabilityProperties, Manifest, Properties,
|
||||
};
|
||||
|
@ -70,8 +69,7 @@ impl<P: Publisher> Handler<P> {
|
|||
self.send_error(
|
||||
msg.reply,
|
||||
format!(
|
||||
"Manifest name {} contains invalid characters. Manifest names can only contain alphanumeric characters, dashes, and underscores.",
|
||||
manifest_name
|
||||
"Manifest name {manifest_name} contains invalid characters. Manifest names can only contain alphanumeric characters, dashes, and underscores.",
|
||||
),
|
||||
)
|
||||
.await;
|
||||
|
@ -90,11 +88,47 @@ impl<P: Publisher> Handler<P> {
|
|||
}
|
||||
};
|
||||
|
||||
if let Some(error_message) = validate_manifest(manifest.clone()).await.err() {
|
||||
if let Some(error_message) = validate_manifest(&manifest).await.err() {
|
||||
self.send_error(msg.reply, error_message.to_string()).await;
|
||||
return;
|
||||
}
|
||||
|
||||
let all_stored_manifests = self
|
||||
.store
|
||||
.list(account_id, lattice_id)
|
||||
.await
|
||||
.unwrap_or_default();
|
||||
let deployed_shared_apps: Vec<&Manifest> = all_stored_manifests
|
||||
.iter()
|
||||
// Only keep deployed, shared applications
|
||||
.filter(|manifest| {
|
||||
manifest.deployed_version().is_some() && manifest.get_current().shared()
|
||||
})
|
||||
.map(|manifest| manifest.get_current())
|
||||
.collect();
|
||||
|
||||
// NOTE(brooksmtownsend): You can put an application with missing shared components, because
|
||||
// the time where you truly need them is when you deploy the application. This can cause a bit
|
||||
// of friction when it comes to deploy, but it avoids the frustrating race condition where you
|
||||
// - Put the application looking for a deployed shared component
|
||||
// - Undeploy the application with the shared component
|
||||
// - Deploy the new application looking for the shared component (error)
|
||||
let missing_shared_components = manifest.missing_shared_components(&deployed_shared_apps);
|
||||
let message = if missing_shared_components.is_empty() {
|
||||
format!(
|
||||
"Successfully put manifest {} {}",
|
||||
manifest_name,
|
||||
current_manifests.current_version().to_owned()
|
||||
)
|
||||
} else {
|
||||
format!(
|
||||
"Successfully put manifest {} {}, but some shared components are not deployed: {:?}",
|
||||
manifest_name,
|
||||
current_manifests.current_version().to_owned(),
|
||||
missing_shared_components
|
||||
)
|
||||
};
|
||||
|
||||
let incoming_version = manifest.version().to_owned();
|
||||
if !current_manifests.add_version(manifest) {
|
||||
self.send_error(
|
||||
|
@ -115,11 +149,7 @@ impl<P: Publisher> Handler<P> {
|
|||
},
|
||||
name: manifest_name.clone(),
|
||||
total_versions: current_manifests.count(),
|
||||
message: format!(
|
||||
"Successfully put manifest {} {}",
|
||||
manifest_name,
|
||||
current_manifests.current_version()
|
||||
),
|
||||
message,
|
||||
};
|
||||
|
||||
trace!(total_manifests = %resp.total_versions, "Storing manifests");
|
||||
|
@ -236,6 +266,45 @@ impl<P: Publisher> Handler<P> {
|
|||
.await
|
||||
}
|
||||
|
||||
// NOTE: This is the same as list_models but responds with just the list of models instead of using
|
||||
// the new wrapper type. This can be removed in 0.15.0 once clients all query the new subject
|
||||
#[instrument(level = "debug", skip(self, msg))]
|
||||
pub(crate) async fn list_models_deprecated(
|
||||
&self,
|
||||
msg: Message,
|
||||
account_id: Option<&str>,
|
||||
lattice_id: &str,
|
||||
) {
|
||||
let stored_manifests = match self.store.list(account_id, lattice_id).await {
|
||||
Ok(d) => d,
|
||||
Err(e) => {
|
||||
error!(error = %e, "Unable to fetch data");
|
||||
self.send_error(msg.reply, "Internal storage error".to_string())
|
||||
.await;
|
||||
return;
|
||||
}
|
||||
};
|
||||
|
||||
let application_summaries = stored_manifests.into_iter().map(|manifest| async {
|
||||
let status = self
|
||||
.get_manifest_status(lattice_id, manifest.name())
|
||||
.await
|
||||
.unwrap_or_else(|| {
|
||||
Status::new(StatusInfo::waiting(
|
||||
"Waiting for status: Lattice contains no hosts, deployment not started.",
|
||||
), vec![])
|
||||
});
|
||||
summary_from_manifest_status(manifest, status)
|
||||
});
|
||||
|
||||
let models: Vec<ModelSummary> = futures::future::join_all(application_summaries).await;
|
||||
|
||||
// NOTE: We _just_ deserialized this from the store above and then manually constructed it,
|
||||
// so we should be just fine. Just in case though, we unwrap to default
|
||||
self.send_reply(msg.reply, serde_json::to_vec(&models).unwrap_or_default())
|
||||
.await
|
||||
}
|
||||
|
||||
#[instrument(level = "debug", skip(self, msg))]
|
||||
pub async fn list_models(&self, msg: Message, account_id: Option<&str>, lattice_id: &str) {
|
||||
let stored_manifests = match self.store.list(account_id, lattice_id).await {
|
||||
|
@ -260,11 +329,17 @@ impl<P: Publisher> Handler<P> {
|
|||
summary_from_manifest_status(manifest, status)
|
||||
});
|
||||
|
||||
let data: Vec<ModelSummary> = futures::future::join_all(application_summaries).await;
|
||||
let models: Vec<ModelSummary> = futures::future::join_all(application_summaries).await;
|
||||
|
||||
let reply = ListModelsResponse {
|
||||
result: GetResult::Success,
|
||||
message: "Successfully fetched list of applications".to_string(),
|
||||
models,
|
||||
};
|
||||
|
||||
// NOTE: We _just_ deserialized this from the store above and then manually constructed it,
|
||||
// so we should be just fine. Just in case though, we unwrap to default
|
||||
self.send_reply(msg.reply, serde_json::to_vec(&data).unwrap_or_default())
|
||||
self.send_reply(msg.reply, serde_json::to_vec(&reply).unwrap_or_default())
|
||||
.await
|
||||
}
|
||||
|
||||
|
@ -337,97 +412,110 @@ impl<P: Publisher> Handler<P> {
|
|||
}
|
||||
}
|
||||
};
|
||||
let reply_data = if let Some(version) = req.version {
|
||||
|
||||
// TODO(#451): if shared and deployed, make sure that no other shared apps are using it
|
||||
let reply_data = {
|
||||
match self.store.get(account_id, lattice_id, name).await {
|
||||
Ok(Some((mut current, current_revision))) => {
|
||||
let deleted = current.delete_version(&version);
|
||||
if deleted && !current.is_empty() {
|
||||
// If the version we deleted was the deployed one, undeploy it
|
||||
let deployed_version = current.deployed_version();
|
||||
let undeploy = if deployed_version.map(|v| v == version).unwrap_or(false) {
|
||||
trace!(?deployed_version, deleted_version = %version, "Deployed version matches deleted. Will undeploy");
|
||||
current.undeploy();
|
||||
true
|
||||
if let Some(version) = req.version {
|
||||
let deleted = current.delete_version(&version);
|
||||
if deleted && !current.is_empty() {
|
||||
// If the version we deleted was the deployed one, undeploy it
|
||||
let deployed_version = current.deployed_version();
|
||||
let undeploy = if deployed_version
|
||||
.map(|v| v == version)
|
||||
.unwrap_or(false)
|
||||
{
|
||||
trace!(?deployed_version, deleted_version = %version, "Deployed version matches deleted. Will undeploy");
|
||||
current.undeploy();
|
||||
true
|
||||
} else {
|
||||
trace!(?deployed_version, deleted_version = %version, "Deployed version does not match deleted version. Will not undeploy");
|
||||
false
|
||||
};
|
||||
self.store
|
||||
.set(account_id, lattice_id, current, Some(current_revision))
|
||||
.await
|
||||
.map(|_| DeleteModelResponse {
|
||||
result: DeleteResult::Deleted,
|
||||
message: format!(
|
||||
"Successfully deleted version {version} of application {name}"
|
||||
),
|
||||
undeploy,
|
||||
})
|
||||
.unwrap_or_else(|e| {
|
||||
error!(error = %e, "Unable to delete data");
|
||||
DeleteModelResponse {
|
||||
result: DeleteResult::Error,
|
||||
message: format!(
|
||||
"Internal storage error when deleting {version} of application {name}"
|
||||
),
|
||||
undeploy: false,
|
||||
}
|
||||
})
|
||||
} else if deleted && current.is_empty() {
|
||||
// If we deleted the last one, delete the model from the store
|
||||
self.store
|
||||
.delete(account_id, lattice_id, name)
|
||||
.await
|
||||
.map(|_| DeleteModelResponse {
|
||||
result: DeleteResult::Deleted,
|
||||
message: format!(
|
||||
"Successfully deleted last version of application {name}"
|
||||
),
|
||||
// By default if it is all gone, we definitely undeployed things
|
||||
undeploy: true,
|
||||
})
|
||||
.unwrap_or_else(|e| {
|
||||
error!(error = %e, "Unable to delete data");
|
||||
DeleteModelResponse {
|
||||
result: DeleteResult::Deleted,
|
||||
message: format!(
|
||||
"Internal storage error when deleting {version} of application {name}"
|
||||
),
|
||||
undeploy: false,
|
||||
}
|
||||
})
|
||||
} else {
|
||||
trace!(?deployed_version, deleted_version = %version, "Deployed version does not match deleted version. Will not undeploy");
|
||||
false
|
||||
};
|
||||
self.store
|
||||
.set(account_id, lattice_id, current, Some(current_revision))
|
||||
.await
|
||||
.map(|_| DeleteModelResponse {
|
||||
result: DeleteResult::Deleted,
|
||||
message: format!(
|
||||
"Successfully deleted version {version} of application {name}"
|
||||
),
|
||||
undeploy,
|
||||
})
|
||||
.unwrap_or_else(|e| {
|
||||
DeleteModelResponse {
|
||||
result: DeleteResult::Noop,
|
||||
message: format!("Application version {version} doesn't exist"),
|
||||
undeploy: false,
|
||||
}
|
||||
}
|
||||
} else {
|
||||
match self.store.delete(account_id, lattice_id, name).await {
|
||||
Ok(_) => {
|
||||
DeleteModelResponse {
|
||||
result: DeleteResult::Deleted,
|
||||
message: format!("Successfully deleted application {name}"),
|
||||
// By default if it is all gone, we definitely undeployed things
|
||||
undeploy: true,
|
||||
}
|
||||
}
|
||||
Err(e) => {
|
||||
error!(error = %e, "Unable to delete data");
|
||||
DeleteModelResponse {
|
||||
result: DeleteResult::Error,
|
||||
message: "Internal storage error".to_string(),
|
||||
message: format!(
|
||||
"Internal storage error when deleting application {name}"
|
||||
),
|
||||
undeploy: false,
|
||||
}
|
||||
})
|
||||
} else if deleted && current.is_empty() {
|
||||
// If we deleted the last one, delete the model from the store
|
||||
self.store
|
||||
.delete(account_id, lattice_id, name)
|
||||
.await
|
||||
.map(|_| DeleteModelResponse {
|
||||
result: DeleteResult::Deleted,
|
||||
message: format!(
|
||||
"Successfully deleted last version of application {name}"
|
||||
),
|
||||
// By default if it is all gone, we definitely undeployed things
|
||||
undeploy: true,
|
||||
})
|
||||
.unwrap_or_else(|e| {
|
||||
error!(error = %e, "Unable to delete data");
|
||||
DeleteModelResponse {
|
||||
result: DeleteResult::Deleted,
|
||||
message: "Internal storage error".to_string(),
|
||||
undeploy: false,
|
||||
}
|
||||
})
|
||||
} else {
|
||||
DeleteModelResponse {
|
||||
result: DeleteResult::Noop,
|
||||
message: format!("Application version {version} doesn't exist"),
|
||||
undeploy: false,
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
Ok(None) => DeleteModelResponse {
|
||||
result: DeleteResult::Noop,
|
||||
message: format!("Application {name} doesn't exist"),
|
||||
message: format!("Application {name} doesn't exist or was already deleted"),
|
||||
undeploy: false,
|
||||
},
|
||||
Err(e) => {
|
||||
error!(error = %e, "Unable to fetch current data data");
|
||||
error!(error = %e, "Unable to fetch current manifest data for application {name}");
|
||||
DeleteModelResponse {
|
||||
result: DeleteResult::Error,
|
||||
message: "Internal storage error".to_string(),
|
||||
undeploy: false,
|
||||
}
|
||||
}
|
||||
}
|
||||
} else {
|
||||
match self.store.delete(account_id, lattice_id, name).await {
|
||||
Ok(_) => {
|
||||
DeleteModelResponse {
|
||||
result: DeleteResult::Deleted,
|
||||
message: format!("Successfully deleted application {}", name),
|
||||
// By default if it is all gone, we definitely undeployed things
|
||||
undeploy: true,
|
||||
}
|
||||
}
|
||||
Err(e) => {
|
||||
error!(error = %e, "Unable to delete data");
|
||||
DeleteModelResponse {
|
||||
result: DeleteResult::Error,
|
||||
message: "Internal storage error".to_string(),
|
||||
message: format!("Internal storage error while fetching manifest data for application {name}"),
|
||||
undeploy: false,
|
||||
}
|
||||
}
|
||||
|
@ -629,6 +717,25 @@ impl<P: Publisher> Handler<P> {
|
|||
}
|
||||
}
|
||||
|
||||
// TODO(#451): If this app is shared, or the previous version was, make sure that shared
|
||||
// components that have dependent applications are still present
|
||||
|
||||
let deployed_apps: Vec<&Manifest> = stored_models
|
||||
.iter()
|
||||
.filter(|a| a.deployed_version().is_some() && a.get_current().shared())
|
||||
.map(|a| a.get_current())
|
||||
.collect();
|
||||
let missing_shared_components = staged_model.missing_shared_components(&deployed_apps);
|
||||
|
||||
// Ensure all shared components point to a valid component that is deployed in another application
|
||||
if !missing_shared_components.is_empty() {
|
||||
self.send_error(
|
||||
msg.reply,
|
||||
format!("Application contains shared components that are not deployed in other applications: {:?}", missing_shared_components.iter().map(|c| &c.name).collect::<Vec<_>>())
|
||||
).await;
|
||||
return;
|
||||
}
|
||||
|
||||
if !manifests.deploy(req.version.clone()) {
|
||||
trace!("Requested version does not exist");
|
||||
self.send_reply(
|
||||
|
@ -757,6 +864,7 @@ impl<P: Publisher> Handler<P> {
|
|||
return;
|
||||
}
|
||||
};
|
||||
// TODO(#451): if shared, make sure that no other shared apps are using it
|
||||
|
||||
let reply = if manifests.undeploy() {
|
||||
trace!("Manifest undeployed. Storing updated manifest");
|
||||
|
@ -888,12 +996,9 @@ impl<P: Publisher> Handler<P> {
|
|||
.status_stream
|
||||
.get_last_raw_message_by_subject(&format!("wadm.status.{lattice_id}.{name}",))
|
||||
.await
|
||||
.map(|raw| {
|
||||
B64decoder
|
||||
.decode(raw.payload)
|
||||
.map(|b| serde_json::from_slice::<Status>(&b))
|
||||
}) {
|
||||
Ok(Ok(Ok(status))) => Some(status),
|
||||
.map(|status_msg| serde_json::from_slice::<Status>(&status_msg.payload))
|
||||
{
|
||||
Ok(Ok(status)) => Some(status),
|
||||
// Application status doesn't exist or is invalid, assuming undeployed
|
||||
_ => None,
|
||||
}
|
||||
|
@ -922,8 +1027,8 @@ fn summary_from_manifest_status(manifest: StoredManifest, status: Status) -> Mod
|
|||
}
|
||||
|
||||
// Manifest validation
|
||||
pub(crate) async fn validate_manifest(manifest: Manifest) -> anyhow::Result<()> {
|
||||
let failures = wadm_types::validation::validate_manifest(&manifest).await?;
|
||||
pub(crate) async fn validate_manifest(manifest: &Manifest) -> anyhow::Result<()> {
|
||||
let failures = wadm_types::validation::validate_manifest(manifest).await?;
|
||||
for failure in failures {
|
||||
if matches!(
|
||||
failure.level,
|
||||
|
@ -958,12 +1063,12 @@ mod test {
|
|||
let correct_manifest = deserialize_yaml("../../tests/fixtures/manifests/simple.yaml")
|
||||
.expect("Should be able to parse");
|
||||
|
||||
assert!(validate_manifest(correct_manifest).await.is_ok());
|
||||
assert!(validate_manifest(&correct_manifest).await.is_ok());
|
||||
|
||||
let manifest = deserialize_yaml("../../tests/fixtures/manifests/incorrect_component.yaml")
|
||||
.expect("Should be able to parse");
|
||||
|
||||
match validate_manifest(manifest).await {
|
||||
match validate_manifest(&manifest).await {
|
||||
Ok(()) => panic!("Should have detected incorrect component"),
|
||||
Err(e) => {
|
||||
assert!(e
|
||||
|
@ -975,7 +1080,7 @@ mod test {
|
|||
let manifest = deserialize_yaml("../../tests/fixtures/manifests/duplicate_component.yaml")
|
||||
.expect("Should be able to parse");
|
||||
|
||||
match validate_manifest(manifest).await {
|
||||
match validate_manifest(&manifest).await {
|
||||
Ok(()) => panic!("Should have detected duplicate component"),
|
||||
Err(e) => assert!(e
|
||||
.to_string()
|
||||
|
@ -985,7 +1090,7 @@ mod test {
|
|||
let manifest = deserialize_yaml("../../tests/fixtures/manifests/duplicate_id1.yaml")
|
||||
.expect("Should be able to parse");
|
||||
|
||||
match validate_manifest(manifest).await {
|
||||
match validate_manifest(&manifest).await {
|
||||
Ok(()) => {
|
||||
panic!("Should have detected duplicate component ID in provider properties")
|
||||
}
|
||||
|
@ -997,7 +1102,7 @@ mod test {
|
|||
let manifest = deserialize_yaml("../../tests/fixtures/manifests/duplicate_id2.yaml")
|
||||
.expect("Should be able to parse");
|
||||
|
||||
match validate_manifest(manifest).await {
|
||||
match validate_manifest(&manifest).await {
|
||||
Ok(()) => panic!("Should have detected duplicate component ID in component properties"),
|
||||
Err(e) => assert!(e
|
||||
.to_string()
|
||||
|
@ -1008,12 +1113,41 @@ mod test {
|
|||
deserialize_yaml("../../tests/fixtures/manifests/missing_capability_component.yaml")
|
||||
.expect("Should be able to parse");
|
||||
|
||||
match validate_manifest(manifest).await {
|
||||
match validate_manifest(&manifest).await {
|
||||
Ok(()) => panic!("Should have detected missing capability component"),
|
||||
Err(e) => assert!(e
|
||||
.to_string()
|
||||
.contains("The following capability component(s) are missing from the manifest: ")),
|
||||
}
|
||||
|
||||
let manifest = deserialize_yaml("../../tests/fixtures/manifests/duplicate_links.yaml")
|
||||
.expect("Should be able to parse");
|
||||
|
||||
match validate_manifest(&manifest).await {
|
||||
Ok(()) => panic!("Should have detected duplicate links"),
|
||||
Err(e) => assert!(e
|
||||
.to_string()
|
||||
.contains("Duplicate link found inside component")),
|
||||
}
|
||||
|
||||
let manifest =
|
||||
deserialize_yaml("../../tests/fixtures/manifests/correct_unique_interface_links.yaml")
|
||||
.expect("Should be able to parse");
|
||||
assert!(validate_manifest(&manifest).await.is_ok());
|
||||
|
||||
let manifest = deserialize_yaml(
|
||||
"../../tests/fixtures/manifests/incorrect_unique_interface_links.yaml",
|
||||
)
|
||||
.expect("Should be able to parse");
|
||||
match validate_manifest(&manifest).await {
|
||||
Ok(()) => panic!("Should have detected duplicate interface links"),
|
||||
Err(e) => assert!(
|
||||
e.to_string()
|
||||
.contains("Duplicate link found inside component")
|
||||
&& e.to_string().contains("atomics"),
|
||||
"Error should mention duplicate interface"
|
||||
),
|
||||
}
|
||||
}
|
||||
|
||||
/// Ensure that a long image ref in a manifest works,
|
||||
|
@ -1021,7 +1155,7 @@ mod test {
|
|||
#[tokio::test]
|
||||
async fn manifest_name_long_image_ref() -> Result<()> {
|
||||
validate_manifest(
|
||||
deserialize_yaml("../../tests/fixtures/manifests/long_image_refs.yaml")
|
||||
&deserialize_yaml("../../tests/fixtures/manifests/long_image_refs.yaml")
|
||||
.context("failed to deserialize YAML")?,
|
||||
)
|
||||
.await
|
||||
|
|
|
@ -118,7 +118,12 @@ impl<P: Publisher> Server<P> {
|
|||
category: "model",
|
||||
operation: "list",
|
||||
object_name: None,
|
||||
} => self.handler.list_models(msg, account_id, lattice_id).await,
|
||||
} => {
|
||||
warn!("Received deprecated subject: model.list. Please use model.get instead");
|
||||
self.handler
|
||||
.list_models_deprecated(msg, account_id, lattice_id)
|
||||
.await
|
||||
}
|
||||
ParsedSubject {
|
||||
account_id,
|
||||
lattice_id,
|
||||
|
@ -130,6 +135,13 @@ impl<P: Publisher> Server<P> {
|
|||
.get_model(msg, account_id, lattice_id, name)
|
||||
.await
|
||||
}
|
||||
ParsedSubject {
|
||||
account_id,
|
||||
lattice_id,
|
||||
category: "model",
|
||||
operation: "get",
|
||||
object_name: None,
|
||||
} => self.handler.list_models(msg, account_id, lattice_id).await,
|
||||
ParsedSubject {
|
||||
account_id,
|
||||
lattice_id,
|
||||
|
|
|
@ -3,7 +3,7 @@ use std::sync::Arc;
|
|||
|
||||
use tokio::sync::RwLock;
|
||||
use tracing::debug;
|
||||
use wasmcloud_control_interface::InterfaceLinkDefinition;
|
||||
use wasmcloud_control_interface::Link;
|
||||
use wasmcloud_secrets_types::SecretConfig;
|
||||
|
||||
use crate::storage::{Component, Host, Provider, ReadStore, StateKind};
|
||||
|
@ -28,7 +28,7 @@ pub struct SnapshotStore<S, L> {
|
|||
lattice_source: L,
|
||||
lattice_id: String,
|
||||
stored_state: Arc<RwLock<InMemoryData>>,
|
||||
links: Arc<RwLock<Vec<InterfaceLinkDefinition>>>,
|
||||
links: Arc<RwLock<Vec<Link>>>,
|
||||
}
|
||||
|
||||
impl<S, L> Clone for SnapshotStore<S, L>
|
||||
|
@ -165,7 +165,7 @@ where
|
|||
S: Send + Sync,
|
||||
L: Send + Sync,
|
||||
{
|
||||
async fn get_links(&self) -> anyhow::Result<Vec<InterfaceLinkDefinition>> {
|
||||
async fn get_links(&self) -> anyhow::Result<Vec<Link>> {
|
||||
Ok(self.links.read().await.clone())
|
||||
}
|
||||
}
|
||||
|
|
|
@ -1,8 +1,6 @@
|
|||
use std::{
|
||||
borrow::Borrow,
|
||||
collections::{BTreeMap, HashMap, HashSet},
|
||||
hash::{Hash, Hasher},
|
||||
};
|
||||
use std::borrow::{Borrow, ToOwned};
|
||||
use std::collections::{BTreeMap, HashMap, HashSet};
|
||||
use std::hash::{Hash, Hasher};
|
||||
|
||||
use chrono::{DateTime, Utc};
|
||||
use semver::Version;
|
||||
|
@ -33,7 +31,7 @@ pub struct Provider {
|
|||
pub hosts: HashMap<String, ProviderStatus>,
|
||||
}
|
||||
|
||||
#[derive(Debug, Serialize, Deserialize, Clone)]
|
||||
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
|
||||
pub enum ProviderStatus {
|
||||
/// The provider is starting and hasn't returned a heartbeat yet
|
||||
Pending,
|
||||
|
@ -42,6 +40,7 @@ pub enum ProviderStatus {
|
|||
/// The provider failed to start
|
||||
// TODO(thomastaylor312): In the future, we'll probably want to decay out a provider from state
|
||||
// if it hasn't had a heartbeat
|
||||
// if it fails a recent health check
|
||||
Failed,
|
||||
}
|
||||
|
||||
|
@ -286,8 +285,8 @@ impl From<HostHeartbeat> for Host {
|
|||
.into_iter()
|
||||
.map(|component| {
|
||||
(
|
||||
component.id, // SAFETY: Unlikely to not fit into a usize, but fallback just in case
|
||||
component.max_instances.try_into().unwrap_or(usize::MAX),
|
||||
component.id().into(), // SAFETY: Unlikely to not fit into a usize, but fallback just in case
|
||||
component.max_instances().try_into().unwrap_or(usize::MAX),
|
||||
)
|
||||
})
|
||||
.collect();
|
||||
|
@ -296,12 +295,13 @@ impl From<HostHeartbeat> for Host {
|
|||
.providers
|
||||
.into_iter()
|
||||
.map(|provider| ProviderInfo {
|
||||
provider_id: provider.id,
|
||||
provider_id: provider.id().to_string(),
|
||||
// NOTE: Provider should _always_ have an image ref. The control interface type should be updated.
|
||||
provider_ref: provider.image_ref.unwrap_or_default(),
|
||||
provider_ref: provider.image_ref().map(String::from).unwrap_or_default(),
|
||||
annotations: provider
|
||||
.annotations
|
||||
.map(|a| a.into_iter().collect())
|
||||
.annotations()
|
||||
.map(ToOwned::to_owned)
|
||||
.map(BTreeMap::from_iter)
|
||||
.unwrap_or_default(),
|
||||
})
|
||||
.collect();
|
||||
|
@ -326,9 +326,9 @@ impl From<&HostHeartbeat> for Host {
|
|||
.iter()
|
||||
.map(|component| {
|
||||
(
|
||||
component.id.to_owned(),
|
||||
component.id().to_owned(),
|
||||
// SAFETY: Unlikely to not fit into a usize, but fallback just in case
|
||||
component.max_instances.try_into().unwrap_or(usize::MAX),
|
||||
component.max_instances().try_into().unwrap_or(usize::MAX),
|
||||
)
|
||||
})
|
||||
.collect();
|
||||
|
@ -337,12 +337,12 @@ impl From<&HostHeartbeat> for Host {
|
|||
.providers
|
||||
.iter()
|
||||
.map(|provider| ProviderInfo {
|
||||
provider_id: provider.id.to_owned(),
|
||||
provider_ref: provider.image_ref.to_owned().unwrap_or_default(),
|
||||
provider_id: provider.id().to_owned(),
|
||||
provider_ref: provider.image_ref().map(String::from).unwrap_or_default(),
|
||||
annotations: provider
|
||||
.annotations
|
||||
.clone()
|
||||
.map(|a| a.into_iter().collect())
|
||||
.annotations()
|
||||
.map(ToOwned::to_owned)
|
||||
.map(BTreeMap::from_iter)
|
||||
.unwrap_or_default(),
|
||||
})
|
||||
.collect();
|
||||
|
|
|
@ -3,7 +3,7 @@ use std::{collections::HashMap, sync::Arc};
|
|||
|
||||
use serde::{de::DeserializeOwned, Serialize};
|
||||
use tokio::sync::RwLock;
|
||||
use wasmcloud_control_interface::{HostInventory, InterfaceLinkDefinition};
|
||||
use wasmcloud_control_interface::{HostInventory, Link};
|
||||
use wasmcloud_secrets_types::SecretConfig;
|
||||
|
||||
use crate::publisher::Publisher;
|
||||
|
@ -111,7 +111,7 @@ impl crate::storage::Store for TestStore {
|
|||
pub struct TestLatticeSource {
|
||||
pub claims: HashMap<String, Claims>,
|
||||
pub inventory: Arc<RwLock<HashMap<String, HostInventory>>>,
|
||||
pub links: Vec<InterfaceLinkDefinition>,
|
||||
pub links: Vec<Link>,
|
||||
pub config: HashMap<String, HashMap<String, String>>,
|
||||
}
|
||||
|
||||
|
@ -131,7 +131,7 @@ impl InventorySource for TestLatticeSource {
|
|||
|
||||
#[async_trait::async_trait]
|
||||
impl LinkSource for TestLatticeSource {
|
||||
async fn get_links(&self) -> anyhow::Result<Vec<InterfaceLinkDefinition>> {
|
||||
async fn get_links(&self) -> anyhow::Result<Vec<Link>> {
|
||||
Ok(self.links.clone())
|
||||
}
|
||||
}
|
||||
|
|
|
@ -74,7 +74,7 @@ impl Worker for CommandWorker {
|
|||
trace!(command = ?ld, "Handling put linkdef command");
|
||||
// TODO(thomastaylor312): We should probably change ScopedMessage to allow us `pub`
|
||||
// access to the inner type so we don't have to clone, but no need to worry for now
|
||||
self.client.put_link(ld.clone().into()).await
|
||||
self.client.put_link(ld.clone().try_into()?).await
|
||||
}
|
||||
Command::DeleteLink(ld) => {
|
||||
trace!(command = ?ld, "Handling delete linkdef command");
|
||||
|
@ -101,9 +101,11 @@ impl Worker for CommandWorker {
|
|||
.map_err(|e| anyhow::anyhow!("{e:?}"));
|
||||
|
||||
match res {
|
||||
Ok(ack) if !ack.success => {
|
||||
Ok(ack) if !ack.succeeded() => {
|
||||
message.nack().await;
|
||||
Err(WorkError::Other(anyhow::anyhow!("{}", ack.message).into()))
|
||||
Err(WorkError::Other(
|
||||
anyhow::anyhow!("{}", ack.message()).into(),
|
||||
))
|
||||
}
|
||||
Ok(_) => message.ack().await.map_err(WorkError::from),
|
||||
Err(e) => {
|
||||
|
|
File diff suppressed because it is too large
|
@ -6,7 +6,7 @@ use wasmcloud_secrets_types::SecretConfig;
|
|||
|
||||
use tracing::{debug, instrument, trace, warn};
|
||||
use wadm_types::api::Status;
|
||||
use wasmcloud_control_interface::{CtlResponse, HostInventory, InterfaceLinkDefinition};
|
||||
use wasmcloud_control_interface::{HostInventory, Link};
|
||||
|
||||
use crate::{commands::Command, publisher::Publisher, APP_SPEC_ANNOTATION};
|
||||
|
||||
|
@ -43,7 +43,7 @@ pub trait InventorySource {
|
|||
/// due to testing, but it does allow us to abstract away the concrete type of the client
|
||||
#[async_trait::async_trait]
|
||||
pub trait LinkSource {
|
||||
async fn get_links(&self) -> anyhow::Result<Vec<InterfaceLinkDefinition>>;
|
||||
async fn get_links(&self) -> anyhow::Result<Vec<Link>>;
|
||||
}
|
||||
|
||||
/// A trait for anything that can fetch a piece of named configuration
|
||||
|
@ -95,11 +95,8 @@ pub fn secret_config_from_map(map: HashMap<String, String>) -> anyhow::Result<Se
|
|||
impl ClaimsSource for wasmcloud_control_interface::Client {
|
||||
async fn get_claims(&self) -> anyhow::Result<HashMap<String, Claims>> {
|
||||
match self.get_claims().await.map_err(|e| anyhow::anyhow!("{e}")) {
|
||||
Ok(CtlResponse {
|
||||
success: true,
|
||||
response: Some(claims),
|
||||
..
|
||||
}) => {
|
||||
Ok(ctl_resp) if ctl_resp.succeeded() => {
|
||||
let claims = ctl_resp.data().context("missing claims data")?.to_owned();
|
||||
Ok(claims
|
||||
.into_iter()
|
||||
.filter_map(|mut claim| {
|
||||
|
@ -134,13 +131,12 @@ impl InventorySource for wasmcloud_control_interface::Client {
|
|||
.await
|
||||
.map_err(|e| anyhow::anyhow!("{e:?}"))?
|
||||
{
|
||||
CtlResponse {
|
||||
success: true,
|
||||
response: Some(host_inventory),
|
||||
..
|
||||
} => Ok(host_inventory),
|
||||
CtlResponse { message, .. } => Err(anyhow::anyhow!(
|
||||
"Failed to get inventory for host {host_id}, {message}"
|
||||
ctl_resp if ctl_resp.succeeded() && ctl_resp.data().is_some() => Ok(ctl_resp
|
||||
.into_data()
|
||||
.context("missing host inventory data")?),
|
||||
ctl_resp => Err(anyhow::anyhow!(
|
||||
"Failed to get inventory for host {host_id}, {}",
|
||||
ctl_resp.message()
|
||||
)),
|
||||
}
|
||||
}
|
||||
|
@ -152,18 +148,19 @@ impl InventorySource for wasmcloud_control_interface::Client {
|
|||
// links
|
||||
#[async_trait::async_trait]
|
||||
impl LinkSource for wasmcloud_control_interface::Client {
|
||||
async fn get_links(&self) -> anyhow::Result<Vec<InterfaceLinkDefinition>> {
|
||||
async fn get_links(&self) -> anyhow::Result<Vec<Link>> {
|
||||
match self
|
||||
.get_links()
|
||||
.await
|
||||
.map_err(|e| anyhow::anyhow!("{e:?}"))?
|
||||
{
|
||||
CtlResponse {
|
||||
success: true,
|
||||
response: Some(links),
|
||||
..
|
||||
} => Ok(links),
|
||||
CtlResponse { message, .. } => Err(anyhow::anyhow!("Failed to get links, {message}")),
|
||||
ctl_resp if ctl_resp.succeeded() && ctl_resp.data().is_some() => {
|
||||
Ok(ctl_resp.into_data().context("missing link data")?)
|
||||
}
|
||||
ctl_resp => Err(anyhow::anyhow!(
|
||||
"Failed to get links, {}",
|
||||
ctl_resp.message()
|
||||
)),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -176,15 +173,13 @@ impl ConfigSource for wasmcloud_control_interface::Client {
|
|||
.await
|
||||
.map_err(|e| anyhow::anyhow!("{e:?}"))?
|
||||
{
|
||||
CtlResponse {
|
||||
success: true,
|
||||
response: Some(config),
|
||||
..
|
||||
} => Ok(Some(config)),
|
||||
ctl_resp if ctl_resp.succeeded() && ctl_resp.data().is_some() => {
|
||||
Ok(ctl_resp.into_data())
|
||||
}
|
||||
// TODO(https://github.com/wasmCloud/wasmCloud/issues/1906): The control interface should return a None when config isn't found
|
||||
// instead of returning an error.
|
||||
CtlResponse { message, .. } => {
|
||||
debug!("Failed to get config for {name}, {message}");
|
||||
ctl_resp => {
|
||||
debug!("Failed to get config for {name}, {}", ctl_resp.message());
|
||||
Ok(None)
|
||||
}
|
||||
}
|
||||
|
@ -199,21 +194,16 @@ impl SecretSource for wasmcloud_control_interface::Client {
|
|||
.await
|
||||
.map_err(|e| anyhow::anyhow!("{e:?}"))?
|
||||
{
|
||||
CtlResponse {
|
||||
success: true,
|
||||
response: Some(secret),
|
||||
..
|
||||
} => secret_config_from_map(secret).map(Some),
|
||||
CtlResponse {
|
||||
message,
|
||||
response: None,
|
||||
..
|
||||
} => {
|
||||
debug!("Failed to get secret for {name}, {message}");
|
||||
ctl_resp if ctl_resp.succeeded() && ctl_resp.data().is_some() => {
|
||||
secret_config_from_map(ctl_resp.into_data().context("missing secret data")?)
|
||||
.map(Some)
|
||||
}
|
||||
ctl_resp if ctl_resp.data().is_none() => {
|
||||
debug!("Failed to get secret for {name}, {}", ctl_resp.message());
|
||||
Ok(None)
|
||||
}
|
||||
CtlResponse { message, .. } => {
|
||||
debug!("Failed to get secret for {name}, {message}");
|
||||
ctl_resp => {
|
||||
debug!("Failed to get secret for {name}, {}", ctl_resp.message());
|
||||
Ok(None)
|
||||
}
|
||||
}
|
||||
|
|
|
@ -0,0 +1,704 @@
|
|||
{
|
||||
"nodes": {
|
||||
"advisory-db": {
|
||||
"flake": false,
|
||||
"locked": {
|
||||
"lastModified": 1737565911,
|
||||
"narHash": "sha256-WxIWw1mSPJVU1JfIcTdIubU5UoIwwR8h7UcXop/6htg=",
|
||||
"owner": "rustsec",
|
||||
"repo": "advisory-db",
|
||||
"rev": "ffa26704690a3dc403edcd94baef103ee48f66eb",
|
||||
"type": "github"
|
||||
},
|
||||
"original": {
|
||||
"owner": "rustsec",
|
||||
"repo": "advisory-db",
|
||||
"type": "github"
|
||||
}
|
||||
},
|
||||
"advisory-db_2": {
|
||||
"flake": false,
|
||||
"locked": {
|
||||
"lastModified": 1730464311,
|
||||
"narHash": "sha256-9xJoP1766XJSO1Qr0Lxg2P6dwPncTr3BJYlFMSXBd/E=",
|
||||
"owner": "rustsec",
|
||||
"repo": "advisory-db",
|
||||
"rev": "f3460e5ed91658ab94fa41908cfa44991f9f4f02",
|
||||
"type": "github"
|
||||
},
|
||||
"original": {
|
||||
"owner": "rustsec",
|
||||
"repo": "advisory-db",
|
||||
"type": "github"
|
||||
}
|
||||
},
|
||||
"crane": {
|
||||
"locked": {
|
||||
"lastModified": 1737689766,
|
||||
"narHash": "sha256-ivVXYaYlShxYoKfSo5+y5930qMKKJ8CLcAoIBPQfJ6s=",
|
||||
"owner": "ipetkov",
|
||||
"repo": "crane",
|
||||
"rev": "6fe74265bbb6d016d663b1091f015e2976c4a527",
|
||||
"type": "github"
|
||||
},
|
||||
"original": {
|
||||
"owner": "ipetkov",
|
||||
"repo": "crane",
|
||||
"type": "github"
|
||||
}
|
||||
},
|
||||
"crane_2": {
|
||||
"locked": {
|
||||
"lastModified": 1730652660,
|
||||
"narHash": "sha256-+XVYfmVXAiYA0FZT7ijHf555dxCe+AoAT5A6RU+6vSo=",
|
||||
"owner": "ipetkov",
|
||||
"repo": "crane",
|
||||
"rev": "a4ca93905455c07cb7e3aca95d4faf7601cba458",
|
||||
"type": "github"
|
||||
},
|
||||
"original": {
|
||||
"owner": "ipetkov",
|
||||
"repo": "crane",
|
||||
"type": "github"
|
||||
}
|
||||
},
|
||||
"crane_3": {
|
||||
"inputs": {
|
||||
"flake-compat": "flake-compat",
|
||||
"flake-utils": [
|
||||
"wasmcloud",
|
||||
"nixify",
|
||||
"nix-log",
|
||||
"nixify",
|
||||
"flake-utils"
|
||||
],
|
||||
"nixpkgs": [
|
||||
"wasmcloud",
|
||||
"nixify",
|
||||
"nix-log",
|
||||
"nixify",
|
||||
"nixpkgs"
|
||||
],
|
||||
"rust-overlay": [
|
||||
"wasmcloud",
|
||||
"nixify",
|
||||
"nix-log",
|
||||
"nixify",
|
||||
"rust-overlay"
|
||||
]
|
||||
},
|
||||
"locked": {
|
||||
"lastModified": 1679255352,
|
||||
"narHash": "sha256-nkGwGuNkhNrnN33S4HIDV5NzkzMLU5mNStRn9sZwq8c=",
|
||||
"owner": "rvolosatovs",
|
||||
"repo": "crane",
|
||||
"rev": "cec65880599a4ec6426186e24342e663464f5933",
|
||||
"type": "github"
|
||||
},
|
||||
"original": {
|
||||
"owner": "rvolosatovs",
|
||||
"ref": "feat/wit",
|
||||
"repo": "crane",
|
||||
"type": "github"
|
||||
}
|
||||
},
|
||||
"fenix": {
|
||||
"inputs": {
|
||||
"nixpkgs": [
|
||||
"nixpkgs"
|
||||
],
|
||||
"rust-analyzer-src": []
|
||||
},
|
||||
"locked": {
|
||||
"lastModified": 1738132439,
|
||||
"narHash": "sha256-7q5vsyPQf6/aQEKAOgZ4ggv++Z2ppPSuPCGKlbPcM88=",
|
||||
"owner": "nix-community",
|
||||
"repo": "fenix",
|
||||
"rev": "f94e521c1922784c377a2cace90aa89a6b8a1011",
|
||||
"type": "github"
|
||||
},
|
||||
"original": {
|
||||
"owner": "nix-community",
|
||||
"repo": "fenix",
|
||||
"type": "github"
|
||||
}
|
||||
},
|
||||
"fenix_2": {
|
||||
"inputs": {
|
||||
"nixpkgs": [
|
||||
"wasmcloud",
|
||||
"nixify",
|
||||
"nixpkgs-nixos"
|
||||
],
|
||||
"rust-analyzer-src": "rust-analyzer-src"
|
||||
},
|
||||
"locked": {
|
||||
"lastModified": 1731047492,
|
||||
"narHash": "sha256-F4h8YtTzPWv0/1Z6fc8fMSqKpn7YhOjlgp66cr15tEo=",
|
||||
"owner": "nix-community",
|
||||
"repo": "fenix",
|
||||
"rev": "da6332e801fbb0418f80f20cefa947c5fe5c18c9",
|
||||
"type": "github"
|
||||
},
|
||||
"original": {
|
||||
"owner": "nix-community",
|
||||
"repo": "fenix",
|
||||
"type": "github"
|
||||
}
|
||||
},
|
||||
"fenix_3": {
|
||||
"inputs": {
|
||||
"nixpkgs": [
|
||||
"wasmcloud",
|
||||
"nixify",
|
||||
"nix-log",
|
||||
"nixify",
|
||||
"nixpkgs"
|
||||
],
|
||||
"rust-analyzer-src": "rust-analyzer-src_2"
|
||||
},
|
||||
"locked": {
|
||||
"lastModified": 1679552560,
|
||||
"narHash": "sha256-L9Se/F1iLQBZFGrnQJO8c9wE5z0Mf8OiycPGP9Y96hA=",
|
||||
"owner": "nix-community",
|
||||
"repo": "fenix",
|
||||
"rev": "fb49a9f5605ec512da947a21cc7e4551a3950397",
|
||||
"type": "github"
|
||||
},
|
||||
"original": {
|
||||
"owner": "nix-community",
|
||||
"repo": "fenix",
|
||||
"type": "github"
|
||||
}
|
||||
},
|
||||
"flake-compat": {
|
||||
"flake": false,
|
||||
"locked": {
|
||||
"lastModified": 1673956053,
|
||||
"narHash": "sha256-4gtG9iQuiKITOjNQQeQIpoIB6b16fm+504Ch3sNKLd8=",
|
||||
"owner": "edolstra",
|
||||
"repo": "flake-compat",
|
||||
"rev": "35bb57c0c8d8b62bbfd284272c928ceb64ddbde9",
|
||||
"type": "github"
|
||||
},
|
||||
"original": {
|
||||
"owner": "edolstra",
|
||||
"repo": "flake-compat",
|
||||
"type": "github"
|
||||
}
|
||||
},
|
||||
"flake-utils": {
|
||||
"inputs": {
|
||||
"systems": "systems"
|
||||
},
|
||||
"locked": {
|
||||
"lastModified": 1731533236,
|
||||
"narHash": "sha256-l0KFg5HjrsfsO/JpG+r7fRrqm12kzFHyUHqHCVpMMbI=",
|
||||
"owner": "numtide",
|
||||
"repo": "flake-utils",
|
||||
"rev": "11707dc2f618dd54ca8739b309ec4fc024de578b",
|
||||
"type": "github"
|
||||
},
|
||||
"original": {
|
||||
"owner": "numtide",
|
||||
"repo": "flake-utils",
|
||||
"type": "github"
|
||||
}
|
||||
},
|
||||
"flake-utils_2": {
|
||||
"inputs": {
|
||||
"systems": "systems_2"
|
||||
},
|
||||
"locked": {
|
||||
"lastModified": 1726560853,
|
||||
"narHash": "sha256-X6rJYSESBVr3hBoH0WbKE5KvhPU5bloyZ2L4K60/fPQ=",
|
||||
"owner": "numtide",
|
||||
"repo": "flake-utils",
|
||||
"rev": "c1dfcf08411b08f6b8615f7d8971a2bfa81d5e8a",
|
||||
"type": "github"
|
||||
},
|
||||
"original": {
|
||||
"owner": "numtide",
|
||||
"repo": "flake-utils",
|
||||
"type": "github"
|
||||
}
|
||||
},
|
||||
"flake-utils_3": {
|
||||
"locked": {
|
||||
"lastModified": 1678901627,
|
||||
"narHash": "sha256-U02riOqrKKzwjsxc/400XnElV+UtPUQWpANPlyazjH0=",
|
||||
"owner": "numtide",
|
||||
"repo": "flake-utils",
|
||||
"rev": "93a2b84fc4b70d9e089d029deacc3583435c2ed6",
|
||||
"type": "github"
|
||||
},
|
||||
"original": {
|
||||
"owner": "numtide",
|
||||
"repo": "flake-utils",
|
||||
"type": "github"
|
||||
}
|
||||
},
|
||||
"macos-sdk": {
|
||||
"flake": false,
|
||||
"locked": {
|
||||
"lastModified": 1694769349,
|
||||
"narHash": "sha256-TEvVJy+NMPyzgWSk/6S29ZMQR+ICFxSdS3tw247uhFc=",
|
||||
"type": "tarball",
|
||||
"url": "https://github.com/roblabla/MacOSX-SDKs/releases/download/macosx14.0/MacOSX14.0.sdk.tar.xz"
|
||||
},
|
||||
"original": {
|
||||
"type": "tarball",
|
||||
"url": "https://github.com/roblabla/MacOSX-SDKs/releases/download/macosx14.0/MacOSX14.0.sdk.tar.xz"
|
||||
}
|
||||
},
|
||||
"nix-filter": {
|
||||
"locked": {
|
||||
"lastModified": 1730207686,
|
||||
"narHash": "sha256-SCHiL+1f7q9TAnxpasriP6fMarWE5H43t25F5/9e28I=",
|
||||
"owner": "numtide",
|
||||
"repo": "nix-filter",
|
||||
"rev": "776e68c1d014c3adde193a18db9d738458cd2ba4",
|
||||
"type": "github"
|
||||
},
|
||||
"original": {
|
||||
"owner": "numtide",
|
||||
"repo": "nix-filter",
|
||||
"type": "github"
|
||||
}
|
||||
},
|
||||
"nix-filter_2": {
|
||||
"locked": {
|
||||
"lastModified": 1678109515,
|
||||
"narHash": "sha256-C2X+qC80K2C1TOYZT8nabgo05Dw2HST/pSn6s+n6BO8=",
|
||||
"owner": "numtide",
|
||||
"repo": "nix-filter",
|
||||
"rev": "aa9ff6ce4a7f19af6415fb3721eaa513ea6c763c",
|
||||
"type": "github"
|
||||
},
|
||||
"original": {
|
||||
"owner": "numtide",
|
||||
"repo": "nix-filter",
|
||||
"type": "github"
|
||||
}
|
||||
},
|
||||
"nix-flake-tests": {
|
||||
"locked": {
|
||||
"lastModified": 1677844186,
|
||||
"narHash": "sha256-ErJZ/Gs1rxh561CJeWP5bohA2IcTq1rDneu1WT6CVII=",
|
||||
"owner": "antifuchs",
|
||||
"repo": "nix-flake-tests",
|
||||
"rev": "bbd9216bd0f6495bb961a8eb8392b7ef55c67afb",
|
||||
"type": "github"
|
||||
},
|
||||
"original": {
|
||||
"owner": "antifuchs",
|
||||
"repo": "nix-flake-tests",
|
||||
"type": "github"
|
||||
}
|
||||
},
|
||||
"nix-flake-tests_2": {
|
||||
"locked": {
|
||||
"lastModified": 1677844186,
|
||||
"narHash": "sha256-ErJZ/Gs1rxh561CJeWP5bohA2IcTq1rDneu1WT6CVII=",
|
||||
"owner": "antifuchs",
|
||||
"repo": "nix-flake-tests",
|
||||
"rev": "bbd9216bd0f6495bb961a8eb8392b7ef55c67afb",
|
||||
"type": "github"
|
||||
},
|
||||
"original": {
|
||||
"owner": "antifuchs",
|
||||
"repo": "nix-flake-tests",
|
||||
"type": "github"
|
||||
}
|
||||
},
|
||||
"nix-log": {
|
||||
"inputs": {
|
||||
"nix-flake-tests": "nix-flake-tests",
|
||||
"nixify": "nixify_2",
|
||||
"nixlib": "nixlib_2"
|
||||
},
|
||||
"locked": {
|
||||
"lastModified": 1681933283,
|
||||
"narHash": "sha256-phDsQdaoUEI4DUTErR6Tz7lS0y3kXvDwwbqtxpzd0eo=",
|
||||
"owner": "rvolosatovs",
|
||||
"repo": "nix-log",
|
||||
"rev": "833d31e3c1a677eac81ba87e777afa5076071d66",
|
||||
"type": "github"
|
||||
},
|
||||
"original": {
|
||||
"owner": "rvolosatovs",
|
||||
"repo": "nix-log",
|
||||
"type": "github"
|
||||
}
|
||||
},
|
||||
"nix-log_2": {
|
||||
"inputs": {
|
||||
"nix-flake-tests": "nix-flake-tests_2",
|
||||
"nixify": [
|
||||
"wasmcloud",
|
||||
"wit-deps",
|
||||
"nixify"
|
||||
],
|
||||
"nixlib": [
|
||||
"wasmcloud",
|
||||
"wit-deps",
|
||||
"nixlib"
|
||||
]
|
||||
},
|
||||
"locked": {
|
||||
"lastModified": 1681933283,
|
||||
"narHash": "sha256-phDsQdaoUEI4DUTErR6Tz7lS0y3kXvDwwbqtxpzd0eo=",
|
||||
"owner": "rvolosatovs",
|
||||
"repo": "nix-log",
|
||||
"rev": "833d31e3c1a677eac81ba87e777afa5076071d66",
|
||||
"type": "github"
|
||||
},
|
||||
"original": {
|
||||
"owner": "rvolosatovs",
|
||||
"repo": "nix-log",
|
||||
"type": "github"
|
||||
}
|
||||
},
|
||||
"nixify": {
|
||||
"inputs": {
|
||||
"advisory-db": "advisory-db_2",
|
||||
"crane": "crane_2",
|
||||
"fenix": "fenix_2",
|
||||
"flake-utils": "flake-utils_2",
|
||||
"macos-sdk": "macos-sdk",
|
||||
"nix-filter": "nix-filter",
|
||||
"nix-log": "nix-log",
|
||||
"nixlib": [
|
||||
"wasmcloud",
|
||||
"nixlib"
|
||||
],
|
||||
"nixpkgs-darwin": "nixpkgs-darwin",
|
||||
"nixpkgs-nixos": "nixpkgs-nixos",
|
||||
"rust-overlay": "rust-overlay_2"
|
||||
},
|
||||
"locked": {
|
||||
"lastModified": 1731068753,
|
||||
"narHash": "sha256-6H+vYAYl/koFsiBEM4WHZhOoOQ2Hfzd+MtcxFfAOOtw=",
|
||||
"owner": "rvolosatovs",
|
||||
"repo": "nixify",
|
||||
"rev": "7b83953ebfb22ba1f623ac06312aebee81f2182e",
|
||||
"type": "github"
|
||||
},
|
||||
"original": {
|
||||
"owner": "rvolosatovs",
|
||||
"repo": "nixify",
|
||||
"type": "github"
|
||||
}
|
||||
},
|
||||
"nixify_2": {
|
||||
"inputs": {
|
||||
"crane": "crane_3",
|
||||
"fenix": "fenix_3",
|
||||
"flake-utils": "flake-utils_3",
|
||||
"nix-filter": "nix-filter_2",
|
||||
"nixlib": "nixlib",
|
||||
"nixpkgs": "nixpkgs_2",
|
||||
"rust-overlay": "rust-overlay"
|
||||
},
|
||||
"locked": {
|
||||
"lastModified": 1679748566,
|
||||
"narHash": "sha256-yA4yIJjNCOLoUh0py9S3SywwbPnd/6NPYbXad+JeOl0=",
|
||||
"owner": "rvolosatovs",
|
||||
"repo": "nixify",
|
||||
"rev": "80e823959511a42dfec4409fef406a14ae8240f3",
|
||||
"type": "github"
|
||||
},
|
||||
"original": {
|
||||
"owner": "rvolosatovs",
|
||||
"repo": "nixify",
|
||||
"type": "github"
|
||||
}
|
||||
},
|
||||
"nixlib": {
|
||||
"locked": {
|
||||
"lastModified": 1679187309,
|
||||
"narHash": "sha256-H8udmkg5wppL11d/05MMzOMryiYvc403axjDNZy1/TQ=",
|
||||
"owner": "nix-community",
|
||||
"repo": "nixpkgs.lib",
|
||||
"rev": "44214417fe4595438b31bdb9469be92536a61455",
|
||||
"type": "github"
|
||||
},
|
||||
"original": {
|
||||
"owner": "nix-community",
|
||||
"repo": "nixpkgs.lib",
|
||||
"type": "github"
|
||||
}
|
||||
},
|
||||
"nixlib_2": {
|
||||
"locked": {
|
||||
"lastModified": 1679791877,
|
||||
"narHash": "sha256-tTV1Mf0hPWIMtqyU16Kd2JUBDWvfHlDC9pF57vcbgpQ=",
|
||||
"owner": "nix-community",
|
||||
"repo": "nixpkgs.lib",
|
||||
"rev": "cc060ddbf652a532b54057081d5abd6144d01971",
|
||||
"type": "github"
|
||||
},
|
||||
"original": {
|
||||
"owner": "nix-community",
|
||||
"repo": "nixpkgs.lib",
|
||||
"type": "github"
|
||||
}
|
||||
},
|
||||
"nixlib_3": {
|
||||
"locked": {
|
||||
"lastModified": 1731200463,
|
||||
"narHash": "sha256-qDaAweJjdFbVExqs8aG27urUgcgKufkIngHW3Rzustg=",
|
||||
"owner": "nix-community",
|
||||
"repo": "nixpkgs.lib",
|
||||
"rev": "e04234d263750db01c78a412690363dc2226e68a",
|
||||
"type": "github"
|
||||
},
|
||||
"original": {
|
||||
"owner": "nix-community",
|
||||
"repo": "nixpkgs.lib",
|
||||
"type": "github"
|
||||
}
|
||||
},
|
||||
"nixpkgs": {
|
||||
"locked": {
|
||||
"lastModified": 1738163270,
|
||||
"narHash": "sha256-B/7Y1v4y+msFFBW1JAdFjNvVthvNdJKiN6EGRPnqfno=",
|
||||
"owner": "NixOS",
|
||||
"repo": "nixpkgs",
|
||||
"rev": "59e618d90c065f55ae48446f307e8c09565d5ab0",
|
||||
"type": "github"
|
||||
},
|
||||
"original": {
|
||||
"owner": "NixOS",
|
||||
"ref": "release-24.11",
|
||||
"repo": "nixpkgs",
|
||||
"type": "github"
|
||||
}
|
||||
},
|
||||
"nixpkgs-darwin": {
|
||||
"locked": {
|
||||
"lastModified": 1730891215,
|
||||
"narHash": "sha256-i85DPrhDuvzgvIWCpJlbfM2UFtNYbapo20MtQXsvay4=",
|
||||
"owner": "nixos",
|
||||
"repo": "nixpkgs",
|
||||
"rev": "c128e44a249d6180740d0a979b6480d5b795c013",
|
||||
"type": "github"
|
||||
},
|
||||
"original": {
|
||||
"owner": "nixos",
|
||||
"ref": "nixpkgs-24.05-darwin",
|
||||
"repo": "nixpkgs",
|
||||
"type": "github"
|
||||
}
|
||||
},
|
||||
"nixpkgs-nixos": {
|
||||
"locked": {
        "lastModified": 1730883749,
        "narHash": "sha256-mwrFF0vElHJP8X3pFCByJR365Q2463ATp2qGIrDUdlE=",
        "owner": "nixos",
        "repo": "nixpkgs",
        "rev": "dba414932936fde69f0606b4f1d87c5bc0003ede",
        "type": "github"
      },
      "original": {
        "owner": "nixos",
        "ref": "nixos-24.05",
        "repo": "nixpkgs",
        "type": "github"
      }
    },
    "nixpkgs_2": {
      "locked": {
        "lastModified": 1679577639,
        "narHash": "sha256-7u7bsNP0ApBnLgsHVROQ5ytoMqustmMVMgtaFS/P7EU=",
        "owner": "nixos",
        "repo": "nixpkgs",
        "rev": "8f1bcd72727c5d4cd775545595d068be410f2a7e",
        "type": "github"
      },
      "original": {
        "owner": "nixos",
        "ref": "nixpkgs-22.11-darwin",
        "repo": "nixpkgs",
        "type": "github"
      }
    },
    "root": {
      "inputs": {
        "advisory-db": "advisory-db",
        "crane": "crane",
        "fenix": "fenix",
        "flake-utils": "flake-utils",
        "nixpkgs": "nixpkgs",
        "wasmcloud": "wasmcloud"
      }
    },
    "rust-analyzer-src": {
      "flake": false,
      "locked": {
        "lastModified": 1730989300,
        "narHash": "sha256-ZWSta9893f/uF5PoRFn/BSUAxF4dKW+TIbdA6rZoGBg=",
        "owner": "rust-lang",
        "repo": "rust-analyzer",
        "rev": "1042a8c22c348491a4bade4f664430b03d6f5b5c",
        "type": "github"
      },
      "original": {
        "owner": "rust-lang",
        "ref": "nightly",
        "repo": "rust-analyzer",
        "type": "github"
      }
    },
    "rust-analyzer-src_2": {
      "flake": false,
      "locked": {
        "lastModified": 1679520343,
        "narHash": "sha256-AJGSGWRfoKWD5IVTu1wEsR990wHbX0kIaolPqNMEh0c=",
        "owner": "rust-lang",
        "repo": "rust-analyzer",
        "rev": "eb791f31e688ae00908eb75d4c704ef60c430a92",
        "type": "github"
      },
      "original": {
        "owner": "rust-lang",
        "ref": "nightly",
        "repo": "rust-analyzer",
        "type": "github"
      }
    },
    "rust-overlay": {
      "inputs": {
        "flake-utils": [
          "wasmcloud",
          "nixify",
          "nix-log",
          "nixify",
          "flake-utils"
        ],
        "nixpkgs": [
          "wasmcloud",
          "nixify",
          "nix-log",
          "nixify",
          "nixpkgs"
        ]
      },
      "locked": {
        "lastModified": 1679537973,
        "narHash": "sha256-R6borgcKeyMIjjPeeYsfo+mT8UdS+OwwbhhStdCfEjg=",
        "owner": "oxalica",
        "repo": "rust-overlay",
        "rev": "fbc7ae3f14d32e78c0e8d7865f865cc28a46b232",
        "type": "github"
      },
      "original": {
        "owner": "oxalica",
        "repo": "rust-overlay",
        "type": "github"
      }
    },
    "rust-overlay_2": {
      "inputs": {
        "nixpkgs": [
          "wasmcloud",
          "nixify",
          "nixpkgs-nixos"
        ]
      },
      "locked": {
        "lastModified": 1731032894,
        "narHash": "sha256-dQSyYPmrQiPr+PGEd+K8038rubFGz7G/dNXVeaGWE0w=",
        "owner": "oxalica",
        "repo": "rust-overlay",
        "rev": "d52f2a4c103a0acf09ded857b9e2519ae2360e59",
        "type": "github"
      },
      "original": {
        "owner": "oxalica",
        "repo": "rust-overlay",
        "type": "github"
      }
    },
    "systems": {
      "locked": {
        "lastModified": 1681028828,
        "narHash": "sha256-Vy1rq5AaRuLzOxct8nz4T6wlgyUR7zLU309k9mBC768=",
        "owner": "nix-systems",
        "repo": "default",
        "rev": "da67096a3b9bf56a91d16901293e51ba5b49a27e",
        "type": "github"
      },
      "original": {
        "owner": "nix-systems",
        "repo": "default",
        "type": "github"
      }
    },
    "systems_2": {
      "locked": {
        "lastModified": 1681028828,
        "narHash": "sha256-Vy1rq5AaRuLzOxct8nz4T6wlgyUR7zLU309k9mBC768=",
        "owner": "nix-systems",
        "repo": "default",
        "rev": "da67096a3b9bf56a91d16901293e51ba5b49a27e",
        "type": "github"
      },
      "original": {
        "owner": "nix-systems",
        "repo": "default",
        "type": "github"
      }
    },
    "wasmcloud": {
      "inputs": {
        "nixify": "nixify",
        "nixlib": "nixlib_3",
        "wit-deps": "wit-deps"
      },
      "locked": {
        "lastModified": 1731409523,
        "narHash": "sha256-Q/BnuJaMyJfY+p9VpdyBWtRjEo4TdRvFMMhfdDFj6cU=",
        "owner": "wasmCloud",
        "repo": "wasmCloud",
        "rev": "579455058513b907c7df4a4ec13728f83c6b782b",
        "type": "github"
      },
      "original": {
        "owner": "wasmCloud",
        "ref": "wash-cli-v0.37.0",
        "repo": "wasmCloud",
        "type": "github"
      }
    },
    "wit-deps": {
      "inputs": {
        "nix-log": "nix-log_2",
        "nixify": [
          "wasmcloud",
          "nixify"
        ],
        "nixlib": [
          "wasmcloud",
          "nixlib"
        ]
      },
      "locked": {
        "lastModified": 1727963723,
        "narHash": "sha256-urAGMGMH5ousEeVTZ5AaLPfowXaYQoISNXiutV00iQo=",
        "owner": "bytecodealliance",
        "repo": "wit-deps",
        "rev": "eb7c84564acfe13a4197bb15052fd2e2b3d29775",
        "type": "github"
      },
      "original": {
        "owner": "bytecodealliance",
        "ref": "v0.4.0",
        "repo": "wit-deps",
        "type": "github"
      }
    }
  },
  "root": "root",
  "version": 7
}
@@ -0,0 +1,264 @@
{
  nixConfig.extra-substituters =
    [ "https://wasmcloud.cachix.org" "https://crane.cachix.org" ];
  nixConfig.extra-trusted-public-keys = [
    "wasmcloud.cachix.org-1:9gRBzsKh+x2HbVVspreFg/6iFRiD4aOcUQfXVDl3hiM="
    "crane.cachix.org-1:8Scfpmn9w+hGdXH/Q9tTLiYAE/2dnJYRJP7kl80GuRk="
  ];

  description = "A flake for building and running wadm";

  inputs = {
    nixpkgs.url = "github:NixOS/nixpkgs/release-24.11";

    crane.url = "github:ipetkov/crane";

    fenix = {
      url = "github:nix-community/fenix";
      inputs.nixpkgs.follows = "nixpkgs";
      inputs.rust-analyzer-src.follows = "";
    };

    flake-utils.url = "github:numtide/flake-utils";

    advisory-db = {
      url = "github:rustsec/advisory-db";
      flake = false;
    };

    # The wash CLI flag is always after the latest host release tag we want
    wasmcloud.url = "github:wasmCloud/wasmCloud/wash-cli-v0.37.0";
  };

  outputs =
    { self, nixpkgs, crane, fenix, flake-utils, advisory-db, wasmcloud, ... }:
    flake-utils.lib.eachDefaultSystem (system:
      let
        pkgs = nixpkgs.legacyPackages.${system};

        inherit (pkgs) lib;

        craneLib = crane.mkLib pkgs;
        src = craneLib.cleanCargoSource ./.;

        # Common arguments can be set here to avoid repeating them later
        commonArgs = {
          inherit src;
          strictDeps = true;

          buildInputs = [
            # Add additional build inputs here
          ] ++ lib.optionals pkgs.stdenv.isDarwin [
            # Additional darwin specific inputs can be set here if needed
          ];

          # Additional environment variables can be set directly here if needed
          # MY_CUSTOM_VAR = "some value";
        };

        craneLibLLvmTools = craneLib.overrideToolchain
          (fenix.packages.${system}.complete.withComponents [
            "cargo"
            "llvm-tools"
            "rustc"
          ]);

        # Get the lock file for filtering
        rawLockFile = builtins.fromTOML (builtins.readFile ./Cargo.lock);

        # Filter out the workspace members
        filteredLockFile = rawLockFile // {
          package = builtins.filter (x: !lib.strings.hasPrefix "wadm" x.name)
            rawLockFile.package;
        };

        cargoVendorDir =
          craneLib.vendorCargoDeps { cargoLockParsed = filteredLockFile; };

        cargoLock = craneLib.writeTOML "Cargo.lock" filteredLockFile;

        # Build *just* the cargo dependencies (of the entire workspace), but we don't want to build
        # any of the other things in the crate to avoid rebuilding things in the dependencies when
        # we change workspace crate dependencies
        cargoArtifacts = let
          commonArgs' = removeAttrs commonArgs [ "src" ];

          # Get the manifest file for filtering
          rawManifestFile = builtins.fromTOML (builtins.readFile ./Cargo.toml);

          # Filter out the workspace members from manifest
          filteredManifestFile = with lib;
            let
              filterWadmAttrs =
                filterAttrs (name: _: !strings.hasPrefix "wadm" name);

              workspace = removeAttrs rawManifestFile.workspace [ "members" ];
            in rawManifestFile // {
              workspace = workspace // {
                dependencies = filterWadmAttrs workspace.dependencies;
                package = workspace.package // {
                  # pin version to avoid rebuilds on bumps
                  version = "0.0.0";
                };
              };

              dependencies = filterWadmAttrs rawManifestFile.dependencies;

              dev-dependencies =
                filterWadmAttrs rawManifestFile.dev-dependencies;

              build-dependencies =
                filterWadmAttrs rawManifestFile.build-dependencies;
            };

          cargoToml = craneLib.writeTOML "Cargo.toml" filteredManifestFile;

          dummySrc = craneLib.mkDummySrc {
            src = pkgs.runCommand "wadm-dummy-src" { } ''
              mkdir -p $out
              cp --recursive --no-preserve=mode,ownership ${src}/. -t $out
              cp ${cargoToml} $out/Cargo.toml
            '';
          };

          args = commonArgs' // {
            inherit cargoLock cargoToml cargoVendorDir dummySrc;

            cargoExtraArgs = ""; # disable `--locked` passed by default by crane
          };
        in craneLib.buildDepsOnly args;

        individualCrateArgs = commonArgs // {
          inherit (craneLib.crateNameFromCargoToml { inherit src; }) version;
          # TODO(thomastaylor312) We run unit tests here and e2e tests externally. The nextest step
          # wasn't letting me pass in the fileset
          doCheck = true;
        };

        fileSetForCrate = lib.fileset.toSource {
          root = ./.;
          fileset = lib.fileset.unions [
            ./Cargo.toml
            ./Cargo.lock
            ./tests
            ./oam
            (craneLib.fileset.commonCargoSources ./crates/wadm)
            (craneLib.fileset.commonCargoSources ./crates/wadm-client)
            (craneLib.fileset.commonCargoSources ./crates/wadm-types)
          ];
        };

        # Build the top-level crates of the workspace as individual derivations.
        # This allows consumers to only depend on (and build) only what they need.
        # Though it is possible to build the entire workspace as a single derivation,
        # so this is left up to you on how to organize things
        #
        # Note that the cargo workspace must define `workspace.members` using wildcards,
        # otherwise, omitting a crate (like we do below) will result in errors since
        # cargo won't be able to find the sources for all members.
        # TODO(thomastaylor312) I tried using `doInstallCargoArtifacts` and passing in things to the
        # next derivations as the `cargoArtifacts`, but that ended up always building things twice
        # rather than caching. We should look into it more and see if there's a way to make it work.
        wadm-lib = craneLib.cargoBuild (individualCrateArgs // {
          inherit cargoArtifacts;
          pname = "wadm";
          cargoExtraArgs = "-p wadm";
          src = fileSetForCrate;
        });
        wadm = craneLib.buildPackage (individualCrateArgs // {
          inherit cargoArtifacts;
          pname = "wadm-cli";
          cargoExtraArgs = "--bin wadm";
          src = fileSetForCrate;
        });
        wadm-client = craneLib.cargoBuild (individualCrateArgs // {
          inherit cargoArtifacts;
          pname = "wadm-client";
          cargoExtraArgs = "-p wadm-client";
          src = fileSetForCrate;
        });
        wadm-types = craneLib.cargoBuild (individualCrateArgs // {
          inherit cargoArtifacts;
          pname = "wadm-types";
          cargoExtraArgs = "-p wadm-types";
          src = fileSetForCrate;
        });
      in {
        checks = {
          # Build the crates as part of `nix flake check` for convenience
          inherit wadm wadm-client wadm-types;

          # Run clippy (and deny all warnings) on the workspace source,
          # again, reusing the dependency artifacts from above.
          #
          # Note that this is done as a separate derivation so that
          # we can block the CI if there are issues here, but not
          # prevent downstream consumers from building our crate by itself.
          workspace-clippy = craneLib.cargoClippy (commonArgs // {
            inherit cargoArtifacts;
            cargoClippyExtraArgs = "--all-targets -- --deny warnings";
          });

          workspace-doc =
            craneLib.cargoDoc (commonArgs // { inherit cargoArtifacts; });

          # Check formatting
          workspace-fmt = craneLib.cargoFmt { inherit src; };

          # Audit dependencies
          workspace-audit = craneLib.cargoAudit { inherit src advisory-db; };

          # Audit licenses
          # my-workspace-deny = craneLib.cargoDeny {
          #   inherit src;
          # };

          # TODO: the wadm e2e tests use docker compose and things like `wash up` to test things
          # (which accesses network currently). We would need to fix those tests to do something
          # else to work properly. The low hanging fruit here would be to use the built artifact
          # in the e2e tests so we can output those binaries from the nix build and then just
          # run the tests from a separate repo. We could also do something like outputting the
          # prebuilt artifacts out into the current directory to save on build time. But that is
          # for later us to figure out
          runE2ETests = pkgs.runCommand "e2e-tests" {
            nativeBuildInputs = with pkgs;
              [
                nats-server
                # wasmcloud.wasmcloud
              ];
          } ''
            touch $out
          '';
        };

        packages = {
          inherit wadm wadm-client wadm-types wadm-lib;
          default = wadm;
        } // lib.optionalAttrs (!pkgs.stdenv.isDarwin) {
          workspace-llvm-coverage = craneLibLLvmTools.cargoLlvmCov
            (commonArgs // { inherit cargoArtifacts; });
        };

        apps = {
          wadm = flake-utils.lib.mkApp { drv = wadm; };
          default = flake-utils.lib.mkApp { drv = wadm; };
        };

        devShells.default = craneLib.devShell {
          # Inherit inputs from checks.
          checks = self.checks.${system};

          RUST_SRC_PATH =
            "${pkgs.rust.packages.stable.rustPlatform.rustLibSrc}";

          # Extra inputs can be added here; cargo and rustc are provided by default.
          packages = [
            pkgs.nats-server
            pkgs.natscli
            pkgs.docker
            pkgs.git
            wasmcloud.outputs.packages.${system}.default
          ];
        };
      });
}
199  oam.schema.json
@@ -1,9 +1,14 @@
{
"$schema": "http://json-schema.org/draft-07/schema#",
"title": "Manifest",
"description": "An OAM manifest",
"description": "Manifest file based on the Open Application Model (OAM) specification for declaratively managing wasmCloud applications",
"type": "object",
"required": ["apiVersion", "kind", "metadata", "spec"],
"required": [
"apiVersion",
"kind",
"metadata",
"spec"
],
"properties": {
"apiVersion": {
"description": "The OAM version of the manifest",
@@ -34,8 +39,18 @@
"definitions": {
"CapabilityProperties": {
"type": "object",
"required": ["image"],
"properties": {
"application": {
"description": "Information to locate a component within a shared application. Cannot be specified if the image is specified.",
"anyOf": [
{
"$ref": "#/definitions/SharedApplicationComponentProperties"
},
{
"type": "null"
}
]
},
"config": {
"description": "Named configuration to pass to the provider. The merged set of configuration will be passed to the provider at runtime using the provider SDK's `init()` function.",
"type": "array",
@@ -45,11 +60,17 @@
},
"id": {
"description": "The component ID to use for this provider. If not supplied, it will be generated as a combination of the [Metadata::name] and the image reference.",
"type": ["string", "null"]
"type": [
"string",
"null"
]
},
"image": {
"description": "The image reference to use",
"type": "string"
"description": "The image reference to use. Required unless the component is a shared component that is defined in another shared application.",
"type": [
"string",
"null"
]
},
"secrets": {
"description": "Named secret references to pass to the t. The provider will be able to retrieve these values at runtime using `wasmcloud:secrets/store`.",
@@ -67,32 +88,44 @@
"oneOf": [
{
"type": "object",
"required": ["properties", "type"],
"required": [
"properties",
"type"
],
"properties": {
"properties": {
"$ref": "#/definitions/ComponentProperties"
},
"type": {
"type": "string",
"enum": ["component"]
"enum": [
"component"
]
}
}
},
{
"type": "object",
"required": ["properties", "type"],
"required": [
"properties",
"type"
],
"properties": {
"properties": {
"$ref": "#/definitions/CapabilityProperties"
},
"type": {
"type": "string",
"enum": ["capability"]
"enum": [
"capability"
]
}
}
}
],
"required": ["name"],
"required": [
"name"
],
"properties": {
"name": {
"description": "The name of this component",
@@ -100,7 +133,10 @@
},
"traits": {
"description": "A list of various traits assigned to this component",
"type": ["array", "null"],
"type": [
"array",
"null"
],
"items": {
"$ref": "#/definitions/Trait"
}
@@ -109,8 +145,18 @@
},
"ComponentProperties": {
"type": "object",
"required": ["image"],
"properties": {
"application": {
"description": "Information to locate a component within a shared application. Cannot be specified if the image is specified.",
"anyOf": [
{
"$ref": "#/definitions/SharedApplicationComponentProperties"
},
{
"type": "null"
}
]
},
"config": {
"description": "Named configuration to pass to the component. The component will be able to retrieve these values at runtime using `wasi:runtime/config.`",
"type": "array",
@@ -120,11 +166,17 @@
},
"id": {
"description": "The component ID to use for this component. If not supplied, it will be generated as a combination of the [Metadata::name] and the image reference.",
"type": ["string", "null"]
"type": [
"string",
"null"
]
},
"image": {
"description": "The image reference to use",
"type": "string"
"description": "The image reference to use. Required unless the component is a shared component that is defined in another shared application.",
"type": [
"string",
"null"
]
},
"secrets": {
"description": "Named secret references to pass to the component. The component will be able to retrieve these values at runtime using `wasmcloud:secrets/store`.",
@@ -156,7 +208,9 @@
"ConfigProperty": {
"description": "Properties for the config list associated with components, providers, and links\n\n## Usage Defining a config block, like so: ```yaml source_config: - name: \"external-secret-kv\" - name: \"default-port\" properties: port: \"8080\" ```\n\nWill result in two config scalers being created, one with the name `basic-kv` and one with the name `default-port`. Wadm will not resolve collisions with configuration names between manifests.",
"type": "object",
"required": ["name"],
"required": [
"name"
],
"properties": {
"name": {
"description": "Name of the config to ensure exists",
@@ -164,7 +218,10 @@
},
"properties": {
"description": "Optional properties to put with the configuration. If the properties are omitted in the manifest, wadm will assume that the configuration is externally managed and will not attempt to create it, only reporting the status as failed if not found.",
"type": ["object", "null"],
"type": [
"object",
"null"
],
"additionalProperties": {
"type": "string"
}
@@ -175,7 +232,12 @@
"LinkProperty": {
"description": "Properties for links",
"type": "object",
"required": ["interfaces", "namespace", "package", "target"],
"required": [
"interfaces",
"namespace",
"package",
"target"
],
"properties": {
"interfaces": {
"description": "WIT interfaces for the link",
@@ -186,7 +248,10 @@
},
"name": {
"description": "The name of this link",
"type": ["string", "null"]
"type": [
"string",
"null"
]
},
"namespace": {
"description": "WIT namespace for the link",
@@ -210,7 +275,10 @@
"source_config": {
"deprecated": true,
"writeOnly": true,
"type": ["array", "null"],
"type": [
"array",
"null"
],
"items": {
"$ref": "#/definitions/ConfigProperty"
}
@@ -226,17 +294,24 @@
"target_config": {
"deprecated": true,
"writeOnly": true,
"type": ["array", "null"],
"type": [
"array",
"null"
],
"items": {
"$ref": "#/definitions/ConfigProperty"
}
}
}
},
"additionalProperties": false
},
"Metadata": {
"description": "The metadata describing the manifest",
"type": "object",
"required": ["annotations", "name"],
"required": [
"annotations",
"name"
],
"properties": {
"annotations": {
"description": "Optional data for annotating this manifest see <https://github.com/oam-dev/spec/blob/master/metadata.md#annotations-format>",
@@ -261,7 +336,11 @@
"Policy": {
"description": "A policy definition",
"type": "object",
"required": ["name", "properties", "type"],
"required": [
"name",
"properties",
"type"
],
"properties": {
"name": {
"description": "The name of this policy",
@@ -282,14 +361,17 @@
},
"SecretProperty": {
"type": "object",
"required": ["name", "properties"],
"required": [
"name",
"properties"
],
"properties": {
"name": {
"description": "The name of the secret. This is used by a reference by the component or capability to get the secret value as a resource.",
"type": "string"
},
"properties": {
"description": "The of the secret. This indicates how to retrieve the secret value from a secrets backend and which backend to actually query.",
"description": "The properties of the secret that indicate how to retrieve the secret value from a secrets backend and which backend to actually query.",
"allOf": [
{
"$ref": "#/definitions/SecretSourceProperty"
@@ -300,8 +382,18 @@
},
"SecretSourceProperty": {
"type": "object",
"required": ["key", "policy"],
"required": [
"key",
"policy"
],
"properties": {
"field": {
"description": "The field to use for retrieving the secret from the backend. This is optional and can be used to retrieve a specific field from a secret.",
"type": [
"string",
"null"
]
},
"key": {
"description": "The key to use for retrieving the secret from the backend.",
"type": "string"
@@ -312,14 +404,36 @@
},
"version": {
"description": "The version of the secret to retrieve. If not supplied, the latest version will be used.",
"type": ["string", "null"]
"type": [
"string",
"null"
]
}
}
},
"SharedApplicationComponentProperties": {
"type": "object",
"required": [
"component",
"name"
],
"properties": {
"component": {
"description": "The name of the component in the shared application",
"type": "string"
},
"name": {
"description": "The name of the shared application",
"type": "string"
}
}
},
"Specification": {
"description": "A representation of an OAM specification",
"type": "object",
"required": ["components"],
"required": [
"components"
],
"properties": {
"components": {
"description": "The list of components for describing an application",
@@ -340,7 +454,10 @@
"Spread": {
"description": "Configuration for various spreading requirements",
"type": "object",
"required": ["name", "requirements"],
"required": [
"name",
"requirements"
],
"properties": {
"name": {
"description": "The name of this spread requirement",
@@ -355,7 +472,10 @@
},
"weight": {
"description": "An optional weight for this spread. Higher weights are given more precedence",
"type": ["integer", "null"],
"type": [
"integer",
"null"
],
"format": "uint",
"minimum": 0.0
}
@@ -365,7 +485,9 @@
"SpreadScalerProperty": {
"description": "Properties for spread scalers",
"type": "object",
"required": ["instances"],
"required": [
"instances"
],
"properties": {
"instances": {
"description": "Number of instances to spread across matching requirements",
@@ -385,7 +507,9 @@
},
"TargetConfig": {
"type": "object",
"required": ["name"],
"required": [
"name"
],
"properties": {
"config": {
"type": "array",
@@ -407,7 +531,10 @@
},
"Trait": {
"type": "object",
"required": ["properties", "type"],
"required": [
"properties",
"type"
],
"properties": {
"properties": {
"description": "The properties of this trait",
@@ -437,4 +564,4 @@
]
}
}
}
}
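The schema above is described elsewhere in this changeset as being generated from the Rust type definitions (via `cargo run --bin wadm-schema`). As a rough, hedged sketch only: a generator binary of that shape could look like the following, assuming the `Manifest` type in `wadm_types` derives `schemars::JsonSchema` (that derive and the exact type path are assumptions here, not confirmed by this diff).

```rust
// Hypothetical sketch of a schema-generator bin; the schemars derive on
// wadm_types::Manifest is assumed, not shown in this changeset.
use schemars::schema_for;
use wadm_types::Manifest;

fn main() -> anyhow::Result<()> {
    // Build a draft-07 JSON schema from the Rust struct definitions
    let schema = schema_for!(Manifest);
    // Write it to the repository root, matching the checked-in file name
    std::fs::write("oam.schema.json", serde_json::to_string_pretty(&schema)?)?;
    Ok(())
}
```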
@@ -18,7 +18,7 @@ The following is a list of the `traits` wasmCloud has added via customization to

## JSON Schema

A JSON schema is automatically generated from our Rust structures and is at the root of the repository: [oam.schema.json](../oam.schema.json).
A JSON schema is automatically generated from our Rust structures and is at the root of the repository: [oam.schema.json](../oam.schema.json). You can regenerate the `oam.schema.json` file by running `cargo run --bin wadm-schema`.

## Example Application YAML
@@ -19,7 +19,7 @@ spec:
- name: webcap
type: capability
properties:
image: ghcr.io/wasmcloud/http-server:0.21.0
image: ghcr.io/wasmcloud/http-server:0.23.0
# You can pass any config data you'd like sent to your provider as a string->string map
config:
- name: provider_config
@@ -21,7 +21,7 @@ spec:
- name: httpserver
type: capability
properties:
image: ghcr.io/wasmcloud/http-server:0.21.0
image: ghcr.io/wasmcloud/http-server:0.23.0
traits:
# Link the HTTP server and set it to listen on the local machine's port 8080
- type: link
@@ -37,7 +37,7 @@ spec:
- name: httpserver
type: capability
properties:
image: ghcr.io/wasmcloud/http-server:0.21.0
image: ghcr.io/wasmcloud/http-server:0.23.0
traits:
# Compose with component to handle wasi:http calls
- type: link
@@ -0,0 +1,3 @@
[toolchain]
channel = "stable"
components = ["clippy", "rust-src", "rustfmt"]
@@ -1,9 +1,10 @@
use std::io::IsTerminal;

use opentelemetry::sdk::{
trace::{IdGenerator, Sampler},
Resource,
};
use opentelemetry_otlp::{Protocol, WithExportConfig};
use std::io::IsTerminal;
use tracing::{Event as TracingEvent, Subscriber};
use tracing_subscriber::fmt::{
format::{Format, Full, Json, JsonFields, Writer},

454  src/main.rs
|
|||
use std::path::PathBuf;
|
||||
use std::sync::Arc;
|
||||
use std::time::Duration;
|
||||
|
||||
use async_nats::jetstream::{stream::Stream, Context};
|
||||
use anyhow::Context as _;
|
||||
use clap::Parser;
|
||||
use tokio::sync::Semaphore;
|
||||
use tracing::log::debug;
|
||||
use wadm_types::api::DEFAULT_WADM_TOPIC_PREFIX;
|
||||
use wadm::{config::WadmConfig, start_wadm};
|
||||
|
||||
use wadm::{
|
||||
consumers::{
|
||||
manager::{ConsumerManager, WorkerCreator},
|
||||
*,
|
||||
},
|
||||
nats_utils::LatticeIdParser,
|
||||
scaler::manager::{ScalerManager, WADM_NOTIFY_PREFIX},
|
||||
server::{ManifestNotifier, Server},
|
||||
storage::{nats_kv::NatsKvStore, reaper::Reaper},
|
||||
workers::{CommandPublisher, CommandWorker, EventWorker, StatusPublisher},
|
||||
DEFAULT_COMMANDS_TOPIC, DEFAULT_EVENTS_TOPIC, DEFAULT_MULTITENANT_EVENTS_TOPIC,
|
||||
DEFAULT_STATUS_TOPIC, DEFAULT_WADM_EVENTS_TOPIC, DEFAULT_WADM_EVENT_CONSUMER_TOPIC,
|
||||
};
|
||||
|
||||
mod connections;
|
||||
mod logging;
|
||||
mod nats;
|
||||
mod observer;
|
||||
|
||||
use connections::ControlClientConstructor;
|
||||
|
||||
const WADM_EVENT_STREAM_NAME: &str = "wadm_events";
|
||||
const WADM_EVENT_CONSUMER_STREAM_NAME: &str = "wadm_event_consumer";
|
||||
const COMMAND_STREAM_NAME: &str = "wadm_commands";
|
||||
const STATUS_STREAM_NAME: &str = "wadm_status";
|
||||
const NOTIFY_STREAM_NAME: &str = "wadm_notify";
|
||||
const WASMBUS_EVENT_STREAM_NAME: &str = "wasmbus_events";
|
||||
|
||||
#[derive(Parser, Debug)]
|
||||
#[command(name = clap::crate_name!(), version = clap::crate_version!(), about = "wasmCloud Application Deployment Manager", long_about = None)]
|
||||
struct Args {
|
||||
/// The ID for this wadm process. Defaults to a random UUIDv4 if none is provided. This is used
|
||||
/// to help with debugging when identifying which process is doing the work
|
||||
#[arg(short = 'i', long = "host-id", env = "WADM_HOST_ID")]
|
||||
host_id: Option<String>,
|
||||
|
||||
/// Whether or not to use structured log output (as JSON)
|
||||
#[arg(
|
||||
short = 'l',
|
||||
long = "structured-logging",
|
||||
default_value = "false",
|
||||
env = "WADM_STRUCTURED_LOGGING"
|
||||
)]
|
||||
structured_logging: bool,
|
||||
|
||||
/// Whether or not to enable opentelemetry tracing
|
||||
#[arg(
|
||||
short = 't',
|
||||
long = "tracing",
|
||||
default_value = "false",
|
||||
env = "WADM_TRACING_ENABLED"
|
||||
)]
|
||||
tracing_enabled: bool,
|
||||
|
||||
/// The endpoint to use for tracing. Setting this flag enables tracing, even if --tracing is set
|
||||
/// to false. Defaults to http://localhost:4318/v1/traces if not set and tracing is enabled
|
||||
#[arg(short = 'e', long = "tracing-endpoint", env = "WADM_TRACING_ENDPOINT")]
|
||||
tracing_endpoint: Option<String>,
|
||||
|
||||
/// The NATS JetStream domain to connect to
|
||||
#[arg(short = 'd', env = "WADM_JETSTREAM_DOMAIN")]
|
||||
domain: Option<String>,
|
||||
|
||||
/// (Advanced) Tweak the maximum number of jobs to run for handling events and commands. Be
|
||||
/// careful how you use this as it can affect performance
|
||||
#[arg(short = 'j', long = "max-jobs", env = "WADM_MAX_JOBS")]
|
||||
max_jobs: Option<usize>,
|
||||
|
||||
/// The URL of the nats server you want to connect to
|
||||
#[arg(
|
||||
short = 's',
|
||||
long = "nats-server",
|
||||
env = "WADM_NATS_SERVER",
|
||||
default_value = "127.0.0.1:4222"
|
||||
)]
|
||||
nats_server: String,
|
||||
|
||||
/// Use the specified nkey file or seed literal for authentication. Must be used in conjunction with --nats-jwt
|
||||
#[arg(
|
||||
long = "nats-seed",
|
||||
env = "WADM_NATS_NKEY",
|
||||
conflicts_with = "nats_creds",
|
||||
requires = "nats_jwt"
|
||||
)]
|
||||
nats_seed: Option<String>,
|
||||
|
||||
/// Use the specified jwt file or literal for authentication. Must be used in conjunction with --nats-nkey
|
||||
#[arg(
|
||||
long = "nats-jwt",
|
||||
env = "WADM_NATS_JWT",
|
||||
conflicts_with = "nats_creds",
|
||||
requires = "nats_seed"
|
||||
)]
|
||||
nats_jwt: Option<String>,
|
||||
|
||||
/// (Optional) NATS credential file to use when authenticating
|
||||
#[arg(
|
||||
long = "nats-creds-file",
|
||||
env = "WADM_NATS_CREDS_FILE",
|
||||
conflicts_with_all = ["nats_seed", "nats_jwt"],
|
||||
)]
|
||||
nats_creds: Option<PathBuf>,
|
||||
|
||||
/// (Optional) NATS TLS certificate file to use when authenticating
|
||||
#[arg(long = "nats-tls-ca-file", env = "WADM_NATS_TLS_CA_FILE")]
|
||||
nats_tls_ca_file: Option<PathBuf>,
|
||||
|
||||
/// Name of the bucket used for storage of lattice state
|
||||
#[arg(
|
||||
long = "state-bucket-name",
|
||||
env = "WADM_STATE_BUCKET_NAME",
|
||||
default_value = "wadm_state"
|
||||
)]
|
||||
state_bucket: String,
|
||||
|
||||
/// The amount of time in seconds to give for hosts to fail to heartbeat and be removed from the
|
||||
/// store. By default, this is 70s because it is 2x the host heartbeat interval plus a little padding
|
||||
#[arg(
|
||||
long = "cleanup-interval",
|
||||
env = "WADM_CLEANUP_INTERVAL",
|
||||
default_value = "70"
|
||||
)]
|
||||
cleanup_interval: u64,
|
||||
|
||||
/// The API topic prefix to use. This is an advanced setting that should only be used if you
|
||||
/// know what you are doing
|
||||
#[arg(
|
||||
long = "api-prefix",
|
||||
env = "WADM_API_PREFIX",
|
||||
default_value = DEFAULT_WADM_TOPIC_PREFIX
|
||||
)]
|
||||
api_prefix: String,
|
||||
|
||||
/// This prefix to used for the internal streams. When running in a multitenant environment,
|
||||
/// clients share the same JS domain (since messages need to come from lattices).
|
||||
/// Setting a stream prefix makes it possible to have a separate stream for different wadms running in a multitenant environment.
|
||||
/// This is an advanced setting that should only be used if you know what you are doing.
|
||||
#[arg(long = "stream-prefix", env = "WADM_STREAM_PREFIX")]
|
||||
stream_prefix: Option<String>,
|
||||
|
||||
/// Name of the bucket used for storage of manifests
|
||||
#[arg(
|
||||
long = "manifest-bucket-name",
|
||||
env = "WADM_MANIFEST_BUCKET_NAME",
|
||||
default_value = "wadm_manifests"
|
||||
)]
|
||||
manifest_bucket: String,
|
||||
|
||||
/// Run wadm in multitenant mode. This is for advanced multitenant use cases with segmented NATS
|
||||
/// account traffic and not simple cases where all lattices use credentials from the same
|
||||
/// account. See the deployment guide for more information
|
||||
#[arg(long = "multitenant", env = "WADM_MULTITENANT", hide = true)]
|
||||
multitenant: bool,
|
||||
}
|
||||
|
||||
#[tokio::main]
|
||||
async fn main() -> anyhow::Result<()> {
|
||||
let args = Args::parse();
|
||||
let args = WadmConfig::parse();
|
||||
|
||||
logging::configure_tracing(
|
||||
args.structured_logging,
|
||||
args.tracing_enabled,
|
||||
args.tracing_endpoint,
|
||||
args.tracing_endpoint.clone(),
|
||||
);
|
||||
|
||||
// Build storage adapter for lattice state (on by default)
|
||||
let (client, context) = nats::get_client_and_context(
|
||||
args.nats_server.clone(),
|
||||
args.domain.clone(),
|
||||
args.nats_seed.clone(),
|
||||
args.nats_jwt.clone(),
|
||||
args.nats_creds.clone(),
|
||||
args.nats_tls_ca_file.clone(),
|
||||
)
|
||||
.await?;
|
||||
|
||||
// TODO: We will probably need to set up all the flags (like lattice prefix and topic prefix) down the line
|
||||
let connection_pool = ControlClientConstructor::new(client.clone(), None);
|
||||
|
||||
let trimmer: &[_] = &['.', '>', '*'];
|
||||
|
||||
let store = nats::ensure_kv_bucket(&context, args.state_bucket, 1).await?;
|
||||
|
||||
let state_storage = NatsKvStore::new(store);
|
||||
|
||||
let manifest_storage = nats::ensure_kv_bucket(&context, args.manifest_bucket, 1).await?;
|
||||
|
||||
let internal_stream_name = |stream_name: &str| -> String {
|
||||
match args.stream_prefix.clone() {
|
||||
Some(stream_prefix) => {
|
||||
format!(
|
||||
"{}.{}",
|
||||
stream_prefix.trim_end_matches(trimmer),
|
||||
stream_name
|
||||
)
|
||||
}
|
||||
None => stream_name.to_string(),
|
||||
}
|
||||
};
|
||||
|
||||
debug!("Ensuring wadm event stream");
|
||||
|
||||
let event_stream = nats::ensure_limits_stream(
|
||||
&context,
|
||||
internal_stream_name(WADM_EVENT_STREAM_NAME),
|
||||
vec![DEFAULT_WADM_EVENTS_TOPIC.to_owned()],
|
||||
Some(
|
||||
"A stream that stores all events coming in on the wadm.evt subject in a cluster"
|
||||
.to_string(),
|
||||
),
|
||||
)
|
||||
.await?;
|
||||
|
||||
debug!("Ensuring command stream");
|
||||
|
||||
let command_stream = nats::ensure_stream(
|
||||
&context,
|
||||
internal_stream_name(COMMAND_STREAM_NAME),
|
||||
vec![DEFAULT_COMMANDS_TOPIC.to_owned()],
|
||||
Some("A stream that stores all commands for wadm".to_string()),
|
||||
)
|
||||
.await?;
|
||||
|
||||
let status_stream = nats::ensure_status_stream(
|
||||
&context,
|
||||
internal_stream_name(STATUS_STREAM_NAME),
|
||||
vec![DEFAULT_STATUS_TOPIC.to_owned()],
|
||||
)
|
||||
.await?;
|
||||
|
||||
debug!("Ensuring wasmbus event stream");
|
||||
|
||||
// Remove the previous wadm_(multitenant)_mirror streams so that they don't
|
||||
// prevent us from creating the new wasmbus_(multitenant)_events stream
|
||||
// TODO(joonas): Remove this some time in the future once we're confident
|
||||
// enough that there are no more wadm_(multitenant)_mirror streams around.
|
||||
for mirror_stream_name in &["wadm_mirror", "wadm_multitenant_mirror"] {
|
||||
if (context.get_stream(mirror_stream_name).await).is_ok() {
|
||||
context.delete_stream(mirror_stream_name).await?;
|
||||
}
|
||||
}
|
||||
|
||||
let wasmbus_event_subjects = match args.multitenant {
|
||||
true => vec![DEFAULT_MULTITENANT_EVENTS_TOPIC.to_owned()],
|
||||
false => vec![DEFAULT_EVENTS_TOPIC.to_owned()],
|
||||
};
|
||||
|
||||
let wasmbus_event_stream = nats::ensure_limits_stream(
|
||||
&context,
|
||||
WASMBUS_EVENT_STREAM_NAME.to_string(),
|
||||
wasmbus_event_subjects.clone(),
|
||||
Some(
|
||||
"A stream that stores all events coming in on the wasmbus.evt subject in a cluster"
|
||||
.to_string(),
|
||||
),
|
||||
)
|
||||
.await?;
|
||||
|
||||
debug!("Ensuring notify stream");
|
||||
|
||||
let notify_stream = nats::ensure_notify_stream(
|
||||
&context,
|
||||
NOTIFY_STREAM_NAME.to_owned(),
|
||||
vec![format!("{WADM_NOTIFY_PREFIX}.*")],
|
||||
)
|
||||
.await?;
|
||||
|
||||
debug!("Ensuring event consumer stream");
|
||||
|
||||
let event_consumer_stream = nats::ensure_event_consumer_stream(
|
||||
&context,
|
||||
WADM_EVENT_CONSUMER_STREAM_NAME.to_owned(),
|
||||
DEFAULT_WADM_EVENT_CONSUMER_TOPIC.to_owned(),
|
||||
vec![&wasmbus_event_stream, &event_stream],
|
||||
Some(
|
||||
"A stream that sources from wadm_events and wasmbus_events for wadm event consumer's use"
|
||||
.to_string(),
|
||||
),
|
||||
)
|
||||
.await?;
|
||||
|
||||
debug!("Creating event consumer manager");
|
||||
|
||||
let permit_pool = Arc::new(Semaphore::new(
|
||||
args.max_jobs.unwrap_or(Semaphore::MAX_PERMITS),
|
||||
));
|
||||
let event_worker_creator = EventWorkerCreator {
|
||||
state_store: state_storage.clone(),
|
||||
manifest_store: manifest_storage.clone(),
|
||||
pool: connection_pool.clone(),
|
||||
command_topic_prefix: DEFAULT_COMMANDS_TOPIC.trim_matches(trimmer).to_owned(),
|
||||
publisher: context.clone(),
|
||||
notify_stream,
|
||||
status_stream: status_stream.clone(),
|
||||
};
|
||||
let events_manager: ConsumerManager<EventConsumer> = ConsumerManager::new(
|
||||
permit_pool.clone(),
|
||||
event_consumer_stream,
|
||||
event_worker_creator.clone(),
|
||||
args.multitenant,
|
||||
)
|
||||
.await;
|
||||
|
||||
debug!("Creating command consumer manager");
|
||||
|
||||
let command_worker_creator = CommandWorkerCreator {
|
||||
pool: connection_pool,
|
||||
};
|
||||
let commands_manager: ConsumerManager<CommandConsumer> = ConsumerManager::new(
|
||||
permit_pool.clone(),
|
||||
command_stream,
|
||||
command_worker_creator.clone(),
|
||||
args.multitenant,
|
||||
)
|
||||
.await;
|
||||
|
||||
// TODO(thomastaylor312): We might want to figure out how not to run this globally. Doing a
|
||||
// synthetic event sent to the stream could be nice, but all the wadm processes would still fire
|
||||
// off that tick, resulting in multiple people handling. We could maybe get it to work with the
|
||||
// right duplicate window, but we have no idea when each process could fire a tick. Worst case
|
||||
// scenario right now is that multiple fire simultaneously and a few of them just delete nothing
|
||||
let reaper = Reaper::new(
|
||||
state_storage.clone(),
|
||||
Duration::from_secs(args.cleanup_interval / 2),
|
||||
[],
|
||||
);
|
||||
|
||||
let wadm_event_prefix = DEFAULT_WADM_EVENTS_TOPIC.trim_matches(trimmer);
|
||||
|
||||
debug!("Creating lattice observer");
|
||||
|
||||
let observer = observer::Observer {
|
||||
parser: LatticeIdParser::new("wasmbus", args.multitenant),
|
||||
command_manager: commands_manager,
|
||||
event_manager: events_manager,
|
||||
reaper,
|
||||
client: client.clone(),
|
||||
command_worker_creator,
|
||||
event_worker_creator,
|
||||
};
|
||||
|
||||
debug!("Subscribing to API topic");
|
||||
|
||||
let server = Server::new(
|
||||
manifest_storage,
|
||||
client,
|
||||
Some(&args.api_prefix),
|
||||
args.multitenant,
|
||||
status_stream,
|
||||
ManifestNotifier::new(wadm_event_prefix, context),
|
||||
)
|
||||
.await?;
|
||||
let mut wadm = start_wadm(args).await.context("failed to run wadm")?;
|
||||
tokio::select! {
|
||||
res = server.serve() => {
|
||||
res?
|
||||
res = wadm.join_next() => {
|
||||
match res {
|
||||
Some(Ok(_)) => {
|
||||
tracing::info!("WADM has exited successfully");
|
||||
std::process::exit(0);
|
||||
}
|
||||
Some(Err(e)) => {
|
||||
tracing::error!("WADM has exited with an error: {:?}", e);
|
||||
std::process::exit(1);
|
||||
}
|
||||
None => {
|
||||
tracing::info!("WADM server did not start");
|
||||
std::process::exit(0);
|
||||
}
|
||||
}
|
||||
}
|
||||
res = observer.observe(wasmbus_event_subjects) => {
|
||||
res?
|
||||
_ = tokio::signal::ctrl_c() => {
|
||||
tracing::info!("Received Ctrl+C, shutting down");
|
||||
std::process::exit(0);
|
||||
}
|
||||
_ = tokio::signal::ctrl_c() => {}
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[derive(Clone)]
|
||||
struct CommandWorkerCreator {
|
||||
pool: ControlClientConstructor,
|
||||
}
|
||||
|
||||
#[async_trait::async_trait]
|
||||
impl WorkerCreator for CommandWorkerCreator {
|
||||
type Output = CommandWorker;
|
||||
|
||||
async fn create(
|
||||
&self,
|
||||
lattice_id: &str,
|
||||
multitenant_prefix: Option<&str>,
|
||||
) -> anyhow::Result<Self::Output> {
|
||||
let client = self.pool.get_connection(lattice_id, multitenant_prefix);
|
||||
|
||||
Ok(CommandWorker::new(client))
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Clone)]
|
||||
struct EventWorkerCreator<StateStore> {
|
||||
state_store: StateStore,
|
||||
manifest_store: async_nats::jetstream::kv::Store,
|
||||
pool: ControlClientConstructor,
|
||||
command_topic_prefix: String,
|
||||
publisher: Context,
|
||||
notify_stream: Stream,
|
||||
status_stream: Stream,
|
||||
}
|
||||
|
||||
#[async_trait::async_trait]
|
||||
impl<StateStore> WorkerCreator for EventWorkerCreator<StateStore>
|
||||
where
|
||||
StateStore: wadm::storage::Store + Send + Sync + Clone + 'static,
|
||||
{
|
||||
type Output = EventWorker<StateStore, wasmcloud_control_interface::Client, Context>;
|
||||
|
||||
async fn create(
|
||||
&self,
|
||||
lattice_id: &str,
|
||||
multitenant_prefix: Option<&str>,
|
||||
) -> anyhow::Result<Self::Output> {
|
||||
let client = self.pool.get_connection(lattice_id, multitenant_prefix);
|
||||
let command_publisher = CommandPublisher::new(
|
||||
self.publisher.clone(),
|
||||
&format!("{}.{lattice_id}", self.command_topic_prefix),
|
||||
);
|
||||
let status_publisher = StatusPublisher::new(
|
||||
self.publisher.clone(),
|
||||
Some(self.status_stream.clone()),
|
||||
&format!("wadm.status.{lattice_id}"),
|
||||
);
|
||||
let manager = ScalerManager::new(
|
||||
self.publisher.clone(),
|
||||
self.notify_stream.clone(),
|
||||
lattice_id,
|
||||
multitenant_prefix,
|
||||
self.state_store.clone(),
|
||||
self.manifest_store.clone(),
|
||||
command_publisher.clone(),
|
||||
status_publisher.clone(),
|
||||
client.clone(),
|
||||
)
|
||||
.await?;
|
||||
Ok(EventWorker::new(
|
||||
self.state_store.clone(),
|
||||
client,
|
||||
command_publisher,
|
||||
status_publisher,
|
||||
manager,
|
||||
))
|
||||
}
|
||||
}
|
||||
|
|
|
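The diff above removes nearly all of the stream, consumer, and server wiring from the binary and replaces it with a call to `wadm::start_wadm` driven by a clap-parsed `WadmConfig`. As a hedged reconstruction from the added lines only (the real new `main.rs` may keep additional logging setup), the slimmed-down entrypoint reduces to roughly this shape:

```rust
// Rough shape of the post-refactor binary: setup now lives behind start_wadm,
// and main only drives the returned task set. Reconstructed from the added
// lines in the diff above, not copied from the final file.
use anyhow::Context as _;
use clap::Parser;
use wadm::{config::WadmConfig, start_wadm};

#[tokio::main]
async fn main() -> anyhow::Result<()> {
    // WadmConfig derives clap::Parser, replacing the old local Args struct
    let config = WadmConfig::parse();

    // start_wadm returns a set of running tasks (joinable handles)
    let mut wadm = start_wadm(config).await.context("failed to run wadm")?;

    tokio::select! {
        // Exit when any wadm task finishes, propagating failures as a non-zero exit
        res = wadm.join_next() => {
            if let Some(Err(e)) = res {
                tracing::error!("wadm exited with an error: {e:?}");
                std::process::exit(1);
            }
        }
        // Or shut down cleanly on Ctrl+C
        _ = tokio::signal::ctrl_c() => {}
    }
    Ok(())
}
```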
@@ -183,8 +183,8 @@ async fn test_crud_operations() {
);

// Now check that the data returned is correct
let resp: Vec<ModelSummary> = test_server
.get_response("default.model.list", Vec::new(), None)
let ListModelsResponse { models: resp, .. } = test_server
.get_response("default.model.get", Vec::new(), None)
.await;

assert_eq!(resp.len(), 2, "Should have two models in storage");
@@ -230,8 +230,8 @@ async fn test_crud_operations() {
assert_put_response(resp, PutResult::NewVersion, "v0.0.3", 3);

// Make sure we still only have 2 manifests
let resp: Vec<ModelSummary> = test_server
.get_response("default.model.list", Vec::new(), None)
let ListModelsResponse { models: resp, .. } = test_server
.get_response("default.model.get", Vec::new(), None)
.await;

assert_eq!(resp.len(), 2, "Should still have two models in storage");
@@ -452,7 +452,7 @@ async fn test_delete_noop() {
.expect("should have created a nats client");
let test_server = setup_server("delete_noop", nats_client).await;

// Delete something that doesn't exist
// Delete a model that doesn't exist
let resp: DeleteModelResponse = test_server
.get_response(
"default.model.del.my-example-app",
@@ -469,7 +469,19 @@ async fn test_delete_noop() {
);
assert!(!resp.message.is_empty(), "Should have a message set");

// Delete a non-existent version
let resp: DeleteModelResponse = test_server
.get_response(
"default.model.del.my-example-app",
serde_json::to_vec(&DeleteModelRequest { version: None }).unwrap(),
None,
)
.await;
assert!(
matches!(resp.result, DeleteResult::Noop),
"Should have gotten noop response for already deleted model"
);

// Delete a non-existent version for an existing model
let raw = tokio::fs::read("./oam/sqldbpostgres.yaml")
.await
.expect("Unable to load file");
@@ -32,12 +32,10 @@ async fn test_commands() {
.await
.expect("should get hosts back")
.first()
.as_ref()
.expect("Should be able to find hosts")
.response
.as_ref()
.expect("Should be able to get host")
.id
.data()
.map(|h| h.id())
.expect("should be able to get host")
.to_owned();

let mut sub = nats_client
@@ -70,13 +68,13 @@ async fn test_commands() {
tokio::time::sleep(std::time::Duration::from_secs(1)).await;

// Get the current components and make sure stuff was started
let inventory = ctl_client
let resp_data = ctl_client
.get_host_inventory(&host_id)
.await
.expect("should get host inventory back")
.response
.expect("should have host inventory")
.components;
.into_data()
.expect("should have host inventory");
let inventory = resp_data.components();
assert_eq!(
inventory.len(),
1,
@@ -84,17 +82,18 @@ async fn test_commands() {
inventory
);
assert_eq!(
inventory[0].image_ref, HELLO_IMAGE_REF,
inventory[0].image_ref(),
HELLO_IMAGE_REF,
"Should have started the correct component"
);
assert_eq!(
inventory[0].max_instances, 2,
inventory[0].max_instances(),
2,
"Should have started the component with correct concurrency"
);
assert_eq!(
inventory[0]
.annotations
.as_ref()
.annotations()
.unwrap()
.get(wadm::MANAGED_BY_ANNOTATION)
.expect("Should have the managed by annotation"),
@@ -103,8 +102,7 @@ async fn test_commands() {
);
assert_eq!(
inventory[0]
.annotations
.as_ref()
.annotations()
.unwrap()
.get(wadm::APP_SPEC_ANNOTATION)
.expect("Should have the managed by annotation"),
@@ -150,23 +148,22 @@ async fn test_commands() {
wait_for_event(&mut sub, "health_check_passed").await;

// Get the current providers and make sure stuff was started
let inventory = ctl_client
let resp_data = ctl_client
.get_host_inventory(&host_id)
.await
.expect("should get host inventory back")
.response
.expect("should have host inventory")
.providers;
.into_data()
.expect("should have host inventory");
let inventory = resp_data.providers();
assert_eq!(inventory.len(), 1, "Should only have 1 provider");
assert_eq!(
inventory[0].image_ref.as_deref().unwrap(),
inventory[0].image_ref().unwrap(),
HTTP_SERVER_IMAGE_REF,
"Should have started the correct provider"
);
assert_eq!(
inventory[0]
.annotations
.as_ref()
.annotations()
.unwrap()
.get(wadm::MANAGED_BY_ANNOTATION)
.expect("Should have the managed by annotation"),
@@ -175,8 +172,7 @@ async fn test_commands() {
);
assert_eq!(
inventory[0]
.annotations
.as_ref()
.annotations()
.unwrap()
.get(wadm::APP_SPEC_ANNOTATION)
.expect("Should have the managed by annotation"),
@@ -211,16 +207,16 @@ async fn test_commands() {
.get_links()
.await
.expect("should get links back")
.response
.into_data()
.expect("should have links");
// We could have more than one link due to local testing, so search for the proper link
inventory
.into_iter()
.find(|ld| {
ld.source_id == HTTP_SERVER_COMPONENT_ID
&& ld.target == HELLO_COMPONENT_ID
&& ld.wit_namespace == "wasi"
&& ld.wit_package == "http"
ld.source_id() == HTTP_SERVER_COMPONENT_ID
&& ld.target() == HELLO_COMPONENT_ID
&& ld.wit_namespace() == "wasi"
&& ld.wit_package() == "http"
})
.expect("Linkdef should exist");

@@ -248,15 +244,15 @@ async fn test_commands() {
.get_links()
.await
.expect("should get links back")
.response
.into_data()
.expect("should have links");
// We could have more than one link due to local testing, so search for the proper link
assert!(
!inventory.into_iter().any(|ld| {
ld.target == HELLO_COMPONENT_ID
&& ld.source_id == HTTP_SERVER_COMPONENT_ID
&& ld.wit_namespace == "wasi"
&& ld.wit_package == "http"
ld.target() == HELLO_COMPONENT_ID
&& ld.source_id() == HTTP_SERVER_COMPONENT_ID
&& ld.wit_namespace() == "wasi"
&& ld.wit_package() == "http"
}),
"Linkdef should be deleted"
);
@@ -280,13 +276,13 @@ async fn test_commands() {
wait_for_event(&mut sub, "provider_stopped").await;

// Get the current providers and make sure stuff was started
let inventory = ctl_client
let resp_data = ctl_client
.get_host_inventory(&host_id)
.await
.expect("should get host inventory back")
.response
.expect("should have host inventory")
.providers;
.into_data()
.expect("should have host inventory");
let inventory = resp_data.providers();
assert!(inventory.is_empty(), "Should have no providers");

// Stop the component
@@ -311,13 +307,13 @@ async fn test_commands() {
wait_for_event(&mut sub, "component_scaled").await;

// Get the current providers and make sure stuff was started
let inventory = ctl_client
let resp_data = ctl_client
.get_host_inventory(&host_id)
.await
.expect("should get host inventory back")
.response
.expect("should have host inventory")
.components;
.into_data()
.expect("should have host inventory");
let inventory = resp_data.components();
assert!(inventory.is_empty(), "Should have no components");
}

@@ -344,31 +340,20 @@ async fn test_annotation_stop() {
.await
.unwrap();

let host_id = ctl_client
.get_hosts()
.await
.unwrap()
let responses = ctl_client.get_hosts().await.unwrap();
let host_id = responses
.first()
.expect("Should be able to find hosts")
.response
.as_ref()
.unwrap()
.id
.to_owned();
.data()
.map(|v| v.id())
.unwrap();

// Start an unmangaged component
// NOTE(thomastaylor312): This is a workaround with current behavior where empty annotations
// acts on _everything_. We could technically move this back down after the initial scale up of
// the managed components after https://github.com/wasmCloud/wasmCloud/issues/746 is resolved
ctl_client
.scale_component(
&host_id,
HELLO_IMAGE_REF,
"unmanaged-hello",
1,
None,
vec![],
)
.scale_component(host_id, HELLO_IMAGE_REF, "unmanaged-hello", 1, None, vec![])
.await
.unwrap();

@@ -382,7 +367,7 @@ async fn test_annotation_stop() {
.publish_command(ScaleComponent {
component_id: HELLO_COMPONENT_ID.to_string(),
reference: HELLO_IMAGE_REF.to_string(),
host_id: host_id.clone(),
host_id: host_id.into(),
count: 2,
model_name: "fake".into(),
annotations: BTreeMap::from_iter([("fake".to_string(), "wake".to_string())]),
@@ -403,18 +388,17 @@ async fn test_annotation_stop() {
tokio::time::sleep(std::time::Duration::from_secs(1)).await;

// Get the current components and make sure stuff was started
let inventory = ctl_client
.get_host_inventory(&host_id)
let resp_data = ctl_client
.get_host_inventory(host_id)
.await
.expect("should get host inventory back")
.response
.expect("should have host inventory")
.components;
.into_data()
.expect("should have host inventory");
let inventory = resp_data.components();
let managed_inventory = inventory
.iter()
.filter(|c| {
c.annotations
.as_ref()
c.annotations()
.is_some_and(|a| a.contains_key(MANAGED_BY_ANNOTATION))
})
.collect::<Vec<_>>();
@@ -425,16 +409,17 @@ async fn test_annotation_stop() {
"Should only have 1 managed component"
);
assert_eq!(
managed_inventory[0].image_ref, HELLO_IMAGE_REF,
managed_inventory[0].image_ref(),
HELLO_IMAGE_REF,
"Should have started the correct component"
);
assert!(managed_inventory[0]
.annotations
.as_ref()
.annotations()
.map(|annotations| annotations.contains_key(MANAGED_BY_ANNOTATION))
.unwrap_or(false));
assert_eq!(
managed_inventory[0].max_instances, 2,
managed_inventory[0].max_instances(),
2,
"Should have started the correct concurrency of components"
);

@@ -444,7 +429,7 @@ async fn test_annotation_stop() {
component_id: HELLO_COMPONENT_ID.to_owned(),
reference: HELLO_IMAGE_REF.to_string(),
count: 0,
host_id: host_id.clone(),
host_id: host_id.into(),
model_name: "fake".into(),
annotations: BTreeMap::from_iter([("fake".to_string(), "wake".to_string())]),
config: vec![],
@@ -460,20 +445,22 @@ async fn test_annotation_stop() {
wait_for_event(&mut sub, "component_scaled").await;

// Get the current providers and make sure stuff was started
let inventory = ctl_client
.get_host_inventory(&host_id)
let resp_data = ctl_client
.get_host_inventory(host_id)
.await
.unwrap()
.response
.unwrap()
.components;
.into_data()
.unwrap();
let inventory = resp_data.components();
assert_eq!(inventory.len(), 1, "Should only have 1 component");
assert_eq!(
inventory[0].image_ref, HELLO_IMAGE_REF,
inventory[0].image_ref(),
HELLO_IMAGE_REF,
"Should have started the correct component"
);
assert_eq!(
inventory[0].max_instances, 1,
inventory[0].max_instances(),
1,
"Should have 1 unmanaged component still running"
);
}
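The recurring pattern in the test changes above is the move from public struct fields (`.response`, `.id`, `.image_ref`, `.annotations`) to accessor methods on the control-interface responses (`into_data()`/`data()`, `id()`, `image_ref()`, `annotations()`, `components()`, `providers()`). As a hedged sketch of that pattern only (types and signatures follow the diff above and are not re-verified against the crate), a helper in the same style might look like:

```rust
// Sketch of the accessor-style reads the updated tests rely on: unwrap the
// CtlResponse with into_data(), then read fields through methods. Names and
// return types are assumed to match the diff above.
async fn first_component_image(
    ctl_client: &wasmcloud_control_interface::Client,
    host_id: &str,
) -> anyhow::Result<String> {
    let inventory = ctl_client
        .get_host_inventory(host_id)
        .await
        .map_err(|e| anyhow::anyhow!("{e:?}"))?
        .into_data()
        .ok_or_else(|| anyhow::anyhow!("should have host inventory"))?;
    let component = inventory
        .components()
        .first()
        .ok_or_else(|| anyhow::anyhow!("no components running"))?;
    Ok(component.image_ref().to_string())
}
```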
@@ -6,7 +6,7 @@ services:
- 4222:4222
# Have hosts in 3 different "regions"
wasmcloud_east:
image: wasmcloud/wasmcloud:1.0.4
image: wasmcloud/wasmcloud:latest
depends_on:
- nats
deploy:
@@ -18,7 +18,7 @@ services:
WASMCLOUD_CLUSTER_SEED: SCAOGJWX53TGI4233T6GAXWYWBIB5ZDGPTCO6ODJQYELS52YCQCBQSRPA4
WASMCLOUD_LABEL_region: us-brooks-east
wasmcloud_west:
image: wasmcloud/wasmcloud:1.0.4
image: wasmcloud/wasmcloud:latest
depends_on:
- nats
deploy:
@@ -30,7 +30,7 @@ services:
WASMCLOUD_CLUSTER_SEED: SCAOGJWX53TGI4233T6GAXWYWBIB5ZDGPTCO6ODJQYELS52YCQCBQSRPA4
WASMCLOUD_LABEL_region: us-taylor-west
wasmcloud_moon:
image: wasmcloud/wasmcloud:1.0.4
image: wasmcloud/wasmcloud:latest
depends_on:
- nats
deploy:
@@ -0,0 +1,28 @@
services:
  nats:
    image: nats:2.10-alpine
    command: ['-js']
    ports:
      - 4222:4222
  wasmcloud_test_host_one:
    image: wasmcloud/wasmcloud:latest
    depends_on:
      - nats
    deploy:
      replicas: 2
    environment:
      LC_ALL: en_US.UTF-8
      RUST_LOG: debug,hyper=info
      WASMCLOUD_NATS_HOST: nats
      WASMCLOUD_LATTICE: shared_providers
  wasmcloud_test_host_two:
    image: wasmcloud/wasmcloud:latest
    depends_on:
      - nats
    deploy:
      replicas: 2
    environment:
      LC_ALL: en_US.UTF-8
      RUST_LOG: debug,hyper=info
      WASMCLOUD_NATS_HOST: nats
      WASMCLOUD_LATTICE: shared_components
@@ -5,7 +5,7 @@ services:
ports:
- 4222:4222
wasmcloud:
image: wasmcloud/wasmcloud:1.0.4
image: wasmcloud/wasmcloud:latest
depends_on:
- nats
deploy:

70  tests/e2e.rs
@@ -10,7 +10,6 @@ use async_nats::{
    jetstream::{self, stream::Stream},
    Client,
};
use base64::{engine::general_purpose::STANDARD as B64decoder, Engine};
use futures::Future;
use tokio::{
    process::{Child, Command},

@@ -95,10 +94,18 @@ impl ClientInfo {
            .kill_on_drop(true)
            .spawn()
            .expect("Unable to watch docker logs");

        // Connect to NATS
-        let client = async_nats::connect("127.0.0.1:4222")
-            .await
-            .expect("Unable to connect to nats");
+        let client = tokio::time::timeout(Duration::from_secs(3), async {
+            loop {
+                if let Ok(client) = async_nats::connect("127.0.0.1:4222").await {
+                    return client;
+                }
+                tokio::time::sleep(Duration::from_millis(250)).await;
+            }
+        })
+        .await
+        .expect("timed out while creating NATS client");

        ClientInfo {
            client,

@@ -147,7 +154,7 @@ impl ClientInfo {
        self.wadm_clients.insert(lattice_prefix.to_string(), client);
    }

-    pub async fn launch_wadm(&mut self) {
+    pub async fn launch_wadm(&mut self, extra_envs: Option<HashMap<&str, &str>>) {
        let repo_root =
            PathBuf::from(std::env::var("CARGO_MANIFEST_DIR").expect("Unable to find repo root"));
        // Create the logging directory

@@ -165,6 +172,12 @@ impl ClientInfo {
                wadm_binary_path.display()
            )
        }
+        let mut envs = HashMap::from([
+            ("RUST_LOG","info,wadm=debug,wadm::scaler=trace,wadm::workers::event=trace,wasmcloud_control_interface=trace")
+        ]);
+        if let Some(extra_envs) = extra_envs {
+            envs.extend(extra_envs);
+        }

        for i in 0..3 {
            let log_path = log_dir.join(format!("wadm-{i}"));

@@ -177,10 +190,7 @@ impl ClientInfo {
                .stderr(file)
                .stdout(Stdio::null())
                .kill_on_drop(true)
-                .env(
-                    "RUST_LOG",
-                    "info,wadm=debug,wadm::scaler=trace,wadm::workers::event=trace,wasmcloud_control_interface=trace",
-                )
+                .envs(&envs)
                .spawn()
                .expect("Unable to spawn wadm binary");
            self.commands.push(child);

@@ -242,9 +252,13 @@ impl ClientInfo {
            .await
            .expect("Should be able to fetch hosts")
            .into_iter()
-            .filter_map(|host| {
-                host.response
-                    .map(|resp| (self.ctl_client(lattice_prefix).clone(), resp.id))
+            .filter_map(|resp| {
+                resp.into_data().map(|resp| {
+                    (
+                        self.ctl_client(lattice_prefix).clone(),
+                        resp.id().to_string(),
+                    )
+                })
            })
            .map(|(client, host_id)| async move {
                let inventory = client

@@ -252,9 +266,9 @@ impl ClientInfo {
                    .await
                    .map_err(|e| anyhow::anyhow!("{e:?}"))?;
                Ok((
-                    host_id,
+                    host_id.to_string(),
                    inventory
-                        .response
+                        .into_data()
                        .expect("Should have host inventory response"),
                ))
            });

@@ -333,7 +347,7 @@ pub async fn check_config(
        .get_config(config_name)
        .await
        .map_err(|e| anyhow::anyhow!(e))?
-        .response
+        .into_data()
        .expect("Should have config response");
    for (key, value) in values {
        if let Some(expected) = config.get(key) {

@@ -357,13 +371,12 @@ pub fn check_components(
    manifest_name: &str,
    expected_count: usize,
) -> anyhow::Result<()> {
-    let all_components = inventory.values().flat_map(|inv| &inv.components);
+    let all_components = inventory.values().flat_map(|inv| inv.components());
    let component_count: usize = all_components
        .filter(|component| {
-            component.image_ref == image_ref
+            component.image_ref() == image_ref
                && component
-                    .annotations
-                    .as_ref()
+                    .annotations()
                    .and_then(|annotations| {
                        annotations
                            .get(APP_SPEC_ANNOTATION)

@@ -371,7 +384,7 @@ pub fn check_components(
                    })
                    .unwrap_or(false)
        })
-        .map(|component| component.max_instances as usize)
+        .map(|component| component.max_instances() as usize)
        .sum();
    if component_count != expected_count {
        anyhow::bail!(

@@ -419,18 +432,16 @@ pub fn check_providers(
) -> anyhow::Result<()> {
    let provider_count = inventory
        .values()
-        .flat_map(|inv| &inv.providers)
+        .flat_map(|inv| inv.providers())
        .filter(|provider| {
            // You can only have 1 provider per host and that could be created by any manifest,
            // so we can just check the image ref and that it is managed by wadm
            provider
-                .image_ref
-                .as_deref()
+                .image_ref()
                .map(|image| image == image_ref)
                .unwrap_or(false)
                && provider
-                    .annotations
-                    .as_ref()
+                    .annotations()
                    .and_then(|annotations| {
                        annotations
                            .get(MANAGED_BY_ANNOTATION)

@@ -469,12 +480,9 @@ pub async fn get_manifest_status_info(
    match stream
        .get_last_raw_message_by_subject(&format!("wadm.status.{lattice_id}.{name}",))
        .await
-        .map(|raw| {
-            B64decoder
-                .decode(raw.payload)
-                .map(|b| serde_json::from_slice::<Status>(&b))
-        }) {
-        Ok(Ok(Ok(status))) => Some(status.info),
+        .map(|raw| serde_json::from_slice::<Status>(&raw.payload))
+    {
+        Ok(Ok(status)) => Some(status.info),
        // Model status doesn't exist or is invalid, assuming undeployed
        _ => None,
    }
@@ -20,7 +20,7 @@ use crate::{
};

const MANIFESTS_PATH: &str = "tests/fixtures/manifests";
-const DOCKER_COMPOSE_FILE: &str = "tests/docker-compose-e2e.yaml";
+const DOCKER_COMPOSE_FILE: &str = "tests/docker-compose-e2e_multiple_hosts.yaml";
const BLOBSTORE_FS_IMAGE_REF: &str = "ghcr.io/wasmcloud/blobstore-fs:0.6.0";
const BLOBSTORE_FS_PROVIDER_ID: &str = "fileserver";
const BLOBBY_IMAGE_REF: &str = "ghcr.io/wasmcloud/components/blobby-rust:0.4.0";

@@ -41,7 +41,7 @@ async fn run_multiple_host_tests() {
    let mut client_info = ClientInfo::new(manifest_dir, compose_file).await;
    client_info.add_ctl_client(DEFAULT_LATTICE_ID, None).await;
    client_info.add_wadm_client(DEFAULT_LATTICE_ID).await;
-    client_info.launch_wadm().await;
+    client_info.launch_wadm(None).await;

    // Wait for the first event on the lattice prefix before we start deploying and checking
    // statuses. Wadm can absolutely handle hosts starting before you start the wadm process, but the first event

@@ -133,7 +133,7 @@ async fn test_no_requirements(client_info: &ClientInfo) {
        .get_config("hello_simple-httpaddr")
        .await
        .map_err(|e| anyhow::anyhow!("should have http provider source config {e}"))?
-        .response
+        .into_data()
        .context("should have http provider source config response")?;
    assert_eq!(
        config,

@@ -148,16 +148,16 @@ async fn test_no_requirements(client_info: &ClientInfo) {
        .get_links()
        .await
        .map_err(|e| anyhow::anyhow!("{e:?}"))?
-        .response
+        .into_data()
        .context("Should have links")?;

    if !links.iter().any(|ld| {
-        ld.source_id == HTTP_SERVER_COMPONENT_ID
-            && ld.target == HELLO_COMPONENT_ID
-            && ld.wit_namespace == "wasi"
-            && ld.wit_package == "http"
-            && ld.interfaces == vec!["incoming-handler"]
-            && ld.name == "default"
+        ld.source_id() == HTTP_SERVER_COMPONENT_ID
+            && ld.target() == HELLO_COMPONENT_ID
+            && ld.wit_namespace() == "wasi"
+            && ld.wit_package() == "http"
+            && ld.name() == "default"
+            && *ld.interfaces() == vec!["incoming-handler"]
    }) {
        anyhow::bail!(
            "Link between http provider and hello component should exist: {:#?}",

@@ -206,7 +206,7 @@ async fn test_no_requirements(client_info: &ClientInfo) {
        .get_links()
        .await
        .map_err(|e| anyhow::anyhow!("{e:?}"))?
-        .response
+        .into_data()
        .context("Should have links")?;

    if !links.is_empty() {

@@ -309,7 +309,7 @@ async fn test_complex_app(client_info: &ClientInfo) {
        .get_config("complex-defaultcode")
        .await
        .map_err(|e| anyhow::anyhow!("should have blobby component config: {e:?}"))?
-        .response
+        .into_data()
        .context("should have blobby component config response")?;
    assert_eq!(
        blobby_config,

@@ -320,7 +320,7 @@ async fn test_complex_app(client_info: &ClientInfo) {
        .get_config("complex-rootfs")
        .await
        .map_err(|e| anyhow::anyhow!("should have target link config {e:?}"))?
-        .response
+        .into_data()
        .context("should have target link config response")?;
    assert_eq!(
        blobby_target_config,

@@ -331,7 +331,7 @@ async fn test_complex_app(client_info: &ClientInfo) {
        .get_config("complex-httpaddr")
        .await
        .map_err(|e| anyhow::anyhow!("should have source link config {e:?}"))?
-        .response
+        .into_data()
        .context("should have target link config response")?;
    assert_eq!(
        http_source_config,

@@ -342,7 +342,7 @@ async fn test_complex_app(client_info: &ClientInfo) {
        .get_config("complex-defaultfs")
        .await
        .map_err(|e| anyhow::anyhow!("should have provider config {e:?}"))?
-        .response
+        .into_data()
        .context("should have provider config response")?;
    assert_eq!(
        fileserver_config,

@@ -362,16 +362,16 @@ async fn test_complex_app(client_info: &ClientInfo) {
        .get_links()
        .await
        .map_err(|e| anyhow::anyhow!("{e:?}"))?
-        .response
+        .into_data()
        .context("Should have links")?;

    if !links.iter().any(|ld| {
-        ld.source_id == HTTP_SERVER_COMPONENT_ID
-            && ld.target == BLOBBY_COMPONENT_ID
-            && ld.wit_namespace == "wasi"
-            && ld.wit_package == "http"
-            && ld.interfaces == vec!["incoming-handler"]
-            && ld.name == "default"
+        ld.source_id() == HTTP_SERVER_COMPONENT_ID
+            && ld.target() == BLOBBY_COMPONENT_ID
+            && ld.wit_namespace() == "wasi"
+            && ld.wit_package() == "http"
+            && ld.name() == "default"
+            && *ld.interfaces() == vec!["incoming-handler"]
    }) {
        anyhow::bail!(
            "Link between blobby component and http provider should exist: {:#?}",

@@ -380,12 +380,12 @@ async fn test_complex_app(client_info: &ClientInfo) {
    }

    if !links.iter().any(|ld| {
-        ld.source_id == BLOBBY_COMPONENT_ID
-            && ld.target == BLOBSTORE_FS_PROVIDER_ID
-            && ld.wit_namespace == "wasi"
-            && ld.wit_package == "blobstore"
-            && ld.interfaces == vec!["blobstore"]
-            && ld.name == "default"
+        ld.source_id() == BLOBBY_COMPONENT_ID
+            && ld.target() == BLOBSTORE_FS_PROVIDER_ID
+            && ld.wit_namespace() == "wasi"
+            && ld.wit_package() == "blobstore"
+            && ld.name() == "default"
+            && *ld.interfaces() == vec!["blobstore"]
    }) {
        anyhow::bail!(
            "Link between blobby component and blobstore-fs provider should exist: {:#?}",

@@ -395,21 +395,21 @@ async fn test_complex_app(client_info: &ClientInfo) {

    // Make sure nothing is running on things it shouldn't be on
    if inventory.values().any(|inv| {
-        inv.labels
+        inv.labels()
            .get("region")
            .map(|region| region == "us-taylor-west" || region == "us-brooks-east")
            .unwrap_or(false)
            && inv
-                .providers
+                .providers()
                .iter()
-                .any(|prov| prov.id == BLOBSTORE_FS_PROVIDER_ID)
+                .any(|prov| prov.id() == BLOBSTORE_FS_PROVIDER_ID)
    }) {
        anyhow::bail!("Provider should only be running on the moon");
    }
    let moon_inventory = inventory
        .values()
        .find(|inv| {
-            inv.labels
+            inv.labels()
                .get("region")
                .map(|region| region == "moon")
                .unwrap_or(false)

@@ -417,9 +417,9 @@ async fn test_complex_app(client_info: &ClientInfo) {
        .unwrap();

    if moon_inventory
-        .components
+        .components()
        .iter()
-        .any(|component| component.id == BLOBBY_COMPONENT_ID)
+        .any(|component| component.id() == BLOBBY_COMPONENT_ID)
    {
        anyhow::bail!("Actors shouldn't be running on the moon");
    }

@@ -460,16 +460,16 @@ async fn test_stop_host_rebalance(client_info: &ClientInfo) {
        .expect("Unable to fetch inventory")
        .into_iter()
        .filter(|(_, inv)| {
-            inv.labels
+            inv.labels()
                .get("region")
                .map(|region| region == "us-brooks-east")
                .unwrap_or(false)
        })
        .max_by_key(|(_, inv)| {
-            inv.components
+            inv.components()
                .iter()
-                .find(|component| component.id == HELLO_COMPONENT_ID)
-                .map(|desc| desc.max_instances)
+                .find(|component| component.id() == HELLO_COMPONENT_ID)
+                .map(|desc| desc.max_instances())
                .unwrap_or(0)
        })
        .map(|(host_id, _)| host_id)
@@ -15,7 +15,7 @@
// use crate::e2e::check_status;

// const MANIFESTS_PATH: &str = "tests/fixtures/manifests";
-// const DOCKER_COMPOSE_FILE: &str = "tests/docker-compose-e2e-multitenant.yaml";
+// const DOCKER_COMPOSE_FILE: &str = "tests/docker-compose-e2e_multitenant.yaml";

// const MESSAGE_PUB_ACTOR_ID: &str = "MC3QONHYH3FY4KYFCOSVJWIDJG4WA2PVD6FHKR7FFT457GVUTZJYR2TJ";
// const NATS_PROVIDER_ID: &str = "VADNMSIML2XGO2X4TPIONTIC55R2UUQGPPDZPAVSC2QD7E76CR77SPW7";
@@ -0,0 +1,477 @@
#![cfg(feature = "_e2e_tests")]
use std::time::Duration;
use std::{collections::HashMap, path::PathBuf};

use anyhow::{ensure, Context as _};
use futures::StreamExt;
use helpers::HTTP_CLIENT_IMAGE_REF;
use wadm_types::api::StatusType;

mod e2e;
mod helpers;

use e2e::{assert_status, check_components, check_providers, ClientInfo, ExpectedCount};

use crate::{
    e2e::check_status,
    helpers::{HELLO_IMAGE_REF, HTTP_SERVER_IMAGE_REF},
};

const MANIFESTS_PATH: &str = "tests/fixtures/manifests/shared";
const DOCKER_COMPOSE_FILE: &str = "tests/docker-compose-e2e_shared.yaml";

const SHARED_COMPONENTS_LATTICE: &str = "shared_components";
const SHARED_PROVIDERS_LATTICE: &str = "shared_providers";
const INVALID_TEST_LATTICE: &str = "shared_invalid";

#[cfg(feature = "_e2e_tests")]
#[tokio::test(flavor = "multi_thread")]
async fn run_shared_component_tests() {
    use futures::FutureExt;

    let root_dir =
        PathBuf::from(std::env::var("CARGO_MANIFEST_DIR").expect("Unable to find repo root"));
    let manifest_dir = root_dir.join(MANIFESTS_PATH);
    let compose_file = root_dir.join(DOCKER_COMPOSE_FILE);

    let mut client_info = ClientInfo::new(manifest_dir, compose_file).await;
    client_info
        .add_ctl_client(SHARED_COMPONENTS_LATTICE, None)
        .await;
    client_info.add_wadm_client(SHARED_COMPONENTS_LATTICE).await;
    client_info
        .add_ctl_client(SHARED_PROVIDERS_LATTICE, None)
        .await;
    client_info.add_wadm_client(SHARED_PROVIDERS_LATTICE).await;
    client_info.add_ctl_client(INVALID_TEST_LATTICE, None).await;
    client_info.add_wadm_client(INVALID_TEST_LATTICE).await;
    client_info.launch_wadm(None).await;

    // Wait for the first event on the lattice prefix before we start deploying and checking
    // statuses. Wadm can absolutely handle hosts starting before you start the wadm process, but the first event
    // on the lattice will initialize the lattice monitor and for the following test we quickly assert things.
    let mut sub = client_info
        .client
        .subscribe("wasmbus.evt.*.>".to_string())
        .await
        .expect("Should be able to subscribe to default events");
    // Host heartbeats happen every 30 seconds, if we don't get a heartbeat in 2 minutes, bail.
    let _ = tokio::time::timeout(std::time::Duration::from_secs(120), sub.next())
        .await
        .expect("should have received a host heartbeat event before timeout");

    // Wait for hosts to start
    let mut did_start = false;
    for _ in 0..10 {
        match (
            client_info
                .ctl_client(SHARED_COMPONENTS_LATTICE)
                .get_hosts()
                .await,
            client_info
                .ctl_client(SHARED_PROVIDERS_LATTICE)
                .get_hosts()
                .await,
        ) {
            (Ok(hosts_one), Ok(hosts_two)) if hosts_one.len() == 2 && hosts_two.len() == 2 => {
                eprintln!(
                    "Hosts {}/2, {}/2 currently available",
                    hosts_one.len(),
                    hosts_two.len()
                );
                did_start = true;
                break;
            }
            (Ok(hosts_one), Ok(hosts_two)) => {
                eprintln!(
                    "Waiting for all hosts to be available, {}/2, {}/2 currently available",
                    hosts_one.len(),
                    hosts_two.len()
                );
            }
            (Err(e), _) | (_, Err(e)) => {
                eprintln!("Error when fetching hosts: {e}",)
            }
        }
        tokio::time::sleep(Duration::from_secs(1)).await;
    }

    if !did_start {
        panic!("Hosts didn't start")
    }

    let stream = client_info.get_status_stream().await;
    stream
        .purge()
        .await
        .expect("shouldn't have errored purging stream");

    // The futures must be boxed or they're technically different types
    let tests = [
        test_shared_providers(&client_info).boxed(),
        test_shared_components(&client_info).boxed(),
        test_invalid_shared(&client_info).boxed(),
    ];
    futures::future::join_all(tests).await;
}

async fn test_shared_providers(client_info: &ClientInfo) {
    let stream = client_info.get_status_stream().await;
    let client = client_info.wadm_client(SHARED_PROVIDERS_LATTICE);
    let (name, _version) = client
        .put_manifest(client_info.load_raw_manifest("shared_http.yaml").await)
        .await
        .expect("Shouldn't have errored when creating manifest");

    client
        .deploy_manifest(&name, None)
        .await
        .expect("Shouldn't have errored when deploying manifest");

    assert_status(None, Some(5), || async {
        let inventory = client_info
            .get_all_inventory(SHARED_PROVIDERS_LATTICE)
            .await?;

        check_providers(&inventory, HTTP_SERVER_IMAGE_REF, ExpectedCount::Exactly(1))?;
        check_providers(&inventory, HTTP_CLIENT_IMAGE_REF, ExpectedCount::Exactly(1))?;

        let links = client_info
            .ctl_client(SHARED_PROVIDERS_LATTICE)
            .get_links()
            .await
            .map_err(|e| anyhow::anyhow!("{e:?}"))?
            .into_data()
            .context("Should have links")?;

        ensure!(links.is_empty(), "Shouldn't have any links");

        check_status(
            &stream,
            SHARED_PROVIDERS_LATTICE,
            "shared-http",
            StatusType::Deployed,
        )
        .await
        .unwrap();

        Ok(())
    })
    .await;

    // Deploy manifest with HTTP component that depends on the shared manifest
    let (name, _version) = client
        .put_manifest(client_info.load_raw_manifest("shared_http_dev.yaml").await)
        .await
        .expect("Shouldn't have errored when creating manifest");

    client
        .deploy_manifest(&name, None)
        .await
        .expect("Shouldn't have errored when deploying manifest");

    assert_status(None, Some(5), || async {
        let inventory = client_info
            .get_all_inventory(SHARED_PROVIDERS_LATTICE)
            .await?;

        // Ensure all configuration is set correctly
        let config = client_info
            .ctl_client(SHARED_PROVIDERS_LATTICE)
            .get_config("shared_http_dev-httpaddr")
            .await
            .map_err(|e| anyhow::anyhow!("should have http provider source config {e}"))?
            .into_data()
            .context("should have http provider source config response")?;
        assert_eq!(
            config,
            HashMap::from_iter(vec![("address".to_string(), "0.0.0.0:8080".to_string())])
        );

        check_providers(&inventory, HTTP_SERVER_IMAGE_REF, ExpectedCount::Exactly(1))?;
        check_providers(&inventory, HTTP_CLIENT_IMAGE_REF, ExpectedCount::Exactly(1))?;
        check_components(&inventory, HELLO_IMAGE_REF, "shared-http-dev", 12)?;

        let links = client_info
            .ctl_client(SHARED_PROVIDERS_LATTICE)
            .get_links()
            .await
            .map_err(|e| anyhow::anyhow!("{e:?}"))?
            .into_data()
            .context("Should have links")?;

        ensure!(
            links.len() == 2,
            "Should have two links: http_server -> component -> http_client"
        );

        if !links.iter().any(|ld| {
            // This is checking that the source ID and the target
            // come from the correct generated manifest IDs
            ld.source_id() == "shared_http-httpserver"
                && ld.target() == "shared_http_dev-hello"
                && ld.wit_namespace() == "wasi"
                && ld.wit_package() == "http"
                && ld.interfaces() == &vec!["incoming-handler"]
                && ld.name() == "default"
        }) {
            anyhow::bail!(
                "Link between http server provider and hello component should exist: {:#?}",
                links
            )
        }
        if !links.iter().any(|ld| {
            // This is checking that the source ID and the target
            // come from the correct generated manifest IDs
            ld.source_id() == "shared_http_dev-hello"
                && ld.target() == "shared_http-httpclient"
                && ld.wit_namespace() == "wasi"
                && ld.wit_package() == "http"
                && ld.interfaces() == &vec!["outgoing-handler"]
                && ld.name() == "default"
        }) {
            anyhow::bail!(
                "Link between hello component and http client provider should exist: {:#?}",
                links
            )
        }

        check_status(
            &stream,
            SHARED_PROVIDERS_LATTICE,
            "shared-http",
            StatusType::Deployed,
        )
        .await
        .unwrap();
        check_status(
            &stream,
            SHARED_PROVIDERS_LATTICE,
            "shared-http-dev",
            StatusType::Deployed,
        )
        .await
        .unwrap();

        // TODO(#451): Additional validation tests coming in a follow-up PR
        // // You can't undeploy an application that is depended on
        // assert!(client.undeploy_manifest("shared-http").await.is_err());
        // assert!(client.delete_manifest("shared-http", None).await.is_err());

        // // Once dependent application is undeployed, you can undeploy and delete
        // assert!(client.undeploy_manifest("shared-http-dev").await.is_ok());
        // assert!(client.undeploy_manifest("shared-http").await.is_ok());
        // assert!(client.delete_manifest("shared-http", None).await.is_ok());

        Ok(())
    })
    .await;
}

async fn test_shared_components(client_info: &ClientInfo) {
    let stream = client_info.get_status_stream().await;
    let client = client_info.wadm_client(SHARED_COMPONENTS_LATTICE);
    let (name, _version) = client
        .put_manifest(client_info.load_raw_manifest("shared_component.yaml").await)
        .await
        .expect("Shouldn't have errored when creating manifest");

    client
        .deploy_manifest(&name, None)
        .await
        .expect("Shouldn't have errored when deploying manifest");

    assert_status(None, Some(5), || async {
        let inventory = client_info
            .get_all_inventory(SHARED_COMPONENTS_LATTICE)
            .await?;

        let config = client_info
            .ctl_client(SHARED_COMPONENTS_LATTICE)
            .get_config("shared_component-defaults")
            .await
            .map_err(|e| anyhow::anyhow!("should have http provider source config {e}"))?
            .into_data()
            .context("should have http provider source config response")?;
        assert_eq!(
            config,
            HashMap::from_iter(vec![("left".to_string(), "right".to_string())])
        );

        check_components(&inventory, HELLO_IMAGE_REF, "shared-component", 1)?;

        let links = client_info
            .ctl_client(SHARED_COMPONENTS_LATTICE)
            .get_links()
            .await
            .map_err(|e| anyhow::anyhow!("{e:?}"))?
            .into_data()
            .context("Should have links")?;

        ensure!(links.is_empty(), "Shouldn't have any links");

        check_status(
            &stream,
            SHARED_COMPONENTS_LATTICE,
            "shared-component",
            StatusType::Deployed,
        )
        .await
        .unwrap();

        Ok(())
    })
    .await;

    // Deploy manifest with HTTP component that depends on the shared manifest
    let (name, _version) = client
        .put_manifest(
            client_info
                .load_raw_manifest("shared_component_dev.yaml")
                .await,
        )
        .await
        .expect("Shouldn't have errored when creating manifest");

    client
        .deploy_manifest(&name, None)
        .await
        .expect("Shouldn't have errored when deploying manifest");

    assert_status(None, Some(5), || async {
        let inventory = client_info
            .get_all_inventory(SHARED_COMPONENTS_LATTICE)
            .await?;

        check_providers(&inventory, HTTP_SERVER_IMAGE_REF, ExpectedCount::Exactly(1))?;
        check_components(&inventory, HELLO_IMAGE_REF, "shared-component", 1)?;
        check_components(&inventory, HELLO_IMAGE_REF, "shared-component-dev", 12)?;

        let config = client_info
            .ctl_client(SHARED_COMPONENTS_LATTICE)
            .get_config("shared_component_dev-someconfig")
            .await
            .map_err(|e| anyhow::anyhow!("should have http provider source config {e}"))?
            .into_data()
            .context("should have http provider source config response")?;
        assert_eq!(
            config,
            HashMap::from_iter(vec![("foo".to_string(), "bar".to_string())])
        );

        let links = client_info
            .ctl_client(SHARED_COMPONENTS_LATTICE)
            .get_links()
            .await
            .map_err(|e| anyhow::anyhow!("{e:?}"))?
            .into_data()
            .context("Should have links")?;

        ensure!(links.len() == 3, "Should have three links");

        if !links.iter().any(|ld| {
            ld.source_id() == "shared_component_dev-hello"
                && ld.target() == "shared_component-link_to_meee"
                && ld.wit_namespace() == "custom"
                && ld.wit_package() == "package"
                && ld.interfaces() == &vec!["inter", "face"]
                && ld.name() == "default"
        }) {
            anyhow::bail!("Link between hello components should exist: {:#?}", links)
        }
        if !links.iter().any(|ld| {
            ld.source_id() == "shared_component-link_to_meee"
                && ld.target() == "shared_component_dev-hello"
                && ld.wit_namespace() == "custom"
                && ld.wit_package() == "package"
                && ld.interfaces() == &vec!["inter", "face"]
                && ld.name() == "default"
        }) {
            anyhow::bail!("Link between hello components should exist: {:#?}", links)
        }
        if !links.iter().any(|ld| {
            ld.source_id() == "shared_component_dev-httpserver"
                && ld.target() == "shared_component-link_to_meee"
                && ld.wit_namespace() == "wasi"
                && ld.wit_package() == "http"
                && ld.interfaces() == &vec!["incoming-handler"]
                && ld.name() == "default"
        }) {
            anyhow::bail!(
                "Link between http server provider and hello component should exist: {:#?}",
                links
            )
        }

        check_status(
            &stream,
            SHARED_COMPONENTS_LATTICE,
            "shared-component",
            StatusType::Deployed,
        )
        .await
        .unwrap();
        check_status(
            &stream,
            SHARED_COMPONENTS_LATTICE,
            "shared-component-dev",
            StatusType::Deployed,
        )
        .await
        .unwrap();

        Ok(())
    })
    .await;
}

async fn test_invalid_shared(client_info: &ClientInfo) {
    let client = client_info.wadm_client(INVALID_TEST_LATTICE);

    // Including `image` and `application` is not supported
    assert!(client
        .put_manifest(client_info.load_raw_manifest("both_properties.yaml").await)
        .await
        .is_err());
    // Must include `image` or `application`
    assert!(client
        .put_manifest(client_info.load_raw_manifest("no_properties.yaml").await)
        .await
        .is_err());

    // If the app or component is mismatched, should warn at put time
    // and fail to deploy
    let (name, _version) = client
        .put_manifest(client_info.load_raw_manifest("no_matching_app.yaml").await)
        .await
        .expect("Shouldn't have errored when creating manifest");
    assert!(client.deploy_manifest(&name, None).await.is_err());
    let (name, _version) = client
        .put_manifest(
            client_info
                .load_raw_manifest("no_matching_component.yaml")
                .await,
        )
        .await
        .expect("Shouldn't have errored when creating manifest");
    assert!(client.deploy_manifest(&name, None).await.is_err());

    // Deploy manifest, but not shared, and another app that depends on it, which should fail
    let (name, _version) = client
        .put_manifest(client_info.load_raw_manifest("notshared_http.yaml").await)
        .await
        .expect("Shouldn't have errored when creating manifest");
    client
        .deploy_manifest(&name, None)
        .await
        .expect("Shouldn't have errored when deploying manifest");
    let (name, _version) = client
        .put_manifest(
            client_info
                .load_raw_manifest("notshared_http_dev.yaml")
                .await,
        )
        .await
        .expect("Shouldn't have errored when creating manifest");
    assert!(client.deploy_manifest(&name, None).await.is_err());
}
@@ -17,7 +17,7 @@ use e2e::{
use helpers::{HELLO_COMPONENT_ID, HELLO_IMAGE_REF, HTTP_SERVER_COMPONENT_ID};

const MANIFESTS_PATH: &str = "tests/fixtures/manifests";
-const DOCKER_COMPOSE_FILE: &str = "tests/docker-compose-e2e-upgrade.yaml";
+const DOCKER_COMPOSE_FILE: &str = "tests/docker-compose-e2e_upgrades.yaml";
const KEYVALUE_REDIS_COMPONENT_ID: &str = "keyvalue_redis";
const DOG_FETCHER_GENERATED_ID: &str = "dog_fetcher";
const KVCOUNTER_GENERATED_ID: &str = "kvcounter";

@@ -35,7 +35,9 @@ async fn run_upgrade_tests() {
    let mut client_info = ClientInfo::new(manifest_dir, compose_file).await;
    client_info.add_ctl_client("default", None).await;
    client_info.add_wadm_client("default").await;
-    client_info.launch_wadm().await;
+    client_info
+        .launch_wadm(Some(HashMap::from([("--stream-persistence", "memory")])))
+        .await;

    // Wait for the first event on the lattice prefix before we start deploying and checking
    // statuses. Wadm can absolutely handle hosts starting before you start the wadm process, but the first event

@@ -136,21 +138,21 @@ async fn test_upgrade(client_info: &ClientInfo) {
        .get_links()
        .await
        .map_err(|e| anyhow::anyhow!("{e:?}"))?
-        .response
+        .into_data()
        .context("should have links")?;

    let http_link = links
        .iter()
        .find(|link| {
-            link.target == HELLO_COMPONENT_ID
-                && link.source_id == HTTP_SERVER_COMPONENT_ID
-                && link.wit_namespace == "wasi"
-                && link.wit_package == "http"
+            link.target() == HELLO_COMPONENT_ID
+                && link.source_id() == HTTP_SERVER_COMPONENT_ID
+                && link.wit_namespace() == "wasi"
+                && link.wit_package() == "http"
        })
        .context("Should have http link with hello")?;
    if let Err(e) = check_config(
        client_info.ctl_client("default"),
-        &http_link.source_config[0],
+        &http_link.source_config()[0],
        &HashMap::from_iter([("address".to_string(), "0.0.0.0:8080".to_string())]),
    )
    .await

@@ -164,15 +166,15 @@ async fn test_upgrade(client_info: &ClientInfo) {
    let dog_link = links
        .iter()
        .find(|link| {
-            link.target == generated_dogfetcher_id
-                && link.source_id == HTTP_SERVER_COMPONENT_ID
-                && link.wit_namespace == "wasi"
-                && link.wit_package == "http"
+            link.target() == generated_dogfetcher_id
+                && link.source_id() == HTTP_SERVER_COMPONENT_ID
+                && link.wit_namespace() == "wasi"
+                && link.wit_package() == "http"
        })
        .context("Should have http link with dog-fetcher")?;
    if let Err(e) = check_config(
        client_info.ctl_client("default"),
-        &dog_link.source_config[0],
+        &dog_link.source_config()[0],
        &HashMap::from_iter([("address".to_string(), "0.0.0.0:8081".to_string())]),
    )
    .await

@@ -186,15 +188,15 @@ async fn test_upgrade(client_info: &ClientInfo) {
    let kv_link = links
        .iter()
        .find(|link| {
-            link.source_id == generated_kvcounter_id
-                && link.target == KEYVALUE_REDIS_COMPONENT_ID
-                && link.wit_namespace == "wasi"
-                && link.wit_package == "keyvalue"
+            link.source_id() == generated_kvcounter_id
+                && link.target() == KEYVALUE_REDIS_COMPONENT_ID
+                && link.wit_namespace() == "wasi"
+                && link.wit_package() == "keyvalue"
        })
        .context("Should have redis link with kvcounter")?;
    if let Err(e) = check_config(
        client_info.ctl_client("default"),
-        &kv_link.target_config[0],
+        &kv_link.target_config()[0],
        &HashMap::from_iter([("URL".to_string(), "redis://127.0.0.1:6379".to_string())]),
    )
    .await

@@ -291,7 +293,7 @@ async fn test_upgrade(client_info: &ClientInfo) {
    )?;
    check_providers(
        &inventory,
-        "ghcr.io/wasmcloud/http-server:0.21.0",
+        "ghcr.io/wasmcloud/http-server:0.23.0",
        ExpectedCount::Exactly(1),
    )?;
    check_providers(

@@ -305,22 +307,22 @@ async fn test_upgrade(client_info: &ClientInfo) {
        .get_links()
        .await
        .map_err(|e| anyhow::anyhow!("{e:?}"))?
-        .response
+        .into_data()
        .context("should've had links")?;

    let http_link = links
        .iter()
        .find(|link| {
-            link.target == HELLO_COMPONENT_ID
-                && link.source_id == HTTP_SERVER_COMPONENT_ID
-                && link.wit_namespace == "wasi"
-                && link.wit_package == "http"
+            link.target() == HELLO_COMPONENT_ID
+                && link.source_id() == HTTP_SERVER_COMPONENT_ID
+                && link.wit_namespace() == "wasi"
+                && link.wit_package() == "http"
        })
        .ok_or_else(|| anyhow::anyhow!("Should have http link with hello"))?;

    if let Err(e) = check_config(
        client_info.ctl_client("default"),
-        &http_link.source_config[0],
+        &http_link.source_config()[0],
        &HashMap::from([("address".to_string(), "0.0.0.0:8082".to_string())]),
    )
    .await

@@ -332,10 +334,10 @@ async fn test_upgrade(client_info: &ClientInfo) {
    }

    if links.iter().any(|ld| {
-        ld.source_id == generated_kvcounter_id
-            && ld.target == KEYVALUE_REDIS_COMPONENT_ID
-            && ld.wit_namespace == "wasi"
-            && ld.wit_package == "keyvalue"
+        ld.source_id() == generated_kvcounter_id
+            && ld.target() == KEYVALUE_REDIS_COMPONENT_ID
+            && ld.wit_namespace() == "wasi"
+            && ld.wit_package() == "keyvalue"
    }) {
        anyhow::bail!(
            "Link between kvcounter component and redis provider should not exist: {:#?}",

@@ -409,7 +411,7 @@ async fn test_upgrade(client_info: &ClientInfo) {
    )?;
    check_providers(
        &inventory,
-        "ghcr.io/wasmcloud/http-server:0.21.0",
+        "ghcr.io/wasmcloud/http-server:0.23.0",
        ExpectedCount::Exactly(1),
    )?;
    check_providers(

@@ -423,22 +425,22 @@ async fn test_upgrade(client_info: &ClientInfo) {
        .get_links()
        .await
        .map_err(|e| anyhow::anyhow!("{e:?}"))?
-        .response
+        .into_data()
        .context("should've had links")?;

    let http_link = links
        .iter()
        .find(|link| {
-            link.target == HELLO_COMPONENT_ID
-                && link.source_id == HTTP_SERVER_COMPONENT_ID
-                && link.wit_namespace == "wasi"
-                && link.wit_package == "http"
+            link.target() == HELLO_COMPONENT_ID
+                && link.source_id() == HTTP_SERVER_COMPONENT_ID
+                && link.wit_namespace() == "wasi"
+                && link.wit_package() == "http"
        })
        .ok_or_else(|| anyhow::anyhow!("Should have http link with hello"))?;

    if let Err(e) = check_config(
        client_info.ctl_client("default"),
-        &http_link.source_config[0],
+        &http_link.source_config()[0],
        &HashMap::from([("address".to_string(), "0.0.0.0:8080".to_string())]),
    )
    .await

@@ -452,16 +454,16 @@ async fn test_upgrade(client_info: &ClientInfo) {
    let dog_link = links
        .iter()
        .find(|link| {
-            link.target == generated_dogfetcher_id
-                && link.source_id == HTTP_SERVER_COMPONENT_ID
-                && link.wit_namespace == "wasi"
-                && link.wit_package == "http"
+            link.target() == generated_dogfetcher_id
+                && link.source_id() == HTTP_SERVER_COMPONENT_ID
+                && link.wit_namespace() == "wasi"
+                && link.wit_package() == "http"
        })
        .ok_or_else(|| anyhow::anyhow!("Should have dog link with hello"))?;

    if let Err(e) = check_config(
        client_info.ctl_client("default"),
-        &dog_link.source_config[0],
+        &dog_link.source_config()[0],
        &HashMap::from([("address".to_string(), "0.0.0.0:8081".to_string())]),
    )
    .await

@@ -473,10 +475,10 @@ async fn test_upgrade(client_info: &ClientInfo) {
    }

    if links.iter().any(|ld| {
-        ld.source_id == generated_kvcounter_id
-            && ld.target == KEYVALUE_REDIS_COMPONENT_ID
-            && ld.wit_namespace == "wasi"
-            && ld.wit_package == "keyvalue"
+        ld.source_id() == generated_kvcounter_id
+            && ld.target() == KEYVALUE_REDIS_COMPONENT_ID
+            && ld.wit_namespace() == "wasi"
+            && ld.wit_package() == "keyvalue"
    }) {
        anyhow::bail!(
            "Link between kvcounter component and redis provider should not exist: {:#?}",

@@ -510,12 +512,12 @@ async fn test_upgrade(client_info: &ClientInfo) {
    // let inventory = client_info.get_all_inventory("default").await?;
    // check_providers(
    //     &inventory,
-    //     "ghcr.io/wasmcloud/http-server:0.21.0",
+    //     "ghcr.io/wasmcloud/http-server:0.23.0",
    //     ExpectedCount::Exactly(1),
    // )?;
    // check_providers(
    //     &inventory,
-    //     "ghcr.io/wasmcloud/http-server:0.21.0",
+    //     "ghcr.io/wasmcloud/http-server:0.23.0",
    //     ExpectedCount::Exactly(0),
    // )?;
    // Ok(())
@@ -161,14 +161,14 @@ async fn test_event_stream() -> Result<()> {
    let mut evt = wait_for_event(&mut stream, LINK_OPERATION_TIMEOUT_DURATION).await;
    if let Event::LinkdefSet(event) = evt.as_ref() {
        assert_eq!(
-            event.linkdef.source_id, HELLO_COMPONENT_ID,
+            event.linkdef.source_id(), HELLO_COMPONENT_ID,
            "Expected to get a linkdef event for the right component and provider, got component ID: {}",
-            event.linkdef.source_id,
+            event.linkdef.source_id(),
        );
        assert_eq!(
-            event.linkdef.target, HTTP_SERVER_COMPONENT_ID,
+            event.linkdef.target(), HTTP_SERVER_COMPONENT_ID,
            "Expected to get a linkdef event for the right component and provider, got provider ID: {}",
-            event.linkdef.target,
+            event.linkdef.target(),
        );
    } else {
        panic!("Event wasn't an link set event");

@@ -186,6 +186,8 @@ async fn test_event_stream() -> Result<()> {
        HELLO_COMPONENT_ID,
        "wasi",
        "http",
+        "--link-name",
+        "default",
        "--ctl-port",
        &ctl_port,
    ])
@@ -9,7 +9,7 @@ spec:
    - name: test-policy
      type: test
      properties:
-        test: "data"
+        test: 'data'
  components:
    - name: hello
      type: component

@@ -36,7 +36,7 @@ spec:
    - name: httpserver
      type: capability
      properties:
-        image: ghcr.io/wasmcloud/http-server:0.21.0
+        image: ghcr.io/wasmcloud/http-server:0.23.0
      traits:
        - type: spreadscaler
          properties:

@@ -50,7 +50,7 @@ spec:
    - name: httpserver
      type: capability
      properties:
-        image: ghcr.io/wasmcloud/http-server:0.21.0
+        image: ghcr.io/wasmcloud/http-server:0.23.0
        id: http_server
      traits:
        - type: spreadscaler
@@ -0,0 +1,36 @@
apiVersion: core.oam.dev/v1beta1
kind: Application
metadata:
  name: test-different-interfaces
  annotations:
    description: "test"
spec:
  components:
    - name: my-component
      type: component
      properties:
        image: test:latest
      traits:
        - type: spreadscaler
          properties:
            instances: 1
        - type: link
          properties:
            target: redis
            namespace: wasi
            package: keyvalue
            interfaces: [atomics]
        - type: link
          properties:
            target: redis
            namespace: wasi
            package: keyvalue
            interfaces: [store]
    - name: redis
      type: capability
      properties:
        image: test:latest
      traits:
        - type: spreadscaler
          properties:
            instances: 1
@@ -18,7 +18,8 @@ spec:
            instances: 1
        - type: link
          properties:
            namespace: wasi
            package: http
            interfaces: [outgoing-handler]
            target:
              name: httpserver
              values:
                address: 0.0.0.0:8080
            name: httpclient
@@ -0,0 +1,53 @@
---
apiVersion: core.oam.dev/v1beta1
kind: Application
metadata:
  name: rust-http-blobstore
  annotations:
    version: v0.0.1
    description: 'HTTP Blobstore demo in Rust, using the WebAssembly Component Model and WebAssembly Interfaces Types (WIT)'
    wasmcloud.dev/authors: wasmCloud team
    wasmcloud.dev/source-url: https://github.com/wasmCloud/wasmCloud/blob/main/examples/rust/components/http-blobstore/wadm.yaml
    wasmcloud.dev/readme-md-url: https://github.com/wasmCloud/wasmCloud/blob/main/examples/rust/components/http-blobstore/README.md
    wasmcloud.dev/homepage: https://github.com/wasmCloud/wasmCloud/tree/main/examples/rust/components/http-blobstore
    wasmcloud.dev/categories: |
      http,http-server,rust,blobstore,object-storage,example
spec:
  components:
    - name: http-blobstore
      type: component
      properties:
        image: ghcr.io/wasmcloud/components/http-blobstore-rust:0.2.0
      traits:
        - type: spreadscaler
          properties:
            instances: 1
        - type: link
          properties:
            target: blobstore-fs
            namespace: wasi
            package: blobstore
            interfaces: [blobstore]
            target_config:
              - name: root-directory
                properties:
                  root: '/tmp'
    - name: httpserver
      type: capability
      properties:
        image: ghcr.io/wasmcloud/http-server:0.23.2
      traits:
        - type: link
          properties:
            target: http-blobstore
            namespace: wasi
            package: http
            interfaces: [incoming-handler]
            source_config:
              - name: default-http
                properties:
                  address: 0.0.0.0:8000
    - name: blobstore-fs
      type: capability
      properties:
        image: ghcr.io/wasmcloud/blobstore-fs:0.10.1
@@ -0,0 +1,96 @@
apiVersion: core.oam.dev/v1beta1
kind: Application
metadata:
  name: my-example-app
  annotations:
    description: "This is my app"
spec:
  components:
    - name: userinfo1
      type: component
      properties:
        image: wasmcloud.azurecr.io/fake:1
      traits:
        - type: link
          properties:
            namespace: wasi
            package: keyvalue
            interfaces: [atomics, store]
            target:
              name: kvredis
              config:
                - name: redis-url
                  properties:
                    url: "redis://127.0.0.1:6379"
                # this config name is duplicated, but has no properties,
                # so it references an existing config
                - name: my_example_app-shared_redis

    - name: userinfo2
      type: component
      properties:
        image: wasmcloud.azurecr.io/fake:1
      traits:
        - type: link
          properties:
            namespace: wasi
            package: keyvalue
            interfaces: [atomics, store]
            target:
              name: kvredis
              config:
                - name: redis-url
                  properties:
                    url: "redis://127.0.0.1:6379"
                # this config name is duplicated, but has no properties,
                # so it references an existing config
                - name: my_example_app-shared_redis

    - name: webcap1
      type: capability
      properties:
        id: httpserver1
        image: wasmcloud.azurecr.io/httpserver:0.13.1
      traits:
        - type: link
          properties:
            namespace: wasi
            package: http
            interfaces: ["incoming-handler"]
            target:
              name: userinfo1
            source:
              config:
                - name: default-port
                  properties:
                    port: 0.0.0.0:8080
                - name: alternate-port
                  properties:
                    address: 0.0.0.0:8081
                - name: alternate-port
                  properties:
                    address: 0.0.0.0:8081

    - name: webcap2
      type: capability
      properties:
        id: httpserver2
        image: wasmcloud.azurecr.io/httpserver:0.14.1
      traits:
        - type: link
          properties:
            target:
              name: userinfo2
            namespace: wasi
            package: http
            interfaces: ["incoming-handler"]
            source:
              config:
                - name: default-port
                  properties:
                    address: 0.0.0.0:8080

    - name: kvredis
      type: capability
      properties:
        image: ghcr.io/wasmcloud/keyvalue-redis:0.28.1
@@ -0,0 +1,49 @@
apiVersion: core.oam.dev/v1beta1
kind: Application
metadata:
  name: test-link-name-uniqueness
  annotations:
    description: 'test'
spec:
  components:
    - name: http-component
      type: component
      properties:
        image: file://./build/http_hello_world_s.wasm
      traits:
        - type: spreadscaler
          properties:
            instances: 1
    - name: http-component-two
      type: component
      properties:
        image: file://./build/http_hello_world_s.wasm
      traits:
        - type: spreadscaler
          properties:
            instances: 1
    - name: httpserver
      type: capability
      properties:
        image: ghcr.io/wasmcloud/http-server:0.22.0
      traits:
        - type: link
          properties:
            target: http-component
            namespace: wasi
            package: http
            interfaces: [incoming-handler]
            source_config:
              - name: default-http
                properties:
                  address: 127.0.0.1:8080
        - type: link
          properties:
            target: http-component-two
            namespace: wasi
            package: http
            interfaces: [incoming-handler]
            source_config:
              - name: default-http-two
                properties:
                  address: 127.0.0.1:8081
@@ -0,0 +1,44 @@
apiVersion: core.oam.dev/v1beta1
kind: Application
metadata:
  name: test-duplicate-interfaces
  annotations:
    description: "test"
spec:
  components:
    - name: my-component
      type: component
      properties:
        image: test:latest
      traits:
        - type: spreadscaler
          properties:
            instances: 1
        - type: link
          properties:
            target: redis-1
            namespace: wasi
            package: keyvalue
            interfaces: [atomics]
        - type: link
          properties:
            target: redis-2
            namespace: wasi
            package: keyvalue
            interfaces: [atomics]
    - name: redis-1
      type: capability
      properties:
        image: test:latest
      traits:
        - type: spreadscaler
          properties:
            instances: 1
    - name: redis-2
      type: capability
      properties:
        image: test:latest
      traits:
        - type: spreadscaler
          properties:
            instances: 1
@@ -18,7 +18,7 @@ spec:
    - name: httpserver
      type: capability
      properties:
-        image: ghcr.io/wasmcloud/http-server:0.21.0
+        image: ghcr.io/wasmcloud/http-server:0.23.0
      traits:
        - type: spreadscaler
          properties:

@@ -55,6 +55,7 @@ spec:
      traits:
        - type: link
          properties:
+            name: hello
            target:
              name: hello-world
            namespace: wasi

@@ -68,6 +69,7 @@ spec:
                address: 0.0.0.0:8080
        - type: link
          properties:
+            name: dog
            target:
              name: dog-fetcher
            namespace: wasi
@@ -0,0 +1,15 @@
apiVersion: core.oam.dev/v1beta1
kind: Application
metadata:
  name: both-props
  annotations:
    description: 'Contains a component with image and application'
spec:
  components:
    - name: httpserver
      type: capability
      properties:
        image: pull-from-me
        application:
          name: wheee
          component: httpserver
@@ -0,0 +1,35 @@
apiVersion: core.oam.dev/v1beta1
kind: Application
metadata:
  name: some-nonexistant-app
  annotations:
    description: 'Manifest that refers to a nonexistant app'
spec:
  components:
    - name: hello
      type: component
      properties:
        image: ghcr.io/wasmcloud/components/http-hello-world-rust:0.1.0
      traits:
        - type: spreadscaler
          properties:
            instances: 12
    - name: httpserver
      type: capability
      properties:
        application:
          name: some-nonexistant-app
          component: httpserver
      traits:
        - type: link
          properties:
            namespace: wasi
            package: http
            interfaces: [incoming-handler]
            target:
              name: hello
            source:
              config:
                - name: httpaddr
                  properties:
                    address: 0.0.0.0:8080
@@ -0,0 +1,35 @@
apiVersion: core.oam.dev/v1beta1
kind: Application
metadata:
  name: no-matching-component
  annotations:
    description: 'Manifest that refers to a nonexistant component'
spec:
  components:
    - name: hello
      type: component
      properties:
        image: ghcr.io/wasmcloud/components/http-hello-world-rust:0.1.0
      traits:
        - type: spreadscaler
          properties:
            instances: 12
    - name: httpserver
      type: capability
      properties:
        application:
          name: shared-http
          component: some-nonexistant-component
      traits:
        - type: link
          properties:
            namespace: wasi
            package: http
            interfaces: [incoming-handler]
            target:
              name: hello
            source:
              config:
                - name: httpaddr
                  properties:
                    address: 0.0.0.0:8080
@@ -0,0 +1,15 @@
apiVersion: core.oam.dev/v1beta1
kind: Application
metadata:
  name: no-props
  annotations:
    description: 'Contains a component with neither image and application'
spec:
  components:
    - name: httpserver
      type: capability
      properties:
        config:
          - name: log
            properties:
              level: info
@@ -0,0 +1,16 @@
apiVersion: core.oam.dev/v1beta1
kind: Application
metadata:
  name: not-shared-http
  annotations:
    description: 'My Precious! O my Precious! We needs it. Must have the precious. They stole it from us'
spec:
  components:
    - name: httpserver
      type: capability
      properties:
        image: ghcr.io/wasmcloud/http-server:0.23.0
      traits:
        - type: spreadscaler
          properties:
            instances: 1
@@ -0,0 +1,35 @@
apiVersion: core.oam.dev/v1beta1
kind: Application
metadata:
  name: not-shared-http-dev
  annotations:
    description: 'A Hello World app that tries to use a not shared component'
spec:
  components:
    - name: hello
      type: component
      properties:
        image: ghcr.io/wasmcloud/components/http-hello-world-rust:0.1.0
      traits:
        - type: spreadscaler
          properties:
            instances: 12
    - name: httpserver
      type: capability
      properties:
        application:
          name: not-shared-http
          component: httpserver
      traits:
        - type: link
          properties:
            namespace: wasi
            package: http
            interfaces: [incoming-handler]
            target:
              name: hello
            source:
              config:
                - name: httpaddr
                  properties:
                    address: 0.0.0.0:8080
@@ -0,0 +1,21 @@
apiVersion: core.oam.dev/v1beta1
kind: Application
metadata:
  name: shared-component
  annotations:
    description: 'A shared component!'
    experimental.wasmcloud.dev/shared: 'true'
spec:
  components:
    - name: link-to-meee
      type: component
      properties:
        image: ghcr.io/wasmcloud/components/http-hello-world-rust:0.1.0
        config:
          - name: defaults
            properties:
              left: right
      traits:
        - type: spreadscaler
          properties:
            instances: 1
@@ -0,0 +1,59 @@
apiVersion: core.oam.dev/v1beta1
kind: Application
metadata:
  name: shared-component-dev
  annotations:
    description: 'A Hello World app for testing, most basic link'
spec:
  components:
    # Link a component to a shared component
    - name: hello
      type: component
      properties:
        image: ghcr.io/wasmcloud/components/http-hello-world-rust:0.1.0
      traits:
        - type: spreadscaler
          properties:
            instances: 12
        - type: link
          properties:
            namespace: custom
            package: package
            interfaces: [inter, face]
            target:
              name: component-dep
    # Shared component, link to a component in this application
    - name: component-dep
      type: component
      properties:
        application:
          name: shared-component
          component: link-to-meee
      traits:
        - type: link
          properties:
            namespace: custom
            package: package
            interfaces: [inter, face]
            target:
              name: hello
              config:
                - name: someconfig
                  properties:
                    foo: bar
    # Link a provider to a shared component
    - name: httpserver
      type: capability
      properties:
        image: ghcr.io/wasmcloud/http-server:0.23.0
      traits:
        - type: spreadscaler
          properties:
            instances: 1
        - type: link
          properties:
            namespace: wasi
            package: http
            interfaces: [incoming-handler]
            target:
              name: component-dep
@@ -0,0 +1,25 @@
apiVersion: core.oam.dev/v1beta1
kind: Application
metadata:
  name: shared-http
  annotations:
    description: 'A shared HTTP server and client, for everybody!!!!!!!!!!!!!!!!!!!!'
    experimental.wasmcloud.dev/shared: 'true'
spec:
  components:
    - name: httpclient
      type: capability
      properties:
        image: ghcr.io/wasmcloud/http-client:0.12.0
      traits:
        - type: spreadscaler
          properties:
            instances: 1
    - name: httpserver
      type: capability
      properties:
        image: ghcr.io/wasmcloud/http-server:0.23.0
      traits:
        - type: spreadscaler
          properties:
            instances: 1
@@ -0,0 +1,50 @@
apiVersion: core.oam.dev/v1beta1
kind: Application
metadata:
  name: shared-http-dev
  annotations:
    description: 'A Hello World app for testing, most basic HTTP link'
spec:
  components:
    - name: hello
      type: component
      properties:
        image: ghcr.io/wasmcloud/components/http-hello-world-rust:0.1.0
      traits:
        - type: spreadscaler
          properties:
            instances: 12
        - type: link
          properties:
            namespace: wasi
            package: http
            interfaces: [outgoing-handler]
            target:
              # Note that the name in this manifest does not have to be the same
              # as the name of the component in the shared manifest
              name: http-client-this
    - name: http-client-this
      type: capability
      properties:
        application:
          name: shared-http
          component: httpclient
    - name: httpserver
      type: capability
      properties:
        application:
          name: shared-http
          component: httpserver
      traits:
        - type: link
          properties:
            namespace: wasi
            package: http
            interfaces: [incoming-handler]
            target:
              name: hello
            source:
              config:
                - name: httpaddr
                  properties:
                    address: 0.0.0.0:8080
@@ -19,7 +19,7 @@ spec:
    - name: httpserver
      type: capability
      properties:
-        image: ghcr.io/wasmcloud/http-server:0.21.0
+        image: ghcr.io/wasmcloud/http-server:0.23.0
        id: http_server
      traits:
        - type: spreadscaler

@@ -40,12 +40,13 @@ spec:
    - name: httpserver
      type: capability
      properties:
-        image: ghcr.io/wasmcloud/http-server:0.21.0
+        image: ghcr.io/wasmcloud/http-server:0.23.0
        id: http_server
      traits:
        # Updated linkdef trait
        - type: link
          properties:
+            name: hello
            target:
              name: hello-world
            namespace: wasi

@@ -59,6 +60,7 @@ spec:
                address: 0.0.0.0:8082
        - type: link
          properties:
+            name: dog
            target:
              name: dog-fetcher
            namespace: wasi

@@ -35,7 +35,7 @@ spec:
    - name: httpserver
      type: capability
      properties:
-        image: ghcr.io/wasmcloud/http-server:0.21.0
+        image: ghcr.io/wasmcloud/http-server:0.23.0
        id: http_server
        # Updated config
        config:

@@ -45,6 +45,7 @@ spec:
      traits:
        - type: link
          properties:
+            name: hello
            target:
              name: hello-world
            namespace: wasi

@@ -59,6 +60,7 @@ spec:
                address: 0.0.0.0:8080
        - type: link
          properties:
+            name: dog
            target:
              name: dog-fetcher
            namespace: wasi
Some files were not shown because too many files have changed in this diff.