Compare commits


No commits in common. "main" and "client-v0.1.1" have entirely different histories.

168 changed files with 7042 additions and 15961 deletions


@@ -19,7 +19,8 @@
},
"extensions": [
"rust-lang.rust-analyzer",
"tamasfe.even-better-toml"
"tamasfe.even-better-toml",
"serayuzgur.crates"
]
}
},

.envrc

@@ -1,5 +0,0 @@
if ! has nix_direnv_version || ! nix_direnv_version 3.0.6; then
source_url "https://raw.githubusercontent.com/nix-community/nix-direnv/3.0.6/direnvrc" "sha256-RYcUJaRMf8oF5LznDrlCXbkOQrywm0HDv1VjYGaJGdM="
fi
watch_file rust-toolchain.toml
use flake

.github/CODEOWNERS vendored

@@ -1,2 +1,2 @@
# wasmCloud wadm maintainers
* @wasmCloud/wadm-maintainers
# wasmCloud team members
* @autodidaddict @brooksmtownsend @thomastaylor312 @connorsmith256


@@ -1,38 +0,0 @@
name: Install and configure wkg (linux only)
inputs:
wkg-version:
description: version of wkg to install. Should be a valid tag from https://github.com/bytecodealliance/wasm-pkg-tools/releases
default: "v0.6.0"
oci-username:
description: username for oci registry
required: true
oci-password:
description: password for oci registry
required: true
runs:
using: composite
steps:
- name: Download wkg
shell: bash
run: |
curl --fail -L https://github.com/bytecodealliance/wasm-pkg-tools/releases/download/${{ inputs.wkg-version }}/wkg-x86_64-unknown-linux-gnu -o wkg
chmod +x wkg;
echo "$(realpath .)" >> "$GITHUB_PATH";
- name: Generate and set wkg config
shell: bash
env:
WKG_OCI_USERNAME: ${{ inputs.oci-username }}
WKG_OCI_PASSWORD: ${{ inputs.oci-password }}
run: |
cat << EOF > wkg-config.toml
[namespace_registries]
wasmcloud = "wasmcloud.com"
wrpc = "bytecodealliance.org"
wasi = "wasi.dev"
[registry."wasmcloud.com".oci]
auth = { username = "${WKG_OCI_USERNAME}", password = "${WKG_OCI_PASSWORD}" }
EOF
echo "WKG_CONFIG_FILE=$(realpath wkg-config.toml)" >> $GITHUB_ENV


@@ -1,16 +0,0 @@
version: 2
updates:
- package-ecosystem: "cargo"
directory: "/"
schedule:
interval: "weekly"
day: "monday"
time: "09:00"
timezone: "America/New_York"
- package-ecosystem: "github-actions"
directory: "/"
schedule:
interval: "weekly"
day: "monday"
time: "09:00"
timezone: "America/New_York"

.github/release.yml vendored

@@ -1,6 +0,0 @@
# .github/release.yml
changelog:
exclude:
authors:
- dependabot


@@ -2,7 +2,6 @@ name: chart
env:
HELM_VERSION: v3.14.0
CHART_TESTING_NAMESPACE: chart-testing
on:
push:
@@ -13,15 +12,12 @@ on:
- 'charts/**'
- '.github/workflows/chart.yml'
permissions:
contents: read
jobs:
validate:
runs-on: ubuntu-22.04
steps:
- uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
- uses: actions/checkout@v4
with:
fetch-depth: 0
@@ -30,18 +26,18 @@ jobs:
git fetch origin main:main
- name: Set up Helm
uses: azure/setup-helm@b9e51907a09c216f16ebe8536097933489208112 # v4.3.0
uses: azure/setup-helm@v4
with:
version: ${{ env.HELM_VERSION }}
# Used by helm chart-testing below
- name: Set up Python
uses: actions/setup-python@a26af69be951a213d495a4c3e4e4022e16d87065 # v5.6.0
uses: actions/setup-python@v5.0.0
with:
python-version: '3.12.2'
- name: Set up chart-testing
uses: helm/chart-testing-action@0d28d3144d3a25ea2cc349d6e59901c4ff469b3b # v2.7.0
uses: helm/chart-testing-action@v2.6.1
with:
version: v3.10.1
yamllint_version: 1.35.1
@@ -52,7 +48,7 @@ jobs:
ct lint --config charts/wadm/ct.yaml
- name: Create kind cluster
uses: helm/kind-action@a1b0e391336a6ee6713a0583f8c6240d70863de3 # v1.12.0
uses: helm/kind-action@v1.9.0
with:
version: "v0.22.0"
@@ -60,29 +56,24 @@
run: |
helm repo add nats https://nats-io.github.io/k8s/helm/charts/
helm repo update
helm install nats nats/nats -f charts/wadm/ci/nats.yaml --namespace ${{ env.CHART_TESTING_NAMESPACE }} --create-namespace
helm install nats nats/nats -f charts/wadm/ci/nats.yaml
- name: Run chart-testing install / same namespace
- name: Run chart-testing (install)
run: |
ct install --config charts/wadm/ct.yaml --namespace ${{ env.CHART_TESTING_NAMESPACE }}
- name: Run chart-testing install / across namespaces
run: |
ct install --config charts/wadm/ct.yaml --helm-extra-set-args "--set=wadm.config.nats.server=nats://nats-headless.${{ env.CHART_TESTING_NAMESPACE }}.svc.cluster.local"
ct install --config charts/wadm/ct.yaml
publish:
if: ${{ startsWith(github.ref, 'refs/tags/chart-v') }}
runs-on: ubuntu-22.04
needs: validate
permissions:
contents: read
packages: write
steps:
- uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
- uses: actions/checkout@v4
- name: Set up Helm
uses: azure/setup-helm@b9e51907a09c216f16ebe8536097933489208112 # v4.3.0
uses: azure/setup-helm@v4
with:
version: ${{ env.HELM_VERSION }}
@@ -91,7 +82,7 @@ jobs:
helm package charts/wadm -d .helm-charts
- name: Login to GHCR
uses: docker/login-action@184bdaa0721073962dff0199f1fb9940f07167d1 # v3.5.0
uses: docker/login-action@v3
with:
registry: ghcr.io
username: ${{ github.repository_owner }}


@@ -5,9 +5,6 @@ on:
branches:
- main
permissions:
contents: read
jobs:
test:
name: e2e
@@ -15,42 +12,38 @@ jobs:
strategy:
fail-fast: false
matrix:
test: [e2e_multiple_hosts, e2e_upgrades, e2e_shared]
# TODO: Re-enable the multitenant and upgrades tests in followup to #247
# e2e_test: [e2e_multiple_hosts, e2e_multitenant, e2e_upgrades]
e2e_test: [e2e_multiple_hosts]
steps:
- uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
- uses: actions/checkout@v4
- name: Install latest Rust stable toolchain
uses: dtolnay/rust-toolchain@1ff72ee08e3cb84d84adba594e0a297990fc1ed3 # stable
uses: dtolnay/rust-toolchain@stable
with:
toolchain: stable
default: true
components: clippy, rustfmt
# Cache: rust
- uses: Swatinem/rust-cache@98c8021b550208e191a6a3145459bfc9fb29c4c0 # v2.8.0
- uses: Swatinem/rust-cache@v2
with:
key: 'ubuntu-22.04-rust-cache'
# If the test uses a docker compose file, pre-emptively pull images used in docker compose
- name: Pull images for test ${{ matrix.test }}
shell: bash
run: |
export DOCKER_COMPOSE_FILE=tests/docker-compose-${{ matrix.test }}.yaml;
[[ -f "$DOCKER_COMPOSE_FILE" ]] && docker compose -f $DOCKER_COMPOSE_FILE pull;
key: "ubuntu-22.04-rust-cache"
# Run e2e tests in a matrix for efficiency
- name: Run tests ${{ matrix.test }}
- name: Run tests ${{ matrix.e2e_test }}
id: test
env:
WADM_E2E_TEST: ${{ matrix.test }}
WADM_E2E_TEST: ${{ matrix.e2e_test }}
run: make test-individual-e2e
# if the previous step fails, upload logs
- name: Upload logs for debugging
uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2
uses: actions/upload-artifact@v3
if: ${{ failure() && steps.test.outcome == 'failure' }}
with:
name: e2e-logs-${{ matrix.test }}
path: ./tests/e2e_log/*
name: e2e-logs-${{ matrix.e2e_test }}
path: ./test/e2e_log/*
# Be nice and only retain the logs for 7 days
retention-days: 7


@@ -9,132 +9,141 @@ on:
- 'client-v*'
workflow_dispatch: # Allow manual creation of artifacts without a release
permissions:
contents: read
defaults:
run:
shell: bash
jobs:
build:
name: build release assets
runs-on: ${{ matrix.config.runnerOs }}
runs-on: ${{ matrix.config.os }}
outputs:
version_output: ${{ steps.version_output.outputs.version }}
strategy:
matrix:
config:
# NOTE: We are building on an older version of ubuntu because of libc compatibility
# issues. Namely, if we build on a new version of libc, it isn't backwards compatible with
# old versions. But if we build on the old version, it is compatible with the newer
# versions running in ubuntu 22 and its ilk
- {
runnerOs: 'ubuntu-latest',
buildCommand: 'cargo zigbuild',
target: 'x86_64-unknown-linux-musl',
uploadArtifactSuffix: 'linux-amd64',
buildOutputPath: 'target/x86_64-unknown-linux-musl/release/wadm',
os: 'ubuntu-20.04',
arch: 'amd64',
extension: '',
targetPath: 'target/release/',
}
- {
runnerOs: 'ubuntu-latest',
buildCommand: 'cargo zigbuild',
target: 'aarch64-unknown-linux-musl',
uploadArtifactSuffix: 'linux-aarch64',
buildOutputPath: 'target/aarch64-unknown-linux-musl/release/wadm',
os: 'ubuntu-20.04',
arch: 'aarch64',
extension: '',
targetPath: 'target/aarch64-unknown-linux-gnu/release/',
}
- {
runnerOs: 'macos-14',
buildCommand: 'cargo zigbuild',
target: 'x86_64-apple-darwin',
uploadArtifactSuffix: 'macos-amd64',
buildOutputPath: 'target/x86_64-apple-darwin/release/wadm',
os: 'macos-13',
arch: 'amd64',
extension: '',
targetPath: 'target/release/',
}
- {
runnerOs: 'macos-14',
buildCommand: 'cargo zigbuild',
target: 'aarch64-apple-darwin',
uploadArtifactSuffix: 'macos-aarch64',
buildOutputPath: 'target/aarch64-apple-darwin/release/wadm',
os: 'windows-latest',
arch: 'amd64',
extension: '.exe',
targetPath: 'target/release/',
}
- {
runnerOs: 'windows-latest',
buildCommand: 'cargo build',
target: 'x86_64-pc-windows-msvc',
uploadArtifactSuffix: 'windows-amd64',
buildOutputPath: 'target/x86_64-pc-windows-msvc/release/wadm.exe',
os: 'macos-latest',
arch: 'aarch64',
extension: '',
targetPath: 'target/release/',
}
steps:
- uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
- uses: actions/checkout@v4
- name: set the release version (tag)
if: ${{ startsWith(github.ref, 'refs/tags/v') }}
run: |
echo "RELEASE_VERSION=${GITHUB_REF/refs\/tags\//}" >> $GITHUB_ENV
if: startsWith(github.ref, 'refs/tags/v')
shell: bash
run: echo "RELEASE_VERSION=${GITHUB_REF/refs\/tags\//}" >> $GITHUB_ENV
- name: set the release version (main)
if: ${{ github.ref == 'refs/heads/main' }}
run: |
echo "RELEASE_VERSION=canary" >> $GITHUB_ENV
if: github.ref == 'refs/heads/main'
shell: bash
run: echo "RELEASE_VERSION=canary" >> $GITHUB_ENV
- name: Output Version
id: version_output
run: echo "version=$RELEASE_VERSION" >> $GITHUB_OUTPUT
- name: Install Zig
uses: mlugg/setup-zig@8d6198c65fb0feaa111df26e6b467fea8345e46f # v2.0.5
with:
version: 0.13.0
- name: lowercase the runner OS name
shell: bash
run: |
OS=$(echo "${{ runner.os }}" | tr '[:upper:]' '[:lower:]')
echo "RUNNER_OS=$OS" >> $GITHUB_ENV
- name: Install latest Rust stable toolchain
uses: dtolnay/rust-toolchain@1ff72ee08e3cb84d84adba594e0a297990fc1ed3 # stable
uses: dtolnay/rust-toolchain@stable
if: matrix.config.arch != 'aarch64' || startsWith(matrix.config.os, 'macos')
with:
toolchain: stable
components: clippy, rustfmt
target: ${{ matrix.config.target }}
- name: Install cargo zigbuild
uses: taiki-e/install-action@2c73a741d1544cc346e9b0af11868feba03eb69d # v2.58.9
with:
tool: cargo-zigbuild
- name: Build wadm
- name: setup for cross-compile builds
if: matrix.config.arch == 'aarch64' && matrix.config.os == 'ubuntu-20.04'
run: |
${{ matrix.config.buildCommand }} --release --bin wadm --target ${{ matrix.config.target }}
sudo apt-get update
sudo apt install gcc-aarch64-linux-gnu g++-aarch64-linux-gnu
rustup toolchain install stable-aarch64-unknown-linux-gnu
rustup target add --toolchain stable-aarch64-unknown-linux-gnu aarch64-unknown-linux-gnu
echo "CARGO_TARGET_AARCH64_UNKNOWN_LINUX_GNU_LINKER=aarch64-linux-gnu-gcc" >> $GITHUB_ENV
echo "CC_aarch64_unknown_linux_gnu=aarch64-linux-gnu-gcc" >> $GITHUB_ENV
echo "CXX_aarch64_unknown_linux_gnu=aarch64-linux-gnu-g++" >> $GITHUB_ENV
- uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2
- name: Install latest Rust stable toolchain
uses: dtolnay/rust-toolchain@stable
if: matrix.config.arch == 'aarch64' && matrix.config.os == 'ubuntu-20.04'
with:
name: wadm-${{ env.RELEASE_VERSION }}-${{ matrix.config.uploadArtifactSuffix }}
toolchain: stable
components: clippy, rustfmt
target: aarch64-unknown-linux-gnu
- name: build release (amd64 linux, macos, windows)
if: matrix.config.arch != 'aarch64' || startsWith(matrix.config.os, 'macos')
run: 'cargo build --release --bin wadm'
- name: build release (arm64 linux)
if: matrix.config.arch == 'aarch64' && matrix.config.os == 'ubuntu-20.04'
run: 'cargo build --release --bin wadm --target aarch64-unknown-linux-gnu'
- uses: actions/upload-artifact@v3
with:
name: wadm-${{ env.RELEASE_VERSION }}-${{ env.RUNNER_OS }}-${{ matrix.config.arch }}
if-no-files-found: error
path: |
${{ matrix.config.buildOutputPath }}
${{ matrix.config.targetPath }}wadm${{ matrix.config.extension }}
publish:
if: ${{ startsWith(github.ref, 'refs/tags/v') }}
name: publish release assets
runs-on: ubuntu-latest
runs-on: ubuntu-20.04
needs: build
permissions:
contents: write
if: startsWith(github.ref, 'refs/tags/v')
env:
RELEASE_VERSION: ${{ needs.build.outputs.version_output }}
steps:
- name: Download release assets
uses: actions/download-artifact@634f93cb2916e3fdff6788551b99b062d0335ce0 # v5.0.0
- name: Prepare release
- name: download release assets
uses: actions/download-artifact@v3
- name: Generate Checksums
run: |
for dir in */; do
test -d "$dir" || continue
tarball="${dir%/}.tar.gz"
tar -czvf "${tarball}" "$dir"
sha256sum "${tarball}" >> SHA256SUMS
cd "$dir" || continue
sum=$(sha256sum * | awk '{ print $1 }')
echo "$dir:$sum" >> checksums-${{ env.RELEASE_VERSION }}.txt
cd ..
done
- name: Create github release
uses: softprops/action-gh-release@72f2c25fcb47643c292f7107632f7a47c1df5cd8 # v2.3.2
- name: Package Binaries
run: for dir in */; do tar -czvf "${dir%/}.tar.gz" "$dir"; done
- name: Publish to GHCR
uses: softprops/action-gh-release@v1
with:
token: ${{ secrets.GITHUB_TOKEN }}
prerelease: false
draft: false
files: |
SHA256SUMS
checksums-${{ env.RELEASE_VERSION }}.txt
wadm-${{ env.RELEASE_VERSION }}-linux-aarch64.tar.gz
wadm-${{ env.RELEASE_VERSION }}-linux-amd64.tar.gz
wadm-${{ env.RELEASE_VERSION }}-macos-aarch64.tar.gz
@@ -142,92 +151,77 @@ jobs:
wadm-${{ env.RELEASE_VERSION }}-windows-amd64.tar.gz
crate:
if: ${{ startsWith(github.ref, 'refs/tags/v') || startsWith(github.ref, 'refs/tags/types-v') || startsWith(github.ref, 'refs/tags/client-v') }}
name: Publish crate
runs-on: ubuntu-latest
if: startsWith(github.ref, 'refs/tags/v') || startsWith(github.ref, 'refs/tags/types-v') || startsWith(github.ref, 'refs/tags/client-v')
needs: build
steps:
- uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
- uses: actions/checkout@v4
- name: Install latest Rust stable toolchain
uses: dtolnay/rust-toolchain@1ff72ee08e3cb84d84adba594e0a297990fc1ed3 # stable
uses: dtolnay/rust-toolchain@stable
with:
toolchain: stable
- name: Cargo login
run: |
cargo login ${{ secrets.CRATES_TOKEN }}
run: cargo login ${{ secrets.CRATES_TOKEN }}
shell: bash
- name: Cargo publish wadm-types
if: ${{ startsWith(github.ref, 'refs/tags/types-v') }}
if: startsWith(github.ref, 'refs/tags/types-v')
run: cargo publish
working-directory: ./crates/wadm-types
run: |
cargo publish
shell: bash
- name: Cargo publish wadm lib
if: ${{ startsWith(github.ref, 'refs/tags/v') }}
if: startsWith(github.ref, 'refs/tags/v')
run: cargo publish
working-directory: ./crates/wadm
run: |
cargo publish
shell: bash
- name: Cargo publish wadm-client
if: ${{ startsWith(github.ref, 'refs/tags/client-v') }}
if: startsWith(github.ref, 'refs/tags/client-v')
run: cargo publish
working-directory: ./crates/wadm-client
run: |
cargo publish
shell: bash
docker-image:
name: Build and push docker images
runs-on: ubuntu-latest
needs: build
permissions:
contents: read
packages: write
env:
RELEASE_VERSION: ${{ needs.build.outputs.version_output }}
steps:
- uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
- uses: actions/checkout@v4
- name: Set up QEMU
uses: docker/setup-qemu-action@29109295f81e9208d7d86ff1c6c12d2833863392 # v3.6.0
uses: docker/setup-qemu-action@v3
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@e468171a9de216ec08956ac3ada2f0791b6bd435 # v3.11.1
uses: docker/setup-buildx-action@v3
- uses: actions/download-artifact@634f93cb2916e3fdff6788551b99b062d0335ce0 # v5.0.0
- uses: actions/download-artifact@v3
with:
name: wadm-${{ env.RELEASE_VERSION }}-linux-aarch64
path: ./artifacts
pattern: '*linux*'
- run: mv ./artifacts/wadm ./artifacts/wadm-${{ env.RELEASE_VERSION }}-linux-aarch64 && chmod +x ./artifacts/wadm-${{ env.RELEASE_VERSION }}-linux-aarch64
- name: Prepare container artifacts
working-directory: ./artifacts
run: |
for dir in */; do
name="${dir%/}"
mv "${name}/wadm" wadm
chmod +x wadm
rmdir "${name}"
mv wadm "${name}"
done
- uses: actions/download-artifact@v3
with:
name: wadm-${{ env.RELEASE_VERSION }}-linux-amd64
path: ./artifacts
- run: mv ./artifacts/wadm ./artifacts/wadm-${{ env.RELEASE_VERSION }}-linux-amd64 && chmod +x ./artifacts/wadm-${{ env.RELEASE_VERSION }}-linux-amd64
- name: Login to GitHub Container Registry
uses: docker/login-action@184bdaa0721073962dff0199f1fb9940f07167d1 # v3.5.0
uses: docker/login-action@v3
with:
registry: ghcr.io
username: ${{ github.repository_owner }}
password: ${{ secrets.GITHUB_TOKEN }}
password: ${{ secrets.WADM_GITHUB_TOKEN }}
- name: lowercase repository owner
run: |
echo "OWNER=${GITHUB_REPOSITORY_OWNER,,}" >>$GITHUB_ENV
- name: Set the formatted release version for the docker tag
if: ${{ startsWith(github.ref, 'refs/tags/v') }}
run: |
echo "RELEASE_VERSION_DOCKER_TAG=${RELEASE_VERSION#v}" >> $GITHUB_ENV
- name: Build and push (tag)
uses: docker/build-push-action@263435318d21b8e681c14492fe198d362a7d2c83 # v6.18.0
if: ${{ startsWith(github.ref, 'refs/tags/v') }}
uses: docker/build-push-action@v3
if: startsWith(github.ref, 'refs/tags/v')
with:
push: true
platforms: linux/amd64,linux/arm64
@@ -235,30 +229,11 @@ jobs:
build-args: |
BIN_ARM64=./artifacts/wadm-${{ env.RELEASE_VERSION }}-linux-aarch64
BIN_AMD64=./artifacts/wadm-${{ env.RELEASE_VERSION }}-linux-amd64
tags: |
ghcr.io/${{ env.OWNER }}/wadm:latest
ghcr.io/${{ env.OWNER }}/wadm:${{ env.RELEASE_VERSION }},
ghcr.io/${{ env.OWNER }}/wadm:${{ env.RELEASE_VERSION_DOCKER_TAG }}
- name: Build and push wolfi (tag)
uses: docker/build-push-action@263435318d21b8e681c14492fe198d362a7d2c83 # v6.18.0
if: ${{ startsWith(github.ref, 'refs/tags/v') }}
with:
push: true
platforms: linux/amd64,linux/arm64
context: ./
file: ./Dockerfile.wolfi
build-args: |
BIN_ARM64=./artifacts/wadm-${{ env.RELEASE_VERSION }}-linux-aarch64
BIN_AMD64=./artifacts/wadm-${{ env.RELEASE_VERSION }}-linux-amd64
tags: |
ghcr.io/${{ env.OWNER }}/wadm:latest-wolfi
ghcr.io/${{ env.OWNER }}/wadm:${{ env.RELEASE_VERSION }}-wolfi
ghcr.io/${{ env.OWNER }}/wadm:${{ env.RELEASE_VERSION_DOCKER_TAG }}-wolfi
tags: ghcr.io/${{ env.OWNER }}/wadm:latest,ghcr.io/${{ env.OWNER }}/wadm:${{ env.RELEASE_VERSION }}
- name: Build and push (main)
uses: docker/build-push-action@263435318d21b8e681c14492fe198d362a7d2c83 # v6.18.0
if: ${{ github.ref == 'refs/heads/main' }}
uses: docker/build-push-action@v3
if: github.ref == 'refs/heads/main'
with:
push: true
platforms: linux/amd64,linux/arm64
@@ -267,16 +242,3 @@ jobs:
BIN_ARM64=./artifacts/wadm-${{ env.RELEASE_VERSION }}-linux-aarch64
BIN_AMD64=./artifacts/wadm-${{ env.RELEASE_VERSION }}-linux-amd64
tags: ghcr.io/${{ env.OWNER }}/wadm:canary
- name: Build and push (main)
uses: docker/build-push-action@263435318d21b8e681c14492fe198d362a7d2c83 # v6.18.0
if: ${{ github.ref == 'refs/heads/main' }}
with:
push: true
platforms: linux/amd64,linux/arm64
context: ./
file: ./Dockerfile.wolfi
build-args: |
BIN_ARM64=./artifacts/wadm-${{ env.RELEASE_VERSION }}-linux-aarch64
BIN_AMD64=./artifacts/wadm-${{ env.RELEASE_VERSION }}-linux-amd64
tags: ghcr.io/${{ env.OWNER }}/wadm:canary-wolfi


@@ -1,73 +0,0 @@
# This workflow uses actions that are not certified by GitHub. They are provided
# by a third-party and are governed by separate terms of service, privacy
# policy, and support documentation.
name: Scorecard supply-chain security
on:
# For Branch-Protection check. Only the default branch is supported. See
# https://github.com/ossf/scorecard/blob/main/docs/checks.md#branch-protection
branch_protection_rule:
# To guarantee Maintained check is occasionally updated. See
# https://github.com/ossf/scorecard/blob/main/docs/checks.md#maintained
schedule:
- cron: '28 13 * * 3'
push:
branches: [ "main" ]
# Declare default permissions as read only.
permissions: read-all
jobs:
analysis:
name: Scorecard analysis
runs-on: ubuntu-latest
permissions:
# Needed to upload the results to code-scanning dashboard.
security-events: write
# Needed to publish results and get a badge (see publish_results below).
id-token: write
# Uncomment the permissions below if installing in a private repository.
# contents: read
# actions: read
steps:
- name: "Checkout code"
uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
with:
persist-credentials: false
- name: "Run analysis"
uses: ossf/scorecard-action@05b42c624433fc40578a4040d5cf5e36ddca8cde # v2.4.2
with:
results_file: results.sarif
results_format: sarif
# (Optional) "write" PAT token. Uncomment the `repo_token` line below if:
# - you want to enable the Branch-Protection check on a *public* repository, or
# - you are installing Scorecard on a *private* repository
# To create the PAT, follow the steps in https://github.com/ossf/scorecard-action?tab=readme-ov-file#authentication-with-fine-grained-pat-optional.
# repo_token: ${{ secrets.SCORECARD_TOKEN }}
# Public repositories:
# - Publish results to OpenSSF REST API for easy access by consumers
# - Allows the repository to include the Scorecard badge.
# - See https://github.com/ossf/scorecard-action#publishing-results.
# For private repositories:
# - `publish_results` will always be set to `false`, regardless
# of the value entered here.
publish_results: true
# Upload the results as artifacts (optional). Commenting out will disable uploads of run results in SARIF
# format to the repository Actions tab.
- name: "Upload artifact"
uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v3.pre.node20
with:
name: SARIF file
path: results.sarif
retention-days: 5
# Upload the results to GitHub's code scanning dashboard (optional).
# Commenting out will disable upload of results to your repo's Code Scanning dashboard
- name: "Upload to code-scanning"
uses: github/codeql-action/upload-sarif@d6bbdef45e766d081b84a2def353b0055f728d3e # v3.29.3
with:
sarif_file: results.sarif


@@ -5,9 +5,6 @@ on:
branches:
- main
permissions:
contents: read
jobs:
test:
name: Test
@@ -15,55 +12,37 @@ jobs:
strategy:
matrix:
os: [ubuntu-22.04]
nats_version: [2.10.22]
nats_version: [2.10.7]
steps:
- uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
- name: Install latest Rust stable toolchain
uses: dtolnay/rust-toolchain@1ff72ee08e3cb84d84adba594e0a297990fc1ed3 # stable
with:
toolchain: stable
components: clippy, rustfmt
# Cache: rust
- uses: Swatinem/rust-cache@98c8021b550208e191a6a3145459bfc9fb29c4c0 # v2.8.0
with:
key: '${{ matrix.os }}-rust-cache'
- uses: actions/checkout@v4
- name: Check that Wadm JSON Schema is up-to-date
shell: bash
run: |
cargo run --bin wadm-schema
if [ $(git diff --exit-code > /dev/null) ]; then
echo 'Wadm JSON Schema is out of date. Please run `cargo run --bin wadm-schema` and commit the changes.'
exit 1
fi
- name: install wash
uses: taiki-e/install-action@2c73a741d1544cc346e9b0af11868feba03eb69d # v2.58.9
diff ./oam/oam.schema.json ./crates/wadm/oam.schema.json || { echo 'Wadm JSON Schema is out of date. Please copy the file from oam/oam.schema.json to ./crates/wadm/oam.schema.json and commit the change.'; exit 1; }
- name: Install latest Rust stable toolchain
uses: dtolnay/rust-toolchain@stable
with:
tool: wash@0.38.0
toolchain: stable
default: true
components: clippy, rustfmt
# Cache: rust
- uses: Swatinem/rust-cache@v2
with:
key: "${{ matrix.os }}-rust-cache"
- name: Install wash
uses: wasmCloud/common-actions/install-wash@main
# GH Actions doesn't currently support passing args to service containers and there is no way
# to use an environment variable to turn on jetstream for nats, so we manually start it here
- name: Start NATS
run: docker run --rm -d --name wadm-test -p 127.0.0.1:4222:4222 nats:${{ matrix.nats_version }} -js
- name: Build
run: |
cargo build --all-features --all-targets --workspace
# Make sure the wadm crate works well with feature combinations
# The above command builds the workspace and tests with no features
- name: Check wadm crate with features
run: |
cargo check -p wadm --no-default-features
cargo check -p wadm --features cli
cargo check -p wadm --features http_admin
cargo check -p wadm --features cli,http_admin
# Run all tests
- name: Run tests
run: |
cargo test --workspace -- --nocapture
cargo test -- --nocapture


@@ -1,47 +0,0 @@
name: wit-wasmcloud-wadm-publish
on:
push:
tags:
- "wit-wasmcloud-wadm-v*"
permissions:
contents: read
jobs:
build:
runs-on: ubuntu-latest
permissions:
contents: write
packages: write
steps:
- uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
with:
sparse-checkout: |
wit
.github
- name: Extract tag context
id: ctx
run: |
version=${GITHUB_REF_NAME#wit-wasmcloud-wadm-v}
echo "version=${version}" >> "$GITHUB_OUTPUT"
echo "tarball=wit-wasmcloud-wadm-${version}.tar.gz" >> "$GITHUB_OUTPUT"
echo "version is ${version}"
- uses: ./.github/actions/configure-wkg
with:
oci-username: ${{ github.repository_owner }}
oci-password: ${{ secrets.GITHUB_TOKEN }}
- name: Build
run: wkg wit build --wit-dir wit/wadm -o package.wasm
- name: Push version-tagged WebAssembly binary to GHCR
run: wkg publish package.wasm
- name: Package tarball for release
run: |
mkdir -p release/wit
cp wit/wadm/*.wit release/wit/
tar cvzf ${{ steps.ctx.outputs.tarball }} -C release wit
- name: Release
uses: softprops/action-gh-release@72f2c25fcb47643c292f7107632f7a47c1df5cd8 # v2.3.2
with:
files: ${{ steps.ctx.outputs.tarball }}
make_latest: "false"

.gitignore vendored

@@ -1,5 +1,5 @@
/target
tests/e2e_log/
test/e2e_log/
*.dump
@@ -8,7 +8,4 @@ tests/e2e_log/
# Ignore IDE specific files
.idea/
.vscode/
.direnv/
result
.vscode/

Cargo.lock generated

File diff suppressed because it is too large


@@ -1,17 +1,13 @@
[package]
name = "wadm-cli"
description = "wasmCloud Application Deployment Manager: A tool for running Wasm applications in wasmCloud"
version.workspace = true
version = "0.12.0"
edition = "2021"
authors = ["wasmCloud Team"]
keywords = ["webassembly", "wasmcloud", "wadm"]
license = "Apache-2.0"
readme = "README.md"
repository = "https://github.com/wasmcloud/wadm"
default-run = "wadm"
[workspace.package]
version = "0.21.0"
[features]
default = []
@@ -23,7 +19,11 @@ members = ["crates/*"]
[dependencies]
anyhow = { workspace = true }
async-nats = { workspace = true }
async-trait = { workspace = true }
clap = { workspace = true, features = ["derive", "cargo", "env"] }
futures = { workspace = true }
nkeys = { workspace = true }
# One version back to avoid clashes with 0.10 of otlp
opentelemetry = { workspace = true, features = ["rt-tokio"] }
# 0.10 to avoid protoc dep
@@ -31,32 +31,30 @@ opentelemetry-otlp = { workspace = true, features = [
"http-proto",
"reqwest-client",
] }
schemars = { workspace = true }
serde_json = { workspace = true }
tokio = { workspace = true, features = ["full"] }
tracing = { workspace = true, features = ["log"] }
tracing-futures = { workspace = true }
tracing-opentelemetry = { workspace = true }
tracing-subscriber = { workspace = true, features = ["env-filter", "json"] }
wadm = { workspace = true, features = ["cli", "http_admin"] }
wasmcloud-control-interface = { workspace = true }
wadm = { workspace = true }
wadm-types = { workspace = true }
[workspace.dependencies]
anyhow = "1"
async-nats = "0.39"
async-nats = "0.33"
async-trait = "0.1"
base64 = "0.21.2"
bytes = "1"
chrono = "0.4"
clap = { version = "4", features = ["derive", "cargo", "env"] }
cloudevents-sdk = "0.8"
cloudevents-sdk = "0.7"
futures = "0.3"
http = { version = "1", default-features = false }
http-body-util = { version = "0.1", default-features = false }
hyper = { version = "1", default-features = false }
hyper-util = { version = "0.1", default-features = false }
indexmap = { version = "2", features = ["serde"] }
jsonschema = "0.29"
jsonschema = "0.17"
lazy_static = "1"
nkeys = "0.4.5"
nkeys = "0.3.0"
once_cell = "1"
# One version back to avoid clashes with 0.10 of otlp
opentelemetry = { version = "0.17", features = ["rt-tokio"] }
# 0.10 to avoid protoc dep
@@ -64,55 +62,35 @@ opentelemetry-otlp = { version = "0.10", features = [
"http-proto",
"reqwest-client",
] }
rand = { version = "0.9", features = ["small_rng"] }
# NOTE(thomastaylor312): Pinning this temporarily to 1.10 due to transitive dependency with oci
# crates that are pinned to 1.10
regex = "~1.10"
schemars = "0.8"
semver = { version = "1.0.25", features = ["serde"] }
rand = { version = "0.8", features = ["small_rng"] }
regex = "1.9.3"
semver = { version = "1.0.16", features = ["serde"] }
serde = "1"
serde_json = "1"
serde_yaml = "0.9"
sha2 = "0.10.9"
thiserror = "2"
sha2 = "0.10.2"
thiserror = "1"
tokio = { version = "1", default-features = false }
tracing = { version = "0.1", features = ["log"] }
tracing-futures = "0.2"
tracing-opentelemetry = { version = "0.17" }
tracing-subscriber = { version = "0.3.7", features = ["env-filter", "json"] }
ulid = { version = "1", features = ["serde"] }
utoipa = "5"
uuid = "1"
wadm = { version = "0.21", path = "./crates/wadm" }
wadm-client = { version = "0.10", path = "./crates/wadm-client" }
wadm-types = { version = "0.8", path = "./crates/wadm-types" }
wasmcloud-control-interface = "2.4.0"
wasmcloud-secrets-types = "0.5.0"
wit-bindgen-wrpc = { version = "0.9", default-features = false }
wit-bindgen = { version = "0.36.0", default-features = false }
wadm = { version = "0.12.0", path = "./crates/wadm" }
wadm-client = { version = "0.1.0", path = "./crates/wadm-client" }
wadm-types = { version = "0.1.0", path = "./crates/wadm-types" }
wasmcloud-control-interface = "1.0.0"
[dev-dependencies]
async-nats = { workspace = true }
base64 = { workspace = true }
chrono = { workspace = true }
futures = { workspace = true }
serde = { workspace = true }
serde_json = { workspace = true }
serde_yaml = { workspace = true }
serial_test = "3"
serial_test = "1"
wadm-client = { workspace = true }
wadm-types = { workspace = true }
wasmcloud-control-interface = { workspace = true }
testcontainers = "0.25"
[build-dependencies]
schemars = { workspace = true }
serde_json = { workspace = true }
wadm-types = { workspace = true }
[[bin]]
name = "wadm"
path = "src/main.rs"
[[bin]]
name = "wadm-schema"
path = "src/schema.rs"


@@ -1,17 +0,0 @@
FROM cgr.dev/chainguard/wolfi-base:latest AS base
FROM base AS base-amd64
ARG BIN_AMD64
ARG BIN=$BIN_AMD64
FROM base AS base-arm64
ARG BIN_ARM64
ARG BIN=$BIN_ARM64
FROM base-$TARGETARCH
# Copy application binary from disk
COPY ${BIN} /usr/local/bin/wadm
# Run the application
ENTRYPOINT ["/usr/local/bin/wadm"]


@@ -1,25 +0,0 @@
# MAINTAINERS
The following individuals are responsible for reviewing code, managing issues, and ensuring the overall quality of `wadm`.
## @wasmCloud/wadm-maintainers
Name: Joonas Bergius
GitHub: @joonas
Organization: Cosmonic
Name: Dan Norris
GitHub: @protochron
Organization: Cosmonic
Name: Taylor Thomas
GitHub: @thomastaylor312
Organization: Cosmonic
Name: Ahmed Tadde
GitHub: @ahmedtadde
Organization: PreciseTarget
Name: Brooks Townsend
GitHub: @brooksmtownsend
Organization: Cosmonic


@@ -7,13 +7,6 @@ MAKEFLAGS += --no-builtin-rules
MAKEFLAGS += --no-print-directory
MAKEFLAGS += -S
OS_NAME := $(shell uname -s | tr '[:upper:]' '[:lower:]')
ifeq ($(OS_NAME),darwin)
NC_FLAGS := -czt
else
NC_FLAGS := -Czt
endif
.DEFAULT: help
CARGO ?= cargo
@@ -77,7 +70,7 @@ build-docker: ## Build wadm docker image
CARGO_TEST_TARGET ?=
test:: ## Run tests
ifeq ($(shell nc $(NC_FLAGS) -w1 127.0.0.1 4222 || echo fail),fail)
ifeq ($(shell nc -czt -w1 127.0.0.1 4222 || echo fail),fail)
$(DOCKER) run --rm -d --name wadm-test -p 127.0.0.1:4222:4222 nats:2.10 -js
$(CARGO) test $(CARGO_TEST_TARGET) -- --nocapture
$(DOCKER) stop wadm-test
@@ -86,19 +79,19 @@ else
endif
test-e2e:: ## Run e2e tests
ifeq ($(shell nc $(NC_FLAGS) -w1 127.0.0.1 4222 || echo fail),fail)
ifeq ($(shell nc -czt -w1 127.0.0.1 4222 || echo fail),fail)
@$(MAKE) build
@# Reenable this once we've enabled all tests
@# RUST_BACKTRACE=1 $(CARGO) test --test e2e_multitenant --features _e2e_tests -- --nocapture
RUST_BACKTRACE=1 $(CARGO) test --test e2e_multiple_hosts --features _e2e_tests -- --nocapture
RUST_BACKTRACE=1 $(CARGO) test --test e2e_upgrades --features _e2e_tests -- --nocapture
@# RUST_BACKTRACE=1 $(CARGO) test --test e2e_upgrades --features _e2e_tests -- --nocapture
else
@echo "WARN: Not running e2e tests. NATS must not be currently running"
exit 1
endif
test-individual-e2e:: ## Runs an individual e2e test based on the WADM_E2E_TEST env var
ifeq ($(shell nc $(NC_FLAGS) -w1 127.0.0.1 4222 || echo fail),fail)
ifeq ($(shell nc -czt -w1 127.0.0.1 4222 || echo fail),fail)
@$(MAKE) build
RUST_BACKTRACE=1 $(CARGO) test --test $(WADM_E2E_TEST) --features _e2e_tests -- --nocapture
else
@@ -113,8 +106,9 @@ endif
stream-cleanup: ## Removes all streams that wadm creates
-$(NATS) stream del wadm_commands --force
-$(NATS) stream del wadm_events --force
-$(NATS) stream del wadm_event_consumer --force
-$(NATS) stream del wadm_notify --force
-$(NATS) stream del wadm_mirror --force
-$(NATS) stream del wadm_multitenant_mirror --force
-$(NATS) stream del wadm_status --force
-$(NATS) stream del KV_wadm_state --force
-$(NATS) stream del KV_wadm_manifests --force


@@ -2,18 +2,10 @@
# wasmCloud Application Deployment Manager (wadm)
Wadm is a Wasm-native orchestrator for managing and scaling declarative wasmCloud applications.
## Responsibilities
**wadm** is powerful because it focuses on a small set of core responsibilities, making it efficient and easy to manage.
- **Manage application specifications** - Manage applications which represent _desired state_. This includes
the creation, deletion, upgrades and rollback of applications to previous versions. Application
specifications are defined using the [Open Application Model](https://oam.dev/). For more
information on wadm's specific OAM features, see our [OAM README](./oam/README.md).
- **Observe state** - Monitor wasmCloud [CloudEvents](https://wasmcloud.com/docs/reference/cloud-event-list) from all hosts in a [lattice](https://wasmcloud.com/docs/deployment/lattice/) to build the current state.
- **Reconcile with compensating commands** - When the current state doesn't match the desired state, issue commands to wasmCloud hosts in the lattice with the [control interface](https://wasmcloud.com/docs/hosts/lattice-protocols/control-interface) to reach desired state. Wadm is constantly reconciling and will react immediately to ensure applications stay deployed. For example, if a host stops, wadm will reconcile the `host_stopped` event and issue any necessary commands to start components on other available hosts.
The wasmCloud Application Deployment Manager (**wadm**) enables declarative wasmCloud applications.
It's responsible for managing a set of application deployment specifications, monitoring the current
state of an entire [lattice](https://wasmcloud.com/docs/deployment/lattice/), and issuing the
appropriate lattice control commands required to close the gap between observed and desired state.
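To make the observe-and-reconcile loop described above concrete, here is a conceptual sketch only (hypothetical types, not wadm's actual code) of how desired state is compared against observed state to produce compensating commands:

```rust
// Conceptual sketch of the reconcile loop described above. `State` and the
// string "commands" are hypothetical stand-ins for wadm's real lattice types.
#[derive(Clone, PartialEq)]
struct State {
    running_instances: u32,
}

fn reconcile(desired: &State, observed: &State) -> Vec<String> {
    // Issue compensating commands until observed state matches desired state.
    let mut commands = Vec::new();
    if observed.running_instances < desired.running_instances {
        let missing = desired.running_instances - observed.running_instances;
        commands.push(format!("start {missing} component instance(s)"));
    } else if observed.running_instances > desired.running_instances {
        let extra = observed.running_instances - desired.running_instances;
        commands.push(format!("stop {extra} component instance(s)"));
    }
    commands
}

fn main() {
    // e.g. after a host_stopped event drops 3 of the 10 desired instances:
    let desired = State { running_instances: 10 };
    let observed = State { running_instances: 7 };
    assert_eq!(reconcile(&desired, &observed), vec!["start 3 component instance(s)"]);
}
```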
## Using wadm
@@ -23,7 +15,7 @@ Wadm is a Wasm-native orchestrator for managing and scaling declarative wasmClou
You can easily run **wadm** by downloading the [`wash`](https://wasmcloud.com/docs/installation) CLI, which automatically launches wadm alongside NATS and a wasmCloud host when you run `wash up`. You can use `wash` to query, create, and deploy applications.
```bash
```
wash up -d # Start NATS, wasmCloud, and wadm in the background
```
@@ -31,7 +23,7 @@ Follow the [wasmCloud quickstart](https://wasmcloud.com/docs/tour/hello-world) t
If you prefer to run **wadm** separately and/or connect to running wasmCloud hosts, you can instead opt for using the latest GitHub release artifact and executing the binary. Simply replace the latest version, your operating system, and architecture below. Please note that wadm requires a wasmCloud host version >=0.63.0
```bash
```
# Install wadm
curl -fLO https://github.com/wasmCloud/wadm/releases/download/<version>/wadm-<version>-<os>-<arch>.tar.gz
tar -xvf wadm-<version>-<os>-<arch>.tar.gz
@@ -58,17 +50,17 @@ spec:
type: component
properties:
# Run components from OCI registries as below or from a local .wasm component binary.
image: ghcr.io/wasmcloud/components/http-hello-world-rust:0.1.0
image: wasmcloud.azurecr.io/http-hello-world:0.1.0
traits:
# One replica of this component will run
- type: spreadscaler
properties:
instances: 1
replicas: 1
# The httpserver capability provider, started from the official wasmCloud OCI artifact
- name: httpserver
type: capability
properties:
image: ghcr.io/wasmcloud/http-server:0.22.0
image: ghcr.io/wasmcloud/http-server:0.20.0
traits:
# Link the HTTP server and set it to listen on the local machine's port 8080
- type: link
@@ -77,16 +69,15 @@ spec:
namespace: wasi
package: http
interfaces: [incoming-handler]
source:
config:
- name: default-http
properties:
ADDRESS: 127.0.0.1:8080
source_config:
- name: default-http
properties:
ADDRESS: 127.0.0.1:8080
```
Then use `wash` to deploy the manifest:
```bash
```
wash app deploy hello.yaml
```
@@ -94,13 +85,13 @@ wash app deploy hello.yaml
When you're done, you can use `wash` to undeploy the application:
```bash
```
wash app undeploy hello-world
```
### Modifying applications
**wadm** supports upgrading applications by deploying new versions of manifests. Try changing the manifest you created above by updating the number of instances.
**wadm** supports upgrading applications by deploying new versions of manifests. Try changing the manifest you created above by updating the number of replicas.
```yaml
<<ELIDED>>
@@ -113,25 +104,38 @@ spec:
- name: http-component
type: component
properties:
image: ghcr.io/wasmcloud/components/http-hello-world-rust:0.1.0
image: wasmcloud.azurecr.io/http-hello-world:0.1.0
traits:
- type: spreadscaler
properties:
instances: 10 # Let's have 10!
replicas: 10 # Let's have 10!
<<ELIDED>>
```
Then simply deploy the new manifest:
```bash
```
wash app deploy hello.yaml
```
Now wasmCloud is configured to automatically scale your component to 10 instances based on incoming load.
Now wasmCloud is configured to automatically scale your component to 10 replicas based on incoming load.
## Responsibilities
**wadm** has a very small set of responsibilities, which actually contributes to its power.
- **Manage Application Specifications** - Manage models consisting of _desired state_. This includes
the creation and deletion and _rollback_ of models to previous versions. Application
specifications are defined using the [Open Application Model](https://oam.dev/). For more
information on wadm's specific OAM features, see our [OAM README](./oam/README.md).
- **Observe State** - Monitor wasmCloud [CloudEvents](https://wasmcloud.com/docs/reference/cloud-event-list) from all hosts in a lattice to build the current state.
- **Take Compensating Actions** - When indicated, issue commands to the [lattice control
interface](https://github.com/wasmCloud/interfaces/tree/main/lattice-control) to bring about the
changes necessary to make the desired and observed state match.
## 🚧 Advanced
You can find a Docker Compose file for deploying an end-to-end multi-tenant example in the [test](https://github.com/wasmCloud/wadm/blob/main/tests/docker-compose-e2e-multitenant.yaml) directory.
You can find a Docker Compose file for deploying an end-to-end multi-tenant example in the [test](https://github.com/wasmCloud/wadm/blob/main/test/docker-compose-e2e-multitenant.yaml) directory.
In advanced use cases, **wadm** is also capable of:
@@ -146,7 +150,21 @@ a single lattice. Proceed with caution while we do further testing.
Interacting with **wadm** is done over NATS on the root topic `wadm.api.{prefix}` where `prefix` is
the lattice namespace prefix. For more information on this API, please consult the [wadm
Reference](https://wasmcloud.com/docs/ecosystem/wadm/).
Reference](https://wasmcloud.dev/reference/wadm).
## Known Issues/Missing functionality
As this is a new project there are some things we know are missing or buggy. A non-exhaustive list
of these can be found below:
- It is _technically_ possible as things stand right now for a race condition with manifests when a
manifest is updated/created and deleted simultaneously. In this case, one of the operations will
win and you will end up with a manifest that still exists after you delete it or a manifest that
does not exist after you create it. This is a very unlikely scenario as only one person or process
is interacting with a specific manifest, but it is possible. If this becomes a problem for you, please let
us know and we will consider additional ways of how we can address it.
- Manifest validation is implemented, but slightly clunky. Any PRs that make this better would be
more than welcome!
## References


@@ -1,3 +0,0 @@
# Reporting a security issue
Please refer to the [wasmCloud Security Process and Policy](https://github.com/wasmCloud/wasmCloud/blob/main/SECURITY.md) for details on how to report security issues and vulnerabilities.


@@ -15,10 +15,10 @@ type: application
# This is the chart version. This version number should be incremented each time you make changes
# to the chart and its templates, including the app version.
# Versions are expected to follow Semantic Versioning (https://semver.org/)
version: '0.2.10'
version: "0.2.2"
# This is the version number of the application being deployed. This version number should be
# incremented each time you make changes to the application. Versions are not expected to
# follow Semantic Versioning. They should reflect the version the application is using.
# It is recommended to use it with quotes.
appVersion: 'v0.21.0'
appVersion: "v0.11.0"


@@ -0,0 +1,4 @@
wadm:
config:
nats:
server: "nats.default.svc.cluster.local:4222"


@@ -36,15 +36,10 @@ Common labels
{{- define "wadm.labels" -}}
helm.sh/chart: {{ include "wadm.chart" . }}
{{ include "wadm.selectorLabels" . }}
app.kubernetes.io/component: wadm
{{- if .Chart.AppVersion }}
app.kubernetes.io/version: {{ .Chart.AppVersion | quote }}
{{- end }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
app.kubernetes.io/part-of: wadm
{{- with .Values.additionalLabels }}
{{ . | toYaml }}
{{- end }}
{{- end }}
{{/*
@@ -55,15 +50,6 @@ app.kubernetes.io/name: {{ include "wadm.name" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
{{- end }}
{{- define "wadm.nats.server" -}}
- name: WADM_NATS_SERVER
{{- if .Values.wadm.config.nats.server }}
value: {{ .Values.wadm.config.nats.server | quote }}
{{- else }}
value: nats-headless.{{ .Release.Namespace }}.svc.cluster.local
{{- end }}
{{- end }}
{{- define "wadm.nats.auth" -}}
{{- if .Values.wadm.config.nats.creds.secretName -}}
- name: WADM_NATS_CREDS_FILE
@@ -103,4 +89,4 @@ volumes:
path: "nats.creds"
{{- end }}
{{- end }}
{{- end }}
{{- end }}


@@ -5,7 +5,7 @@ metadata:
labels:
{{- include "wadm.labels" . | nindent 4 }}
spec:
replicas: {{ .Values.replicas }}
replicas: {{ .Values.replicaCount }}
selector:
matchLabels:
{{- include "wadm.selectorLabels" . | nindent 6 }}
@@ -34,7 +34,8 @@ spec:
image: "{{ .Values.wadm.image.repository }}:{{ .Values.wadm.image.tag | default .Chart.AppVersion }}"
imagePullPolicy: {{ .Values.wadm.image.pullPolicy }}
env:
{{- include "wadm.nats.server" . | nindent 12 }}
- name: WADM_NATS_SERVER
value: {{ .Values.wadm.config.nats.server | quote }}
{{- include "wadm.nats.auth" . | nindent 12 }}
{{- if .Values.wadm.config.nats.tlsCaFile }}
- name: WADM_NATS_TLS_CA_FILE
@@ -56,9 +57,9 @@ spec:
- name: WADM_TRACING_ENDPOINT
value: {{ .Values.wadm.config.tracingEndpoint | quote }}
{{- end }}
{{- if .Values.wadm.config.nats.jetstreamDomain }}
{{- if .Values.wadm.config.jetstreamDomain }}
- name: WADM_JETSTREAM_DOMAIN
value: {{ .Values.wadm.config.nats.jetstreamDomain | quote }}
value: {{ .Values.wadm.config.jetstreamDomain | quote }}
{{- end }}
{{- if .Values.wadm.config.maxJobs }}
- name: WADM_MAX_JOBS


@@ -14,7 +14,7 @@ wadm:
hostId: ""
logLevel: ""
nats:
server: ""
server: "127.0.0.1:4222"
jetstreamDomain: ""
tlsCaFile: ""
creds:
@@ -34,9 +34,6 @@ imagePullSecrets: []
nameOverride: ""
fullnameOverride: ""
additionalLabels: {}
# app: wadm
serviceAccount:
# Specifies whether a service account should be created
create: true


@@ -1,7 +1,7 @@
[package]
name = "wadm-client"
description = "A client library for interacting with the wadm API"
version = "0.10.0"
version = "0.1.1"
edition = "2021"
authors = ["wasmCloud Team"]
keywords = ["webassembly", "wasmcloud", "wadm"]
@@ -11,10 +11,14 @@ repository = "https://github.com/wasmcloud/wadm"
[dependencies]
anyhow = { workspace = true }
async-nats = { workspace = true }
futures = { workspace = true }
bytes = { workspace = true }
nkeys = { workspace = true }
once_cell = { workspace = true }
serde = { workspace = true }
serde_json = { workspace = true }
serde_yaml = { workspace = true }
thiserror = { workspace = true }
tokio = { workspace = true, features = ["full"] }
tracing = { workspace = true, features = ["log"] }
tracing-futures = { workspace = true }
wadm-types = { workspace = true }


@@ -1,10 +1,13 @@
//! A client for interacting with Wadm.
use std::path::PathBuf;
use std::sync::{Arc, OnceLock};
use std::sync::Arc;
use async_nats::{HeaderMap, Message};
use async_nats::HeaderMap;
pub use error::Result;
use error::{ClientError, SerializationError};
use futures::Stream;
pub use loader::ManifestLoader;
use once_cell::sync::Lazy;
use topics::TopicGenerator;
use wadm_types::{
api::{
@@ -19,21 +22,14 @@ use wadm_types::{
mod nats;
pub mod error;
pub use error::Result;
pub mod loader;
pub use loader::ManifestLoader;
pub mod topics;
/// Headers for `Content-Type: application/json`
static HEADERS_CONTENT_TYPE_JSON: OnceLock<HeaderMap> = OnceLock::new();
/// Retrieve static content type headers
fn get_headers_content_type_json() -> &'static HeaderMap {
HEADERS_CONTENT_TYPE_JSON.get_or_init(|| {
let mut headers = HeaderMap::new();
headers.insert("Content-Type", "application/json");
headers
})
}
static CONTENT_TYPE_HEADERS: Lazy<HeaderMap> = Lazy::new(|| {
let mut headers = HeaderMap::new();
headers.insert("Content-Type", "application/json");
headers
});
#[derive(Clone)]
pub struct Client {
@@ -107,11 +103,7 @@ impl Client {
let topic = self.topics.model_put_topic();
let resp = self
.client
.request_with_headers(
topic,
get_headers_content_type_json().clone(),
manifest_bytes.into(),
)
.request_with_headers(topic, CONTENT_TYPE_HEADERS.clone(), manifest_bytes.into())
.await?;
let body: PutModelResponse =
serde_json::from_slice(&resp.payload).map_err(SerializationError::from)?;
@@ -205,13 +197,7 @@ impl Client {
///
/// Please note that an OK response does not necessarily mean that the manifest was deployed
/// successfully, just that the server accepted the deployment request.
///
/// Returns a tuple of the name and version of the manifest that was deployed
pub async fn deploy_manifest(
&self,
name: &str,
version: Option<&str>,
) -> Result<(String, Option<String>)> {
pub async fn deploy_manifest(&self, name: &str, version: Option<&str>) -> Result<()> {
let topic = self.topics.model_deploy_topic(name);
let body = if let Some(version) = version {
serde_json::to_vec(&DeployModelRequest {
@@ -227,7 +213,7 @@ match body.result {
match body.result {
DeployResult::Error => Err(ClientError::ApiError(body.message)),
DeployResult::NotFound => Err(ClientError::NotFound(name.to_string())),
DeployResult::Acknowledged => Ok((body.name, body.version)),
DeployResult::Acknowledged => Ok(()),
}
}
@@ -249,8 +235,8 @@ impl Client {
/// Undeploys the given manifest from the lattice
///
/// Returns Ok(manifest_name) if the manifest undeploy request was acknowledged
pub async fn undeploy_manifest(&self, name: &str) -> Result<String> {
/// Returns Ok if the manifest undeploy request was acknowledged
pub async fn undeploy_manifest(&self, name: &str) -> Result<()> {
let topic = self.topics.model_undeploy_topic(name);
let resp = self
.client
@@ -261,7 +247,7 @@ match body.result {
match body.result {
DeployResult::Error => Err(ClientError::ApiError(body.message)),
DeployResult::NotFound => Err(ClientError::NotFound(name.to_string())),
DeployResult::Acknowledged => Ok(body.name),
DeployResult::Acknowledged => Ok(()),
}
}
@@ -283,15 +269,6 @@ impl Client {
}
}
/// Subscribes to the status of a given manifest
pub async fn subscribe_to_status(&self, name: &str) -> Result<impl Stream<Item = Message>> {
let subject = self.topics.wadm_status_topic(name);
let subscriber = self
.client
.subscribe(subject)
.await
.map_err(|e| ClientError::ApiError(e.to_string()))?;
Ok(subscriber)
}
// TODO(thomastaylor312): It would probably be nice to add a helper that can subscribe to a
// status topic and return a stream of status updates. But that can be added later.
}
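The hunk above replaces a `once_cell::sync::Lazy` static with the standard library's `OnceLock` for caching the JSON content-type headers, dropping a dependency. A minimal, self-contained sketch of that std-only pattern (using a plain `String` instead of async-nats' `HeaderMap`):

```rust
use std::sync::OnceLock;

// The value is built on first access and every later call reuses it,
// mirroring what `get_headers_content_type_json` does in the diff above.
fn json_content_type() -> &'static String {
    static CONTENT_TYPE: OnceLock<String> = OnceLock::new();
    CONTENT_TYPE.get_or_init(|| "application/json".to_string())
}

fn main() {
    assert_eq!(json_content_type(), "application/json");
}
```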


@@ -1,4 +1,4 @@
use wadm_types::api::{DEFAULT_WADM_TOPIC_PREFIX, WADM_STATUS_API_PREFIX};
use wadm_types::api::DEFAULT_WADM_TOPIC_PREFIX;
/// A generator that uses various config options to generate the proper topic names for the wadm API
pub struct TopicGenerator {
@@ -44,7 +44,7 @@ impl TopicGenerator {
/// Returns the full topic for a model delete operation
pub fn model_delete_topic(&self, model_name: &str) -> String {
format!("{}.del.{model_name}", self.model_prefix())
format!("{}.delete.{model_name}", self.model_prefix())
}
/// Returns the full topic for a model list operation
@@ -71,11 +71,4 @@ impl TopicGenerator {
pub fn model_status_topic(&self, model_name: &str) -> String {
format!("{}.status.{model_name}", self.model_prefix())
}
/// Returns the full topic for WADM status subscriptions
pub fn wadm_status_topic(&self, app_name: &str) -> String {
// Extract just the lattice name from topic_prefix
let lattice = self.topic_prefix.split('.').last().unwrap_or("default");
format!("{}.{}.{}", WADM_STATUS_API_PREFIX, lattice, app_name)
}
}
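Since the `TopicGenerator` above just builds NATS subjects, any NATS client can talk to the wadm API directly. A hedged sketch (assuming a local NATS server, the default `wadm.api` prefix, a lattice named `default`, and a `model.list` subject inferred from the list-operation helper above):

```rust
// Hypothetical example: request the application list from wadm over NATS.
// Assumes the `async-nats` and `tokio` crates and NATS on 127.0.0.1:4222.
#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
    let client = async_nats::connect("127.0.0.1:4222").await?;
    // Assumed subject layout per the TopicGenerator above: {prefix}.{lattice}.model.list
    let resp = client
        .request("wadm.api.default.model.list".to_string(), "".into())
        .await?;
    println!("{}", String::from_utf8_lossy(&resp.payload));
    Ok(())
}
```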


@@ -1,28 +1,37 @@
[package]
name = "wadm-types"
description = "Types and validators for the wadm API"
version = "0.8.3"
version = "0.1.0"
edition = "2021"
authors = ["wasmCloud Team"]
keywords = ["webassembly", "wasmcloud", "wadm"]
license = "Apache-2.0"
repository = "https://github.com/wasmcloud/wadm"
[features]
wit = []
[dependencies]
anyhow = { workspace = true }
async-nats = { workspace = true }
async-trait = { workspace = true }
base64 = { workspace = true }
bytes = { workspace = true }
chrono = { workspace = true }
cloudevents-sdk = { workspace = true }
futures = { workspace = true }
indexmap = { workspace = true, features = ["serde"] }
jsonschema = { workspace = true }
lazy_static = { workspace = true }
nkeys = { workspace = true }
rand = { workspace = true, features = ["small_rng"] }
regex = { workspace = true }
schemars = { workspace = true }
serde = { workspace = true, features = ["derive"] }
semver = { workspace = true, features = ["serde"] }
serde = { workspace = true }
serde_json = { workspace = true }
serde_yaml = { workspace = true }
utoipa = { workspace = true }
[target.'cfg(not(target_family = "wasm"))'.dependencies]
sha2 = { workspace = true }
thiserror = { workspace = true }
tokio = { workspace = true, features = ["full"] }
wit-bindgen-wrpc = { workspace = true }
[target.'cfg(target_family = "wasm")'.dependencies]
wit-bindgen = { workspace = true, features = ["macros"] }
tracing = { workspace = true, features = ["log"] }
tracing-futures = { workspace = true }
ulid = { workspace = true, features = ["serde"] }
uuid = { workspace = true }
wasmcloud-control-interface = { workspace = true }


@@ -4,7 +4,6 @@ use crate::Manifest;
/// The default topic prefix for the wadm API;
pub const DEFAULT_WADM_TOPIC_PREFIX: &str = "wadm.api";
pub const WADM_STATUS_API_PREFIX: &str = "wadm.status";
/// The request body for getting a manifest
#[derive(Debug, Serialize, Deserialize)]
@@ -23,14 +22,6 @@ pub struct GetModelResponse {
pub manifest: Option<Manifest>,
}
#[derive(Debug, Serialize, Deserialize)]
pub struct ListModelsResponse {
pub result: GetResult,
#[serde(default)]
pub message: String,
pub models: Vec<ModelSummary>,
}
/// Possible outcomes of a get request
#[derive(Debug, Serialize, Deserialize, PartialEq, Eq)]
#[serde(rename_all = "lowercase")]
@@ -70,11 +61,7 @@ pub struct ModelSummary {
pub version: String,
pub description: Option<String>,
pub deployed_version: Option<String>,
#[serde(default)]
pub detailed_status: Status,
#[deprecated(since = "0.14.0", note = "Use detailed_status instead")]
pub status: StatusType,
#[deprecated(since = "0.14.0", note = "Use detailed_status instead")]
pub status_message: Option<String>,
}
@@ -135,10 +122,6 @@ pub struct DeployModelResponse {
pub result: DeployResult,
#[serde(default)]
pub message: String,
#[serde(default)]
pub name: String,
#[serde(default)]
pub version: Option<String>,
}
/// All possible outcomes of a deploy operation
@@ -176,46 +159,27 @@ pub enum StatusResult {
}
/// The current status of a model
#[derive(Debug, Serialize, Deserialize, Default, Clone, PartialEq, Eq)]
#[derive(Debug, Serialize, Deserialize, Default, Clone)]
pub struct Status {
pub version: String,
#[serde(rename = "status")]
pub info: StatusInfo,
#[serde(skip_serializing_if = "Vec::is_empty", default)]
pub scalers: Vec<ScalerStatus>,
#[serde(default)]
#[deprecated(since = "0.14.0")]
pub version: String,
#[serde(default)]
#[deprecated(since = "0.14.0")]
pub components: Vec<ComponentStatus>,
}
impl Status {
pub fn new(info: StatusInfo, scalers: Vec<ScalerStatus>) -> Self {
#[allow(deprecated)]
Status {
info,
scalers,
version: String::with_capacity(0),
components: Vec::with_capacity(0),
}
}
}
/// The current status of a component
#[derive(Debug, Serialize, Deserialize, Default, Clone, Eq, PartialEq)]
#[derive(Debug, Serialize, Deserialize, Default, Clone)]
pub struct ComponentStatus {
pub name: String,
#[serde(rename = "type")]
pub component_type: String,
#[serde(rename = "status")]
pub info: StatusInfo,
#[serde(skip_serializing_if = "Vec::is_empty", default)]
pub traits: Vec<TraitStatus>,
}
/// The current status of a trait
#[derive(Debug, Serialize, Deserialize, Default, Clone, Eq, PartialEq)]
#[derive(Debug, Serialize, Deserialize, Default, Clone)]
pub struct TraitStatus {
#[serde(rename = "type")]
pub trait_type: String,
@@ -223,22 +187,6 @@ pub struct TraitStatus {
pub info: StatusInfo,
}
/// The current status of a scaler
#[derive(Debug, Serialize, Deserialize, Default, Clone, Eq, PartialEq)]
pub struct ScalerStatus {
/// The id of the scaler
#[serde(default)]
pub id: String,
/// The kind of scaler
#[serde(default)]
pub kind: String,
/// The human-readable name of the scaler
#[serde(default)]
pub name: String,
#[serde(rename = "status")]
pub info: StatusInfo,
}
/// Common high-level status information
#[derive(Debug, Serialize, Deserialize, Default, Clone, Eq, PartialEq)]
pub struct StatusInfo {
@@ -276,27 +224,12 @@ impl StatusInfo {
message: message.to_owned(),
}
}
pub fn waiting(message: &str) -> Self {
StatusInfo {
status_type: StatusType::Waiting,
message: message.to_owned(),
}
}
pub fn unhealthy(message: &str) -> Self {
StatusInfo {
status_type: StatusType::Unhealthy,
message: message.to_owned(),
}
}
}
/// All possible status types
#[derive(Debug, Serialize, Deserialize, Eq, PartialEq, Clone, Copy, Default)]
#[serde(rename_all = "lowercase")]
pub enum StatusType {
Waiting,
#[default]
Undeployed,
#[serde(alias = "compensating")]
@ -304,7 +237,6 @@ pub enum StatusType {
#[serde(alias = "ready")]
Deployed,
Failed,
Unhealthy,
}
// Implementing add makes it easy for us to get an aggregate status by summing all of them together
@@ -327,15 +259,9 @@ impl std::ops::Add for StatusType {
// If anything is undeployed, the whole thing is
(Self::Undeployed, _) => Self::Undeployed,
(_, Self::Undeployed) => Self::Undeployed,
// If anything is waiting, the whole thing is
(Self::Waiting, _) => Self::Waiting,
(_, Self::Waiting) => Self::Waiting,
(Self::Reconciling, _) => Self::Reconciling,
(_, Self::Reconciling) => Self::Reconciling,
(Self::Unhealthy, _) => Self::Unhealthy,
(_, Self::Unhealthy) => Self::Unhealthy,
// This is technically covered in the first comparison, but we'll be explicit
(Self::Deployed, Self::Deployed) => Self::Deployed,
_ => unreachable!("aggregating StatusType failure. This is programmer error"),
}
}
}
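The `Add` impl above is what lets a collection of statuses be summed into a single aggregate, with the most blocking state winning. The same idea as a tiny self-contained sketch, using a hypothetical three-state enum rather than wadm's `StatusType`:

```rust
// Hypothetical enum: `Add` picks the "most severe" of two statuses, so any
// iterator of statuses can be folded into one aggregate value.
#[derive(Debug, Clone, Copy, PartialEq)]
enum Health {
    Ready,
    Degraded,
    Failed,
}

impl std::ops::Add for Health {
    type Output = Self;
    fn add(self, rhs: Self) -> Self {
        use Health::*;
        match (self, rhs) {
            (Failed, _) | (_, Failed) => Failed,
            (Degraded, _) | (_, Degraded) => Degraded,
            _ => Ready,
        }
    }
}

fn main() {
    let agg = [Health::Ready, Health::Degraded, Health::Ready]
        .into_iter()
        .fold(Health::Ready, |acc, s| acc + s);
    assert_eq!(agg, Health::Degraded);
}
```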
@@ -401,20 +327,6 @@ mod test {
StatusType::Failed
));
assert!(matches!(
[StatusType::Deployed, StatusType::Unhealthy]
.into_iter()
.sum(),
StatusType::Unhealthy
));
assert!(matches!(
[StatusType::Reconciling, StatusType::Unhealthy]
.into_iter()
.sum(),
StatusType::Reconciling
));
let empty: Vec<StatusType> = Vec::new();
assert!(matches!(empty.into_iter().sum(), StatusType::Undeployed));
}


@@ -1,621 +0,0 @@
use crate::{
api::{
ComponentStatus, DeleteResult, GetResult, ModelSummary, PutResult, Status, StatusInfo,
StatusResult, StatusType, TraitStatus, VersionInfo,
},
CapabilityProperties, Component, ComponentProperties, ConfigDefinition, ConfigProperty,
LinkProperty, Manifest, Metadata, Policy, Properties, SecretProperty, SecretSourceProperty,
SharedApplicationComponentProperties, Specification, Spread, SpreadScalerProperty,
TargetConfig, Trait, TraitProperty,
};
use wasmcloud::wadm;
#[cfg(all(feature = "wit", target_family = "wasm"))]
wit_bindgen::generate!({
path: "wit",
additional_derives: [
serde::Serialize,
serde::Deserialize,
],
with: {
"wasmcloud:wadm/types@0.2.0": generate,
"wasmcloud:wadm/client@0.2.0": generate,
"wasmcloud:wadm/handler@0.2.0": generate
}
});
#[cfg(all(feature = "wit", not(target_family = "wasm")))]
wit_bindgen_wrpc::generate!({
generate_unused_types: true,
additional_derives: [
serde::Serialize,
serde::Deserialize,
],
with: {
"wasmcloud:wadm/types@0.2.0": generate,
"wasmcloud:wadm/client@0.2.0": generate,
"wasmcloud:wadm/handler@0.2.0": generate
}
});
// Trait implementations for converting types in the API module to the generated types
impl From<Manifest> for wadm::types::OamManifest {
fn from(manifest: Manifest) -> Self {
wadm::types::OamManifest {
api_version: manifest.api_version.to_string(),
kind: manifest.kind.to_string(),
metadata: manifest.metadata.into(),
spec: manifest.spec.into(),
}
}
}
impl From<Metadata> for wadm::types::Metadata {
fn from(metadata: Metadata) -> Self {
wadm::types::Metadata {
name: metadata.name,
annotations: metadata.annotations.into_iter().collect(),
labels: metadata.labels.into_iter().collect(),
}
}
}
impl From<Specification> for wadm::types::Specification {
fn from(spec: Specification) -> Self {
wadm::types::Specification {
components: spec.components.into_iter().map(|c| c.into()).collect(),
policies: spec.policies.into_iter().map(|c| c.into()).collect(),
}
}
}
impl From<Component> for wadm::types::Component {
fn from(component: Component) -> Self {
wadm::types::Component {
name: component.name,
properties: component.properties.into(),
traits: component
.traits
.map(|traits| traits.into_iter().map(|t| t.into()).collect()),
}
}
}
impl From<Policy> for wadm::types::Policy {
fn from(policy: Policy) -> Self {
wadm::types::Policy {
name: policy.name,
properties: policy.properties.into_iter().collect(),
type_: policy.policy_type,
}
}
}
impl From<Properties> for wadm::types::Properties {
fn from(properties: Properties) -> Self {
match properties {
Properties::Component { properties } => {
wadm::types::Properties::Component(properties.into())
}
Properties::Capability { properties } => {
wadm::types::Properties::Capability(properties.into())
}
}
}
}
impl From<ComponentProperties> for wadm::types::ComponentProperties {
fn from(properties: ComponentProperties) -> Self {
wadm::types::ComponentProperties {
application: properties.application.map(Into::into),
image: properties.image,
id: properties.id,
config: properties.config.into_iter().map(|c| c.into()).collect(),
secrets: properties.secrets.into_iter().map(|c| c.into()).collect(),
}
}
}
impl From<CapabilityProperties> for wadm::types::CapabilityProperties {
fn from(properties: CapabilityProperties) -> Self {
wadm::types::CapabilityProperties {
application: properties.application.map(Into::into),
image: properties.image,
id: properties.id,
config: properties.config.into_iter().map(|c| c.into()).collect(),
secrets: properties.secrets.into_iter().map(|c| c.into()).collect(),
}
}
}
impl From<ConfigProperty> for wadm::types::ConfigProperty {
fn from(property: ConfigProperty) -> Self {
wadm::types::ConfigProperty {
name: property.name,
properties: property.properties.map(|props| props.into_iter().collect()),
}
}
}
impl From<SecretProperty> for wadm::types::SecretProperty {
fn from(property: SecretProperty) -> Self {
wadm::types::SecretProperty {
name: property.name,
properties: property.properties.into(),
}
}
}
impl From<SecretSourceProperty> for wadm::types::SecretSourceProperty {
fn from(property: SecretSourceProperty) -> Self {
wadm::types::SecretSourceProperty {
policy: property.policy,
key: property.key,
field: property.field,
version: property.version,
}
}
}
impl From<SharedApplicationComponentProperties>
for wadm::types::SharedApplicationComponentProperties
{
fn from(properties: SharedApplicationComponentProperties) -> Self {
wadm::types::SharedApplicationComponentProperties {
name: properties.name,
component: properties.component,
}
}
}
impl From<Trait> for wadm::types::Trait {
fn from(trait_: Trait) -> Self {
wadm::types::Trait {
trait_type: trait_.trait_type,
properties: trait_.properties.into(),
}
}
}
impl From<TraitProperty> for wadm::types::TraitProperty {
fn from(property: TraitProperty) -> Self {
match property {
TraitProperty::Link(link) => wadm::types::TraitProperty::Link(link.into()),
TraitProperty::SpreadScaler(spread) => {
wadm::types::TraitProperty::Spreadscaler(spread.into())
}
TraitProperty::Custom(custom) => wadm::types::TraitProperty::Custom(custom.to_string()),
}
}
}
impl From<LinkProperty> for wadm::types::LinkProperty {
fn from(property: LinkProperty) -> Self {
wadm::types::LinkProperty {
source: property.source.map(|c| c.into()),
target: property.target.into(),
namespace: property.namespace,
package: property.package,
interfaces: property.interfaces,
name: property.name,
}
}
}
impl From<ConfigDefinition> for wadm::types::ConfigDefinition {
fn from(definition: ConfigDefinition) -> Self {
wadm::types::ConfigDefinition {
config: definition.config.into_iter().map(|c| c.into()).collect(),
secrets: definition.secrets.into_iter().map(|s| s.into()).collect(),
}
}
}
impl From<TargetConfig> for wadm::types::TargetConfig {
fn from(config: TargetConfig) -> Self {
wadm::types::TargetConfig {
name: config.name,
config: config.config.into_iter().map(|c| c.into()).collect(),
secrets: config.secrets.into_iter().map(|s| s.into()).collect(),
}
}
}
impl From<SpreadScalerProperty> for wadm::types::SpreadscalerProperty {
fn from(property: SpreadScalerProperty) -> Self {
wadm::types::SpreadscalerProperty {
instances: property.instances as u32,
spread: property.spread.into_iter().map(|s| s.into()).collect(),
}
}
}
impl From<Spread> for wadm::types::Spread {
fn from(spread: Spread) -> Self {
wadm::types::Spread {
name: spread.name,
requirements: spread.requirements.into_iter().collect(),
weight: spread.weight.map(|w| w as u32),
}
}
}
impl From<ModelSummary> for wadm::types::ModelSummary {
fn from(summary: ModelSummary) -> Self {
wadm::types::ModelSummary {
name: summary.name,
version: summary.version,
description: summary.description,
deployed_version: summary.deployed_version,
status: summary.status.into(),
status_message: summary.status_message,
}
}
}
impl From<DeleteResult> for wadm::types::DeleteResult {
fn from(result: DeleteResult) -> Self {
match result {
DeleteResult::Deleted => wadm::types::DeleteResult::Deleted,
DeleteResult::Error => wadm::types::DeleteResult::Error,
DeleteResult::Noop => wadm::types::DeleteResult::Noop,
}
}
}
impl From<GetResult> for wadm::types::GetResult {
fn from(result: GetResult) -> Self {
match result {
GetResult::Error => wadm::types::GetResult::Error,
GetResult::Success => wadm::types::GetResult::Success,
GetResult::NotFound => wadm::types::GetResult::NotFound,
}
}
}
impl From<PutResult> for wadm::types::PutResult {
fn from(result: PutResult) -> Self {
match result {
PutResult::Error => wadm::types::PutResult::Error,
PutResult::Created => wadm::types::PutResult::Created,
PutResult::NewVersion => wadm::types::PutResult::NewVersion,
}
}
}
impl From<StatusType> for wadm::types::StatusType {
fn from(status: StatusType) -> Self {
match status {
StatusType::Undeployed => wadm::types::StatusType::Undeployed,
StatusType::Reconciling => wadm::types::StatusType::Reconciling,
StatusType::Deployed => wadm::types::StatusType::Deployed,
StatusType::Failed => wadm::types::StatusType::Failed,
StatusType::Waiting => wadm::types::StatusType::Waiting,
StatusType::Unhealthy => wadm::types::StatusType::Unhealthy,
}
}
}
// Trait implementations for converting generated types to the types in the API module
impl From<wadm::types::StatusType> for StatusType {
fn from(status: wadm::types::StatusType) -> Self {
match status {
wadm::types::StatusType::Undeployed => StatusType::Undeployed,
wadm::types::StatusType::Reconciling => StatusType::Reconciling,
wadm::types::StatusType::Deployed => StatusType::Deployed,
wadm::types::StatusType::Failed => StatusType::Failed,
wadm::types::StatusType::Waiting => StatusType::Waiting,
wadm::types::StatusType::Unhealthy => StatusType::Unhealthy,
}
}
}
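// Round-trip sketch: the paired `From` impls above cover the same variants in
// both directions, so API -> WIT -> API conversion is lossless.
fn status_type_round_trip(status: StatusType) -> StatusType {
    let wit: wadm::types::StatusType = status.into();
    wit.into()
}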
impl From<wadm::types::StatusInfo> for StatusInfo {
fn from(info: wadm::types::StatusInfo) -> Self {
StatusInfo {
status_type: info.status_type.into(),
message: info.message,
}
}
}
impl From<wadm::types::ComponentStatus> for ComponentStatus {
fn from(status: wadm::types::ComponentStatus) -> Self {
ComponentStatus {
name: status.name,
component_type: status.component_type,
info: status.info.into(),
traits: status
.traits
.into_iter()
.map(|t| TraitStatus {
trait_type: t.trait_type,
info: t.info.into(),
})
.collect(),
}
}
}
impl From<wadm::types::TraitStatus> for TraitStatus {
fn from(status: wadm::types::TraitStatus) -> Self {
TraitStatus {
trait_type: status.trait_type,
info: status.info.into(),
}
}
}
impl From<wadm::types::StatusResult> for StatusResult {
fn from(result: wadm::types::StatusResult) -> Self {
match result {
wadm::types::StatusResult::Error => StatusResult::Error,
wadm::types::StatusResult::Ok => StatusResult::Ok,
wadm::types::StatusResult::NotFound => StatusResult::NotFound,
}
}
}
impl From<wadm::types::OamManifest> for Manifest {
fn from(manifest: wadm::types::OamManifest) -> Self {
Manifest {
api_version: manifest.api_version,
kind: manifest.kind,
metadata: manifest.metadata.into(),
spec: manifest.spec.into(),
}
}
}
impl From<wadm::types::Metadata> for Metadata {
fn from(metadata: wadm::types::Metadata) -> Self {
Metadata {
name: metadata.name,
annotations: metadata.annotations.into_iter().collect(),
labels: metadata.labels.into_iter().collect(),
}
}
}
impl From<wadm::types::Specification> for Specification {
fn from(spec: wadm::types::Specification) -> Self {
Specification {
components: spec.components.into_iter().map(|c| c.into()).collect(),
policies: spec.policies.into_iter().map(|c| c.into()).collect(),
}
}
}
impl From<wadm::types::Component> for Component {
fn from(component: wadm::types::Component) -> Self {
Component {
name: component.name,
properties: component.properties.into(),
traits: component
.traits
.map(|traits| traits.into_iter().map(|t| t.into()).collect()),
}
}
}
impl From<wadm::types::Policy> for Policy {
fn from(policy: wadm::types::Policy) -> Self {
Policy {
name: policy.name,
properties: policy.properties.into_iter().collect(),
policy_type: policy.type_,
}
}
}
impl From<wadm::types::Properties> for Properties {
fn from(properties: wadm::types::Properties) -> Self {
match properties {
wadm::types::Properties::Component(properties) => Properties::Component {
properties: properties.into(),
},
wadm::types::Properties::Capability(properties) => Properties::Capability {
properties: properties.into(),
},
}
}
}
impl From<wadm::types::ComponentProperties> for ComponentProperties {
fn from(properties: wadm::types::ComponentProperties) -> Self {
ComponentProperties {
image: properties.image,
application: properties.application.map(Into::into),
id: properties.id,
config: properties.config.into_iter().map(|c| c.into()).collect(),
secrets: properties.secrets.into_iter().map(|c| c.into()).collect(),
}
}
}
impl From<wadm::types::CapabilityProperties> for CapabilityProperties {
fn from(properties: wadm::types::CapabilityProperties) -> Self {
CapabilityProperties {
image: properties.image,
application: properties.application.map(Into::into),
id: properties.id,
config: properties.config.into_iter().map(|c| c.into()).collect(),
secrets: properties.secrets.into_iter().map(|c| c.into()).collect(),
}
}
}
impl From<wadm::types::ConfigProperty> for ConfigProperty {
fn from(property: wadm::types::ConfigProperty) -> Self {
ConfigProperty {
name: property.name,
properties: property.properties.map(|props| props.into_iter().collect()),
}
}
}
impl From<wadm::types::SecretProperty> for SecretProperty {
fn from(property: wadm::types::SecretProperty) -> Self {
SecretProperty {
name: property.name,
properties: property.properties.into(),
}
}
}
impl From<wadm::types::SecretSourceProperty> for SecretSourceProperty {
fn from(property: wadm::types::SecretSourceProperty) -> Self {
SecretSourceProperty {
policy: property.policy,
key: property.key,
field: property.field,
version: property.version,
}
}
}
impl From<wadm::types::SharedApplicationComponentProperties>
for SharedApplicationComponentProperties
{
fn from(properties: wadm::types::SharedApplicationComponentProperties) -> Self {
SharedApplicationComponentProperties {
name: properties.name,
component: properties.component,
}
}
}
impl From<wadm::types::Trait> for Trait {
fn from(trait_: wadm::types::Trait) -> Self {
Trait {
trait_type: trait_.trait_type,
properties: trait_.properties.into(),
}
}
}
impl From<wadm::types::TraitProperty> for TraitProperty {
fn from(property: wadm::types::TraitProperty) -> Self {
match property {
wadm::types::TraitProperty::Link(link) => TraitProperty::Link(link.into()),
wadm::types::TraitProperty::Spreadscaler(spread) => {
TraitProperty::SpreadScaler(spread.into())
}
wadm::types::TraitProperty::Custom(custom) => {
TraitProperty::Custom(serde_json::value::Value::String(custom))
}
}
}
}
impl From<wadm::types::LinkProperty> for LinkProperty {
fn from(property: wadm::types::LinkProperty) -> Self {
#[allow(deprecated)]
LinkProperty {
source: property.source.map(|c| c.into()),
target: property.target.into(),
namespace: property.namespace,
package: property.package,
interfaces: property.interfaces,
name: property.name,
source_config: None,
target_config: None,
}
}
}
impl From<wadm::types::ConfigDefinition> for ConfigDefinition {
fn from(definition: wadm::types::ConfigDefinition) -> Self {
ConfigDefinition {
config: definition.config.into_iter().map(|c| c.into()).collect(),
secrets: definition.secrets.into_iter().map(|s| s.into()).collect(),
}
}
}
impl From<wadm::types::TargetConfig> for TargetConfig {
fn from(config: wadm::types::TargetConfig) -> Self {
TargetConfig {
name: config.name,
config: config.config.into_iter().map(|c| c.into()).collect(),
secrets: config.secrets.into_iter().map(|s| s.into()).collect(),
}
}
}
impl From<wadm::types::SpreadscalerProperty> for SpreadScalerProperty {
fn from(property: wadm::types::SpreadscalerProperty) -> Self {
SpreadScalerProperty {
instances: property.instances as usize,
spread: property.spread.into_iter().map(|s| s.into()).collect(),
}
}
}
impl From<wadm::types::Spread> for Spread {
fn from(spread: wadm::types::Spread) -> Self {
Spread {
name: spread.name,
requirements: spread.requirements.into_iter().collect(),
weight: spread.weight.map(|w| w as usize),
}
}
}
impl From<VersionInfo> for wadm::types::VersionInfo {
fn from(info: VersionInfo) -> Self {
wasmcloud::wadm::types::VersionInfo {
version: info.version,
deployed: info.deployed,
}
}
}
// Implement the From trait for StatusInfo
impl From<StatusInfo> for wadm::types::StatusInfo {
fn from(info: StatusInfo) -> Self {
wadm::types::StatusInfo {
status_type: info.status_type.into(),
message: info.message,
}
}
}
// Implement the From trait for Status
impl From<Status> for wadm::types::Status {
fn from(status: Status) -> Self {
wadm::types::Status {
version: status.version,
info: status.info.into(),
components: status.components.into_iter().map(|c| c.into()).collect(),
}
}
}
// Implement the From trait for ComponentStatus
impl From<ComponentStatus> for wadm::types::ComponentStatus {
fn from(component_status: ComponentStatus) -> Self {
wadm::types::ComponentStatus {
name: component_status.name,
component_type: component_status.component_type,
info: component_status.info.into(),
traits: component_status
.traits
.into_iter()
.map(|t| t.into())
.collect(),
}
}
}
// Implement the From trait for TraitStatus
impl From<TraitStatus> for wadm::types::TraitStatus {
fn from(trait_status: TraitStatus) -> Self {
wadm::types::TraitStatus {
trait_type: trait_status.trait_type,
info: trait_status.info.into(),
}
}
}

View File

@ -1,14 +1,8 @@
use std::collections::{BTreeMap, HashMap};
use schemars::JsonSchema;
use serde::{de, Deserialize, Serialize};
use utoipa::ToSchema;
use serde::{Deserialize, Serialize};
pub mod api;
#[cfg(feature = "wit")]
pub mod bindings;
#[cfg(feature = "wit")]
pub use bindings::*;
pub mod validation;
/// The default weight for a spread
@ -25,8 +19,6 @@ pub const VERSION_ANNOTATION_KEY: &str = "version";
/// The description key, as predefined by the [OAM
/// spec](https://github.com/oam-dev/spec/blob/master/metadata.md#annotations-format)
pub const DESCRIPTION_ANNOTATION_KEY: &str = "description";
/// The annotation key for shared applications
pub const SHARED_ANNOTATION_KEY: &str = "experimental.wasmcloud.dev/shared";
/// The identifier for the builtin spreadscaler trait type
pub const SPREADSCALER_TRAIT: &str = "spreadscaler";
/// The identifier for the builtin daemonscaler trait type
@ -36,12 +28,9 @@ pub const LINK_TRAIT: &str = "link";
/// The string used for indicating the latest version. It is explicitly forbidden to use as a
/// version for a manifest
pub const LATEST_VERSION: &str = "latest";
/// The default link name
pub const DEFAULT_LINK_NAME: &str = "default";
/// Manifest file based on the Open Application Model (OAM) specification for declaratively managing wasmCloud applications
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Eq, ToSchema, JsonSchema)]
#[serde(deny_unknown_fields)]
/// An OAM manifest
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Eq)]
pub struct Manifest {
/// The OAM version of the manifest
#[serde(rename = "apiVersion")]
@ -72,65 +61,11 @@ impl Manifest {
.map(|v| v.as_str())
}
/// Indicates if the manifest is shared, meaning it can be used by multiple applications
pub fn shared(&self) -> bool {
self.metadata
.annotations
.get(SHARED_ANNOTATION_KEY)
.is_some_and(|v| v.parse::<bool>().unwrap_or(false))
}
/// Returns the components in the manifest
pub fn components(&self) -> impl Iterator<Item = &Component> {
self.spec.components.iter()
}
/// Helper function to find shared components that are missing from the given list of
/// deployed applications
pub fn missing_shared_components(&self, deployed_apps: &[&Manifest]) -> Vec<&Component> {
self.spec
.components
.iter()
.filter(|shared_component| {
match &shared_component.properties {
Properties::Capability {
properties:
CapabilityProperties {
image: None,
application: Some(shared_app),
..
},
}
| Properties::Component {
properties:
ComponentProperties {
image: None,
application: Some(shared_app),
..
},
} => {
!deployed_apps.iter().filter(|a| a.shared()).any(|m| {
    m.metadata.name == shared_app.name
        && m.components().any(|c| {
            c.name == shared_app.component
            // This compares just the enum variant, not the actual properties.
            // For example, if we reference a shared component that's a capability,
            // we want to make sure the deployed component is a capability.
            && std::mem::discriminant(&c.properties)
                == std::mem::discriminant(&shared_component.properties)
        })
})
}
_ => false,
}
})
.collect()
}
/// Returns only the WebAssembly components in the manifest
pub fn wasm_components(&self) -> impl Iterator<Item = &Component> {
self.components()
@ -157,24 +92,10 @@ impl Manifest {
.flatten()
.filter(|t| t.is_link())
}
/// Returns only policies in the manifest
pub fn policies(&self) -> impl Iterator<Item = &Policy> {
self.spec.policies.iter()
}
/// Returns a map of policy names to policies in the manifest
pub fn policy_lookup(&self) -> HashMap<&String, &Policy> {
self.spec
.policies
.iter()
.map(|p| (&p.name, p))
.collect::<HashMap<&String, &Policy>>()
}
}
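// Usage sketch: before deploying a manifest that references shared components,
// report any that are not satisfied by the currently deployed applications.
fn report_missing_shared(candidate: &Manifest, deployed: &[&Manifest]) {
    for component in candidate.missing_shared_components(deployed) {
        eprintln!(
            "shared component '{}' is not satisfied by any deployed shared application",
            component.name
        );
    }
}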
/// The metadata describing the manifest
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Eq, ToSchema, JsonSchema)]
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Eq)]
pub struct Metadata {
/// The name of the manifest. This must be unique per lattice
pub name: String,
@ -187,34 +108,14 @@ pub struct Metadata {
}
/// A representation of an OAM specification
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Eq, ToSchema, JsonSchema)]
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Eq)]
pub struct Specification {
/// The list of components for describing an application
pub components: Vec<Component>,
/// The list of policies describing an application. This is for providing application-wide
/// settings such as configuration for a secrets backend, how to render Kubernetes services,
/// etc. It can be omitted if no policies are needed for an application.
#[serde(default, skip_serializing_if = "Vec::is_empty")]
pub policies: Vec<Policy>,
}
/// A policy definition
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Eq, ToSchema, JsonSchema)]
pub struct Policy {
/// The name of this policy
pub name: String,
/// The properties for this policy
pub properties: BTreeMap<String, String>,
/// The type of the policy
#[serde(rename = "type")]
pub policy_type: String,
}
/// A component definition
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Eq, ToSchema, JsonSchema)]
// TODO: figure out why this can't be uncommented
// #[serde(deny_unknown_fields)]
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Eq)]
pub struct Component {
/// The name of this component
pub name: String,
@ -228,45 +129,8 @@ pub struct Component {
pub traits: Option<Vec<Trait>>,
}
impl Component {
fn secrets(&self) -> Vec<SecretProperty> {
let mut secrets = Vec::new();
if let Some(traits) = self.traits.as_ref() {
let l: Vec<SecretProperty> = traits
.iter()
.filter_map(|t| {
if let TraitProperty::Link(link) = &t.properties {
let mut tgt_iter = link.target.secrets.clone();
if let Some(src) = &link.source {
tgt_iter.extend(src.secrets.clone());
}
Some(tgt_iter)
} else {
None
}
})
.flatten()
.collect();
secrets.extend(l);
};
match &self.properties {
Properties::Component { properties } => {
secrets.extend(properties.secrets.clone());
}
Properties::Capability { properties } => secrets.extend(properties.secrets.clone()),
};
secrets
}
/// Returns only links in the component
fn links(&self) -> impl Iterator<Item = &Trait> {
self.traits.iter().flatten().filter(|t| t.is_link())
}
}
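// In-module sketch: gathering the names of every secret a component
// references, whether declared on its properties or on its links.
fn component_secret_names(component: &Component) -> Vec<String> {
    component.secrets().into_iter().map(|s| s.name).collect()
}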
/// Properties that can be defined for a component
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Eq, ToSchema, JsonSchema)]
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Eq)]
#[serde(tag = "type")]
pub enum Properties {
#[serde(rename = "component", alias = "actor")]
@ -275,18 +139,11 @@ pub enum Properties {
Capability { properties: CapabilityProperties },
}
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Eq, ToSchema, JsonSchema)]
#[serde(deny_unknown_fields)]
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Eq)]
pub struct ComponentProperties {
/// The image reference to use. Required unless the component is a shared component
/// that is defined in another shared application.
#[serde(skip_serializing_if = "Option::is_none")]
pub image: Option<String>,
/// Information to locate a component within a shared application. Cannot be specified
/// if the image is specified.
#[serde(skip_serializing_if = "Option::is_none")]
pub application: Option<SharedApplicationComponentProperties>,
/// The component ID to use for this component. If not supplied, it will be generated
/// The image reference to use
pub image: String,
/// The component ID to use for this actor. If not supplied, it will be generated
/// as a combination of the [Metadata::name] and the image reference.
#[serde(skip_serializing_if = "Option::is_none")]
pub id: Option<String>,
@ -294,56 +151,12 @@ pub struct ComponentProperties {
/// these values at runtime using `wasi:runtime/config`.
#[serde(default, skip_serializing_if = "Vec::is_empty")]
pub config: Vec<ConfigProperty>,
/// Named secret references to pass to the component. The component will be able to retrieve
/// these values at runtime using `wasmcloud:secrets/store`.
#[serde(default, skip_serializing_if = "Vec::is_empty")]
pub secrets: Vec<SecretProperty>,
}
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Eq, Default, ToSchema, JsonSchema)]
pub struct ConfigDefinition {
#[serde(default, skip_serializing_if = "Vec::is_empty")]
pub config: Vec<ConfigProperty>,
#[serde(default, skip_serializing_if = "Vec::is_empty")]
pub secrets: Vec<SecretProperty>,
}
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Eq, Hash, ToSchema, JsonSchema)]
pub struct SecretProperty {
/// The name of the secret. This is used as a reference by the component or capability to
/// get the secret value as a resource.
pub name: String,
/// The properties of the secret that indicate how to retrieve the secret value from a secrets
/// backend and which backend to actually query.
pub properties: SecretSourceProperty,
}
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Eq, Hash, ToSchema, JsonSchema)]
pub struct SecretSourceProperty {
/// The policy to use for retrieving the secret.
pub policy: String,
/// The key to use for retrieving the secret from the backend.
pub key: String,
/// The field to use for retrieving the secret from the backend. This is optional and can be
/// used to retrieve a specific field from a secret.
#[serde(default, skip_serializing_if = "Option::is_none")]
pub field: Option<String>,
/// The version of the secret to retrieve. If not supplied, the latest version will be used.
#[serde(default, skip_serializing_if = "Option::is_none")]
pub version: Option<String>,
}
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Eq, ToSchema, JsonSchema)]
#[serde(deny_unknown_fields)]
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Eq)]
pub struct CapabilityProperties {
/// The image reference to use. Required unless the component is a shared component
/// that is defined in another shared application.
#[serde(skip_serializing_if = "Option::is_none")]
pub image: Option<String>,
/// Information to locate a component within a shared application. Cannot be specified
/// if the image is specified.
#[serde(skip_serializing_if = "Option::is_none")]
pub application: Option<SharedApplicationComponentProperties>,
/// The image reference to use
pub image: String,
/// The component ID to use for this provider. If not supplied, it will be generated
/// as a combination of the [Metadata::name] and the image reference.
#[serde(skip_serializing_if = "Option::is_none")]
@ -352,22 +165,9 @@ pub struct CapabilityProperties {
/// to the provider at runtime using the provider SDK's `init()` function.
#[serde(default, skip_serializing_if = "Vec::is_empty")]
pub config: Vec<ConfigProperty>,
/// Named secret references to pass to the provider. The provider will be able to retrieve
/// these values at runtime using `wasmcloud:secrets/store`.
#[serde(default, skip_serializing_if = "Vec::is_empty")]
pub secrets: Vec<SecretProperty>,
}
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Eq, ToSchema, JsonSchema)]
pub struct SharedApplicationComponentProperties {
/// The name of the shared application
pub name: String,
/// The name of the component in the shared application
pub component: String,
}
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Eq, ToSchema, JsonSchema)]
#[serde(deny_unknown_fields)]
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Eq)]
pub struct Trait {
/// The type of trait specified. This should be a unique string for the type of scaler. As we
/// plan on supporting custom scalers, these traits are not enumerated
@ -391,11 +191,6 @@ impl Trait {
self.trait_type == LINK_TRAIT
}
/// Check if a trait is a scaler
pub fn is_scaler(&self) -> bool {
self.trait_type == SPREADSCALER_TRAIT || self.trait_type == DAEMONSCALER_TRAIT
}
/// Helper that creates a new spreadscaler type trait with the given properties
pub fn new_spreadscaler(props: SpreadScalerProperty) -> Trait {
Trait {
@ -413,9 +208,8 @@ impl Trait {
}
/// Properties for defining traits
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Eq, ToSchema, JsonSchema)]
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Eq)]
#[serde(untagged)]
#[allow(clippy::large_enum_variant)]
pub enum TraitProperty {
Link(LinkProperty),
SpreadScaler(SpreadScalerProperty),
@ -437,11 +231,11 @@ impl From<SpreadScalerProperty> for TraitProperty {
}
}
// impl From<serde_json::Value> for TraitProperty {
// fn from(value: serde_json::Value) -> Self {
// Self::Custom(value)
// }
// }
impl From<serde_json::Value> for TraitProperty {
fn from(value: serde_json::Value) -> Self {
Self::Custom(value)
}
}
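// Sketch: arbitrary JSON that is neither a link nor a spreadscaler can be
// carried as a custom trait via the `From` impl above (values illustrative).
fn custom_trait_property() -> TraitProperty {
    serde_json::json!({ "replicas": 3, "strategy": "custom" }).into()
}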
/// Properties for the config list associated with components, providers, and links
///
@ -457,8 +251,7 @@ impl From<SpreadScalerProperty> for TraitProperty {
///
/// Will result in two config scalers being created, one with the name `basic-kv` and one with the
/// name `default-port`. Wadm will not resolve collisions with configuration names between manifests.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Eq, ToSchema, JsonSchema)]
#[serde(deny_unknown_fields)]
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Eq)]
pub struct ConfigProperty {
/// Name of the config to ensure exists
pub name: String,
@ -477,9 +270,10 @@ impl PartialEq<ConfigProperty> for String {
}
/// Properties for links
#[derive(Debug, Serialize, Clone, PartialEq, Eq, ToSchema, JsonSchema, Default)]
#[serde(deny_unknown_fields)]
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Eq)]
pub struct LinkProperty {
/// The target this link applies to. This should be the name of a component in the manifest
pub target: String,
/// WIT namespace for the link
pub namespace: String,
/// WIT package for the link
@ -487,115 +281,18 @@ pub struct LinkProperty {
/// WIT interfaces for the link
pub interfaces: Vec<String>,
/// Configuration to apply to the source of the link
#[serde(default, skip_serializing_if = "Option::is_none")]
pub source: Option<ConfigDefinition>,
#[serde(default, skip_serializing_if = "Vec::is_empty")]
pub source_config: Vec<ConfigProperty>,
/// Configuration to apply to the target of the link
pub target: TargetConfig,
#[serde(default, skip_serializing_if = "Vec::is_empty")]
pub target_config: Vec<ConfigProperty>,
/// The name of this link
#[serde(skip_serializing_if = "Option::is_none")]
pub name: Option<String>,
#[serde(default, skip_serializing)]
#[deprecated(since = "0.13.0")]
pub source_config: Option<Vec<ConfigProperty>>,
#[serde(default, skip_serializing)]
#[deprecated(since = "0.13.0")]
pub target_config: Option<Vec<ConfigProperty>>,
}
impl<'de> Deserialize<'de> for LinkProperty {
fn deserialize<D>(d: D) -> Result<Self, D::Error>
where
D: serde::Deserializer<'de>,
{
let json = serde_json::value::Value::deserialize(d)?;
let mut target = TargetConfig::default();
let mut source = None;
// Handling the old configuration -- translate to a TargetConfig
if let Some(t) = json.get("target") {
if t.is_string() {
let name = t.as_str().unwrap();
let mut tgt = vec![];
if let Some(tgt_config) = json.get("target_config") {
tgt = serde_json::from_value(tgt_config.clone()).map_err(de::Error::custom)?;
}
target = TargetConfig {
name: name.to_string(),
config: tgt,
secrets: vec![],
};
} else {
// Otherwise handle normally
target =
serde_json::from_value(json["target"].clone()).map_err(de::Error::custom)?;
}
}
if let Some(s) = json.get("source_config") {
let src: Vec<ConfigProperty> =
serde_json::from_value(s.clone()).map_err(de::Error::custom)?;
source = Some(ConfigDefinition {
config: src,
secrets: vec![],
});
}
// If the source block is present then it takes priority
if let Some(s) = json.get("source") {
source = Some(serde_json::from_value(s.clone()).map_err(de::Error::custom)?);
}
// Validate that the required keys are all present
if json.get("namespace").is_none() {
return Err(de::Error::custom("namespace is required"));
}
if json.get("package").is_none() {
return Err(de::Error::custom("package is required"));
}
if json.get("interfaces").is_none() {
return Err(de::Error::custom("interfaces is required"));
}
Ok(LinkProperty {
namespace: json["namespace"].as_str().unwrap().to_string(),
package: json["package"].as_str().unwrap().to_string(),
interfaces: json["interfaces"]
.as_array()
.unwrap()
.iter()
.map(|v| v.as_str().unwrap().to_string())
.collect(),
source,
target,
name: json.get("name").map(|v| v.as_str().unwrap().to_string()),
..Default::default()
})
}
}
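// Backwards-compatibility sketch for the deserializer above (assumes
// `serde_yaml` is available): an old-style link with a flat `target` string
// and `target_config` list is folded into the newer `TargetConfig` shape.
// The field values are illustrative.
fn parse_old_style_link() -> LinkProperty {
    let yaml = r#"
namespace: wasi
package: http
interfaces: ["incoming-handler"]
target: httpserver
target_config:
  - name: http
"#;
    serde_yaml::from_str(yaml).expect("old-style link should deserialize")
}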
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Eq, Default, ToSchema, JsonSchema)]
pub struct TargetConfig {
/// The target this link applies to. This should be the name of a component in the manifest
pub name: String,
#[serde(default, skip_serializing_if = "Vec::is_empty")]
pub config: Vec<ConfigProperty>,
#[serde(default, skip_serializing_if = "Vec::is_empty")]
pub secrets: Vec<SecretProperty>,
}
impl PartialEq<TargetConfig> for String {
fn eq(&self, other: &TargetConfig) -> bool {
self == &other.name
}
}
/// Properties for spread scalers
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Eq, ToSchema, JsonSchema)]
#[serde(deny_unknown_fields)]
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Eq)]
pub struct SpreadScalerProperty {
/// Number of instances to spread across matching requirements
#[serde(alias = "replicas")]
@ -606,8 +303,7 @@ pub struct SpreadScalerProperty {
}
/// Configuration for various spreading requirements
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Eq, ToSchema, JsonSchema)]
#[serde(deny_unknown_fields)]
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Eq)]
pub struct Spread {
/// The name of this spread requirement
pub name: String,
@ -654,13 +350,13 @@ mod test {
#[test]
fn test_oam_deserializer() {
let res = deserialize_json("../../oam/simple1.json");
let res = deserialize_json("./oam/simple1.json");
match res {
Ok(parse_results) => parse_results,
Err(error) => panic!("Error {:?}", error),
};
let res = deserialize_yaml("../../oam/simple1.yaml");
let res = deserialize_yaml("./oam/simple1.yaml");
match res {
Ok(parse_results) => parse_results,
Err(error) => panic!("Error {:?}", error),
@ -670,14 +366,14 @@ mod test {
#[test]
#[ignore] // see TODO in TraitProperty enum
fn test_custom_traits() {
let manifest = deserialize_yaml("../../oam/custom.yaml").expect("Should be able to parse");
let component = manifest
let manifest = deserialize_yaml("./oam/custom.yaml").expect("Should be able to parse");
let actor_component = manifest
.spec
.components
.into_iter()
.find(|comp| matches!(comp.properties, Properties::Component { .. }))
.expect("Should be able to find component");
let traits = component.traits.expect("Should have Vec of traits");
.expect("Should be able to find actor component");
let traits = actor_component.traits.expect("Should have Vec of traits");
assert!(
traits
.iter()
@ -688,7 +384,7 @@ mod test {
#[test]
fn test_config() {
let manifest = deserialize_yaml("../../oam/config.yaml").expect("Should be able to parse");
let manifest = deserialize_yaml("./oam/config.yaml").expect("Should be able to parse");
let props = match &manifest.spec.components[0].properties {
Properties::Component { properties } => properties,
_ => panic!("Should have found capability component"),
@ -721,7 +417,7 @@ mod test {
#[test]
fn test_component_matching() {
let manifest = deserialize_yaml("../../oam/simple2.yaml").expect("Should be able to parse");
let manifest = deserialize_yaml("./oam/simple2.yaml").expect("Should be able to parse");
assert_eq!(
manifest
.spec
@ -730,7 +426,7 @@ mod test {
.filter(|component| matches!(component.properties, Properties::Component { .. }))
.count(),
1,
"Should have found 1 component property"
"Should have found 1 actor property"
);
assert_eq!(
manifest
@ -746,15 +442,15 @@ mod test {
#[test]
fn test_trait_matching() {
let manifest = deserialize_yaml("../../oam/simple2.yaml").expect("Should be able to parse");
// Validate component traits
let manifest = deserialize_yaml("./oam/simple2.yaml").expect("Should be able to parse");
// Validate actor component traits
let traits = manifest
.spec
.components
.clone()
.into_iter()
.find(|component| matches!(component.properties, Properties::Component { .. }))
.expect("Should find component component")
.expect("Should find actor component")
.traits
.expect("Should have traits object");
assert_eq!(traits.len(), 1, "Should have 1 trait");
@ -772,7 +468,7 @@ mod test {
&component.properties,
Properties::Capability {
properties: CapabilityProperties { image, .. }
} if image.clone().expect("image to be present") == "wasmcloud.azurecr.io/httpserver:0.13.1"
} if image == "wasmcloud.azurecr.io/httpserver:0.13.1"
)
})
.expect("Should find capability component")
@ -784,8 +480,8 @@ mod test {
"Should have link property"
);
if let TraitProperty::Link(ld) = &traits[0].properties {
assert_eq!(ld.source.as_ref().unwrap().config, vec![]);
assert_eq!(ld.target.name, "userinfo".to_string());
assert_eq!(ld.source_config, vec![]);
assert_eq!(ld.target, "userinfo".to_string());
} else {
panic!("trait property was not a link definition");
}
@ -814,24 +510,16 @@ mod test {
let trait_item = Trait::new_spreadscaler(spreadscalerprop);
trait_vec.push(trait_item);
let linkdefprop = LinkProperty {
target: TargetConfig {
name: "webcap".to_string(),
..Default::default()
},
target: "webcap".to_string(),
namespace: "wasi".to_string(),
package: "http".to_string(),
interfaces: vec!["incoming-handler".to_string()],
source: Some(ConfigDefinition {
config: {
vec![ConfigProperty {
name: "http".to_string(),
properties: Some(HashMap::from([("port".to_string(), "8080".to_string())])),
}]
},
..Default::default()
}),
source_config: vec![ConfigProperty {
name: "http".to_string(),
properties: Some(HashMap::from([("port".to_string(), "8080".to_string())])),
}],
target_config: vec![],
name: Some("default".to_string()),
..Default::default()
};
let trait_item = Trait::new_link(linkdefprop);
trait_vec.push(trait_item);
@ -840,11 +528,9 @@ mod test {
name: "userinfo".to_string(),
properties: Properties::Component {
properties: ComponentProperties {
image: Some("wasmcloud.azurecr.io/fake:1".to_string()),
application: None,
image: "wasmcloud.azurecr.io/fake:1".to_string(),
id: None,
config: vec![],
secrets: vec![],
},
},
traits: Some(trait_vec),
@ -854,11 +540,9 @@ mod test {
name: "webcap".to_string(),
properties: Properties::Capability {
properties: CapabilityProperties {
image: Some("wasmcloud.azurecr.io/httpserver:0.13.1".to_string()),
application: None,
image: "wasmcloud.azurecr.io/httpserver:0.13.1".to_string(),
id: None,
config: vec![],
secrets: vec![],
},
},
traits: None,
@ -883,11 +567,9 @@ mod test {
name: "ledblinky".to_string(),
properties: Properties::Capability {
properties: CapabilityProperties {
image: Some("wasmcloud.azurecr.io/ledblinky:0.0.1".to_string()),
application: None,
image: "wasmcloud.azurecr.io/ledblinky:0.0.1".to_string(),
id: None,
config: vec![],
secrets: vec![],
},
},
traits: Some(trait_vec),
@ -896,7 +578,6 @@ mod test {
let spec = Specification {
components: component_vec,
policies: vec![],
};
let metadata = Metadata {
name: "my-example-app".to_string(),
@ -959,29 +640,4 @@ mod test {
"Should have found custom properties"
);
}
#[test]
fn test_deprecated_fields_not_set() {
let manifest = deserialize_yaml("../../oam/simple2.yaml").expect("Should be able to parse");
// Validate component traits
let traits = manifest
.spec
.components
.clone()
.into_iter()
.filter(|component| matches!(component.name.as_str(), "webcap"))
.find(|component| matches!(component.properties, Properties::Capability { .. }))
.expect("Should find component component")
.traits
.expect("Should have traits object");
assert_eq!(traits.len(), 1, "Should have 1 trait");
if let TraitProperty::Link(ld) = &traits[0].properties {
assert_eq!(ld.source.as_ref().unwrap().config, vec![]);
#[allow(deprecated)]
let source_config = &ld.source_config;
assert_eq!(source_config, &None);
} else {
panic!("trait property was not a link definition");
};
}
}

View File

@ -1,8 +1,7 @@
//! Logic for model ([`Manifest`]) validation
//!
use std::collections::{HashMap, HashSet};
#[cfg(not(target_family = "wasm"))]
use std::collections::HashMap;
use std::path::Path;
use std::sync::OnceLock;
@ -10,10 +9,7 @@ use anyhow::{Context as _, Result};
use regex::Regex;
use serde::{Deserialize, Serialize};
use crate::{
CapabilityProperties, ComponentProperties, LinkProperty, Manifest, Properties, Trait,
TraitProperty, DEFAULT_LINK_NAME, LATEST_VERSION,
};
use crate::{LinkProperty, Manifest, TraitProperty, LATEST_VERSION};
/// A namespace -> package -> interface lookup
type KnownInterfaceLookup = HashMap<String, HashMap<String, HashMap<String, ()>>>;
@ -25,8 +21,6 @@ type KnownInterfaceLookup = HashMap<String, HashMap<String, HashMap<String, ()>>
/// a known namespace and package, interfaces should generally be well known.
static KNOWN_INTERFACE_LOOKUP: OnceLock<KnownInterfaceLookup> = OnceLock::new();
const SECRET_POLICY_TYPE: &str = "policy.secret.wasmcloud.dev/v1alpha1";
/// Get the static list of known interfaces
fn get_known_interface_lookup() -> &'static KnownInterfaceLookup {
KNOWN_INTERFACE_LOOKUP.get_or_init(|| {
@ -61,12 +55,7 @@ fn get_known_interface_lookup() -> &'static KnownInterfaceLookup {
("config".into(), HashMap::from([("runtime".into(), ())])),
(
"keyvalue".into(),
HashMap::from([
("atomics".into(), ()),
("store".into(), ()),
("batch".into(), ()),
("watch".into(), ()),
]),
HashMap::from([("atomics".into(), ()), ("store".into(), ())]),
),
(
"http".into(),
@ -160,10 +149,9 @@ fn is_invalid_known_interface(
};
// Unknown interface inside known namespace and package is probably a bug
if !iface_lookup.contains_key(interface) {
// An unknown interface inside a known namespace and package is probably a bug, but may be
// a new interface we don't know about yet
// An unknown interface inside a known namespace and package is probably a bug
return vec![ValidationFailure::new(
ValidationFailureLevel::Warning,
ValidationFailureLevel::Error,
format!("unrecognized interface [{namespace}:{package}/{interface}]"),
)];
}
@ -275,7 +263,6 @@ impl ValidationOutput for Vec<ValidationFailure> {
/// # Arguments
///
/// * `path` - Path to the Manifest that will be read into memory and validated
#[cfg(not(target_family = "wasm"))]
pub async fn validate_manifest_file(
path: impl AsRef<Path>,
) -> Result<(Manifest, Vec<ValidationFailure>)> {
@ -299,12 +286,9 @@ pub async fn validate_manifest_file(
pub async fn validate_manifest_bytes(
content: impl AsRef<[u8]>,
) -> Result<(Manifest, Vec<ValidationFailure>)> {
let raw_yaml_content = content.as_ref();
let manifest =
serde_yaml::from_slice(content.as_ref()).context("failed to parse manifest content")?;
let mut failures = validate_manifest(&manifest).await?;
let mut yaml_issues = validate_raw_yaml(raw_yaml_content)?;
failures.append(&mut yaml_issues);
let failures = validate_manifest(&manifest).await?;
Ok((manifest, failures))
}
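// Usage sketch: validate manifest bytes and collapse the failures into a
// simple pass/fail via the `ValidationOutput` trait implemented above.
async fn manifest_is_valid(bytes: &[u8]) -> Result<bool> {
    let (_manifest, failures) = validate_manifest_bytes(bytes).await?;
    Ok(failures.valid())
}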
@ -314,7 +298,6 @@ pub async fn validate_manifest_bytes(
/// - unsupported interfaces (i.e. typos, etc)
/// - unknown packages under known namespaces
/// - "dangling" links (missing components)
/// - secrets mapped to unknown policies
///
/// Since `[ValidationFailure]` implements `ValidationOutput`, you can call `valid()` and other
/// trait methods on it:
@ -342,126 +325,11 @@ pub async fn validate_manifest(manifest: &Manifest) -> Result<Vec<ValidationFail
.into_iter()
.cloned(),
);
failures.extend(core_validation(manifest));
failures.extend(check_misnamed_interfaces(manifest));
failures.extend(check_dangling_links(manifest));
failures.extend(validate_policies(manifest));
failures.extend(ensure_no_custom_traits(manifest));
failures.extend(validate_component_properties(manifest));
failures.extend(check_duplicate_links(manifest));
failures.extend(validate_link_configs(manifest));
Ok(failures)
}
pub fn validate_raw_yaml(content: &[u8]) -> Result<Vec<ValidationFailure>> {
let mut failures = Vec::new();
let raw_content: serde_yaml::Value =
serde_yaml::from_slice(content).context("failed read raw yaml content")?;
failures.extend(validate_components_configs(&raw_content));
Ok(failures)
}
fn core_validation(manifest: &Manifest) -> Vec<ValidationFailure> {
let mut failures = Vec::new();
let mut name_registry: HashSet<String> = HashSet::new();
let mut id_registry: HashSet<String> = HashSet::new();
let mut required_capability_components: HashSet<String> = HashSet::new();
for label in manifest.metadata.labels.iter() {
if !valid_oam_label(label) {
failures.push(ValidationFailure::new(
ValidationFailureLevel::Error,
format!("Invalid OAM label: {:?}", label),
));
}
}
for annotation in manifest.metadata.annotations.iter() {
if !valid_oam_label(annotation) {
failures.push(ValidationFailure::new(
ValidationFailureLevel::Error,
format!("Invalid OAM annotation: {:?}", annotation),
));
}
}
for component in manifest.spec.components.iter() {
// Component name validation: each component (component or provider) should have a unique name
if !name_registry.insert(component.name.clone()) {
failures.push(ValidationFailure::new(
ValidationFailureLevel::Error,
format!("Duplicate component name in manifest: {}", component.name),
));
}
// Provider validation:
// Provider config should be serializable. For all components that have JSON config, validate that
// it can serialize; we need this so it doesn't trigger an error when sending a command down the line.
// Providers should have a unique image ref and link name
if let Properties::Capability {
properties:
CapabilityProperties {
id: Some(component_id),
config: _capability_config,
..
},
} = &component.properties
{
if !id_registry.insert(component_id.to_string()) {
failures.push(ValidationFailure::new(
ValidationFailureLevel::Error,
format!(
"Duplicate component identifier in manifest: {}",
component_id
),
));
}
}
// Component validation : Components should have a unique identifier per manifest
if let Properties::Component {
properties: ComponentProperties { id: Some(id), .. },
} = &component.properties
{
if !id_registry.insert(id.to_string()) {
failures.push(ValidationFailure::new(
ValidationFailureLevel::Error,
format!("Duplicate component identifier in manifest: {}", id),
));
}
}
// Linkdef validation: a linkdef from a component should have a unique target and reference
if let Some(traits_vec) = &component.traits {
for trait_item in traits_vec.iter() {
if let Trait {
// TODO : add trait type validation after custom types are done. See TraitProperty enum.
properties: TraitProperty::Link(LinkProperty { target, .. }),
..
} = &trait_item
{
// Multiple components (with type != 'capability') can declare the same target, so we don't need to check for duplicates on insert
required_capability_components.insert(target.name.to_string());
}
}
}
}
let missing_capability_components = required_capability_components
.difference(&name_registry)
.collect::<Vec<&String>>();
if !missing_capability_components.is_empty() {
failures.push(ValidationFailure::new(
ValidationFailureLevel::Error,
format!(
"The following capability component(s) are missing from the manifest: {:?}",
missing_capability_components
),
));
};
failures
}
/// Check for misnamed host-supported interfaces in the manifest
fn check_misnamed_interfaces(manifest: &Manifest) -> Vec<ValidationFailure> {
let mut failures = Vec::new();
@ -470,8 +338,6 @@ fn check_misnamed_interfaces(manifest: &Manifest) -> Vec<ValidationFailure> {
namespace,
package,
interfaces,
target: _target,
source: _source,
..
}) = &link_trait.properties
{
@ -484,32 +350,6 @@ fn check_misnamed_interfaces(manifest: &Manifest) -> Vec<ValidationFailure> {
failures
}
/// This validation rule should eventually be removed, but at this time (as of wadm 0.14.0)
/// custom traits are not supported. We technically deserialize the custom trait, but 99%
/// of the time this is just a poorly formatted spread or link scaler which is incredibly
/// frustrating to debug.
fn ensure_no_custom_traits(manifest: &Manifest) -> Vec<ValidationFailure> {
let mut failures = Vec::new();
for component in manifest.components() {
if let Some(traits) = &component.traits {
for trait_item in traits {
match &trait_item.properties {
TraitProperty::Custom(trt) if trait_item.is_link() => failures.push(ValidationFailure::new(
ValidationFailureLevel::Error,
format!("Link trait deserialized as custom trait, ensure fields are correct: {}", trt),
)),
TraitProperty::Custom(trt) if trait_item.is_scaler() => failures.push(ValidationFailure::new(
ValidationFailureLevel::Error,
format!("Scaler trait deserialized as custom trait, ensure fields are correct: {}", trt),
)),
_ => (),
}
}
}
}
failures
}
/// Check for "dangling" links, which contain targets that are not specified elsewhere in the
/// WADM manifest.
///
@ -522,16 +362,8 @@ fn check_dangling_links(manifest: &Manifest) -> Vec<ValidationFailure> {
for link_trait in manifest.links() {
match &link_trait.properties {
TraitProperty::Custom(obj) => {
if obj.get("target").is_none() {
failures.push(ValidationFailure::new(
ValidationFailureLevel::Error,
"custom link is missing 'target' property".into(),
));
continue;
}
// Ensure target property is present
match obj["target"]["name"].as_str() {
// Ensure target property is present
match obj["target"].as_str() {
// If target is present, ensure it's pointing to a known component
Some(target) if !lookup.contains_key(&String::from(target)) => {
failures.push(ValidationFailure::new(
@ -544,21 +376,21 @@ fn check_dangling_links(manifest: &Manifest) -> Vec<ValidationFailure> {
// if target property is not present, note that it is missing
None => failures.push(ValidationFailure::new(
ValidationFailureLevel::Error,
"custom link is missing 'target' name property".into(),
"custom link is missing 'target' property".into(),
)),
}
}
TraitProperty::Link(LinkProperty { name, target, .. }) => {
let link_identifier = name
.as_ref()
.map(|n| format!("(name [{n}])"))
.unwrap_or_else(|| format!("(target [{}])", target.name));
if !lookup.contains_key(&target.name) {
.unwrap_or_else(|| format!("(target [{target}])"));
if !lookup.contains_key(target) {
failures.push(ValidationFailure::new(
ValidationFailureLevel::Warning,
format!(
"link {link_identifier} target [{}] is not a listed component",
target.name
"link {link_identifier} target [{target}] is not a listed component"
),
))
}
@ -571,311 +403,6 @@ fn check_dangling_links(manifest: &Manifest) -> Vec<ValidationFailure> {
failures
}
/// Ensure that a manifest has secrets that are mapped to known policies
/// and that those policies have the expected type and properties.
fn validate_policies(manifest: &Manifest) -> Vec<ValidationFailure> {
let policies = manifest.policy_lookup();
let mut failures = Vec::new();
for c in manifest.components() {
// Ensure policies meant for secrets are valid
for secret in c.secrets() {
match policies.get(&secret.properties.policy) {
Some(policy) if policy.policy_type != SECRET_POLICY_TYPE => {
failures.push(ValidationFailure::new(
ValidationFailureLevel::Error,
format!(
"secret '{}' is mapped to policy '{}' which is not a secret policy. Expected type '{SECRET_POLICY_TYPE}'",
secret.name, secret.properties.policy
),
))
}
Some(policy) => {
if !policy.properties.contains_key("backend") {
failures.push(ValidationFailure::new(
ValidationFailureLevel::Error,
format!(
"secret '{}' is mapped to policy '{}' which does not include a 'backend' property",
secret.name, secret.properties.policy
),
))
}
}
None => failures.push(ValidationFailure::new(
ValidationFailureLevel::Error,
format!(
"secret '{}' is mapped to unknown policy '{}'",
secret.name, secret.properties.policy
),
)),
}
}
}
failures
}
/// Ensure that all components in a manifest either specify an image reference or a shared
/// component in a different manifest. Note that this does not validate that the image reference
/// is valid or that the shared component is valid, only that one of the two properties is set.
pub fn validate_component_properties(application: &Manifest) -> Vec<ValidationFailure> {
let mut failures = Vec::new();
for component in application.spec.components.iter() {
match &component.properties {
Properties::Component {
properties:
ComponentProperties {
image,
application,
config,
secrets,
..
},
}
| Properties::Capability {
properties:
CapabilityProperties {
image,
application,
config,
secrets,
..
},
} => match (image, application) {
(Some(_), Some(_)) => {
failures.push(ValidationFailure::new(
ValidationFailureLevel::Error,
"Component cannot have both 'image' and 'application' properties".into(),
));
}
(None, None) => {
failures.push(ValidationFailure::new(
ValidationFailureLevel::Error,
"Component must have either 'image' or 'application' property".into(),
));
}
// This is a problem because of our left-folding config implementation. A shared application
// could specify additional config and actually overwrite the original manifest's config.
(None, Some(shared_properties)) if !config.is_empty() => {
failures.push(ValidationFailure::new(
ValidationFailureLevel::Error,
format!(
"Shared component '{}' cannot specify additional 'config'",
shared_properties.name
),
));
}
(None, Some(shared_properties)) if !secrets.is_empty() => {
failures.push(ValidationFailure::new(
ValidationFailureLevel::Error,
format!(
"Shared component '{}' cannot specify additional 'secrets'",
shared_properties.name
),
));
}
// Shared application components already have scale properties defined in their original manifest
(None, Some(shared_properties))
if component
.traits
.as_ref()
.is_some_and(|traits| traits.iter().any(|trt| trt.is_scaler())) =>
{
failures.push(ValidationFailure::new(
ValidationFailureLevel::Error,
format!(
"Shared component '{}' cannot include a scaler trait",
shared_properties.name
),
));
}
_ => {}
},
}
}
failures
}
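// Sketch: a component declaring both `image` and `application` trips the first
// error arm of the match in `validate_component_properties` above.
fn invalid_both_image_and_application() -> Properties {
    Properties::Component {
        properties: ComponentProperties {
            image: Some("example.registry.io/component:0.1.0".to_string()),
            application: Some(crate::SharedApplicationComponentProperties {
                name: "shared-app".to_string(),
                component: "shared-component".to_string(),
            }),
            id: None,
            config: vec![],
            secrets: vec![],
        },
    }
}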
/// Validates link configs in a WADM application manifest.
///
/// At present this can check for:
/// - all configs that declare `properties` have unique names
/// (configs without properties refer to existing configs)
///
pub fn validate_link_configs(manifest: &Manifest) -> Vec<ValidationFailure> {
let mut failures = Vec::new();
let mut link_config_names = HashSet::new();
for link_trait in manifest.links() {
if let TraitProperty::Link(LinkProperty { target, source, .. }) = &link_trait.properties {
for config in &target.config {
// we only need to check for uniqueness of configs with properties
if config.properties.is_none() {
continue;
}
// Check if config name is unique
if !link_config_names.insert(config.name.clone()) {
failures.push(ValidationFailure::new(
ValidationFailureLevel::Error,
format!("Duplicate link config name found: '{}'", config.name),
));
}
}
if let Some(source) = source {
for config in &source.config {
// we only need to check for uniqueness of configs with properties
if config.properties.is_none() {
continue;
}
// Check if config name is unique
if !link_config_names.insert(config.name.clone()) {
failures.push(ValidationFailure::new(
ValidationFailureLevel::Error,
format!("Duplicate link config name found: '{}'", config.name),
));
}
}
}
}
}
failures
}
/// Function to validate the component configs.
/// As of 0.13.0, `source_config` is deprecated and replaced with `source:config:`;
/// this function validates the raw YAML to check for the deprecated `source_config` and `target_config` keys
pub fn validate_components_configs(application: &serde_yaml::Value) -> Vec<ValidationFailure> {
let mut failures = Vec::new();
if let Some(specs) = application.get("spec") {
if let Some(components) = specs.get("components") {
if let Some(components_sequence) = components.as_sequence() {
for component in components_sequence.iter() {
failures.extend(get_deprecated_configs(component));
}
}
}
}
failures
}
fn get_deprecated_configs(component: &serde_yaml::Value) -> Vec<ValidationFailure> {
let mut failures = vec![];
if let Some(traits) = component.get("traits") {
if let Some(traits_sequence) = traits.as_sequence() {
for trait_ in traits_sequence.iter() {
if let Some(trait_type) = trait_.get("type") {
if trait_type.ne("link") {
continue;
}
}
if let Some(trait_properties) = trait_.get("properties") {
if trait_properties.get("source_config").is_some() {
failures.push(ValidationFailure {
level: ValidationFailureLevel::Warning,
msg: "one of the components' link trait contains a source_config key, please use source:config: rather".to_string(),
});
}
if trait_properties.get("target_config").is_some() {
failures.push(ValidationFailure {
level: ValidationFailureLevel::Warning,
msg: "one of the components' link trait contains a target_config key, please use target:config: rather".to_string(),
});
}
}
}
}
}
failures
}
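// Sketch: raw YAML still using the deprecated `source_config` key, which
// `validate_components_configs` surfaces as a warning (names illustrative).
fn deprecated_source_config_example() -> Vec<ValidationFailure> {
    let yaml = r#"
spec:
  components:
    - name: example
      traits:
        - type: link
          properties:
            source_config:
              - name: http
"#;
    let value: serde_yaml::Value = serde_yaml::from_str(yaml).expect("valid yaml");
    validate_components_configs(&value)
}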
/// This function validates that a key/value pair is a valid OAM label. It's using fairly
/// basic validation rules to ensure that the manifest isn't doing anything horribly wrong. Keeping
/// this function free of regex is intentional to keep this code functional but simple.
///
/// See <https://github.com/oam-dev/spec/blob/master/metadata.md#metadata> for details
pub fn valid_oam_label(label: (&String, &String)) -> bool {
let (key, _) = label;
match key.split_once('/') {
Some((prefix, name)) => is_valid_dns_subdomain(prefix) && is_valid_label_name(name),
None => is_valid_label_name(key),
}
}
pub fn is_valid_dns_subdomain(s: &str) -> bool {
if s.is_empty() || s.len() > 253 {
return false;
}
s.split('.').all(|part| {
// Ensure each part is non-empty, <= 63 characters, starts with an alphabetic character,
// ends with an alphanumeric character, and contains only alphanumeric characters or hyphens
!part.is_empty()
&& part.len() <= 63
&& part.starts_with(|c: char| c.is_ascii_alphabetic())
&& part.ends_with(|c: char| c.is_ascii_alphanumeric())
&& part.chars().all(|c| c.is_ascii_alphanumeric() || c == '-')
})
}
// Ensure each name is non-empty, <= 63 characters, starts with an alphanumeric character,
// ends with an alphanumeric character, and contains only alphanumeric characters, hyphens,
// underscores, or periods
pub fn is_valid_label_name(name: &str) -> bool {
if name.is_empty() || name.len() > 63 {
return false;
}
name.starts_with(|c: char| c.is_ascii_alphanumeric())
&& name.ends_with(|c: char| c.is_ascii_alphanumeric())
&& name
.chars()
.all(|c| c.is_ascii_alphanumeric() || c == '-' || c == '_' || c == '.')
}
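// A few illustrative cases for the validators above:
//
// assert!(valid_oam_label((&"wasmcloud.dev/appspec".to_string(), &"echo".to_string())));
// assert!(is_valid_dns_subdomain("wasmcloud.dev"));
// assert!(is_valid_label_name("app_name-1.0"));
// assert!(!is_valid_dns_subdomain("-bad.prefix"));
// assert!(!is_valid_label_name("-bad"));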
/// Checks whether a manifest contains "duplicate" links.
///
/// Multiple links from the same source with the same name, namespace, package and interface
/// are considered duplicate links.
fn check_duplicate_links(manifest: &Manifest) -> Vec<ValidationFailure> {
let mut failures = Vec::new();
for component in manifest.components() {
let mut link_ids = HashSet::new();
for link in component.links() {
if let TraitProperty::Link(LinkProperty {
name,
namespace,
package,
interfaces,
..
}) = &link.properties
{
for interface in interfaces {
if !link_ids.insert((
name.clone()
.unwrap_or_else(|| DEFAULT_LINK_NAME.to_string()),
namespace,
package,
interface,
)) {
failures.push(ValidationFailure::new(
ValidationFailureLevel::Error,
format!(
"Duplicate link found inside component '{}': {} ({}:{}/{})",
component.name,
name.clone()
.unwrap_or_else(|| DEFAULT_LINK_NAME.to_string()),
namespace,
package,
interface
),
));
};
}
}
}
}
failures
}
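// For example, two link traits on one component that both use the default link name
// for the same namespace:package/interface triple are flagged as duplicates, while
// the same interface under two distinct link names is allowed.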
#[cfg(test)]
mod tests {
use super::is_valid_manifest_name;

View File

@ -1,4 +0,0 @@
[wadm]
path = "../../../wit/wadm"
sha256 = "9795ab1a83023da07da2dc28d930004bd913b9dbf07d68d9ef9207a44348a169"
sha512 = "9a94f33fd861912c81efd441cd19cc8066dbb2df5c2236d0472b66294bddc20ec5ad569484be18334d8c104ae9647b2c81c9878210ac35694ad8ba4a5b3780be"

View File

@ -1 +0,0 @@
wadm = "../../../wit/wadm"

View File

@ -1,48 +0,0 @@
package wasmcloud:wadm@0.2.0;
/// A wadm client which interacts with the wadm API
interface client {
use types.{
version-info,
status,
model-summary,
oam-manifest
};
// Deploys a model to the WADM system.
// If no lattice is provided, the default lattice name 'default' is used.
deploy-model: func(model-name: string, version: option<string>, lattice: option<string>) -> result<string, string>;
// Undeploys a model from the WADM system.
undeploy-model: func(model-name: string, lattice: option<string>, non-destructive: bool) -> result<_, string>;
// Stores the application manifest for later deploys.
// Model is the full YAML or JSON string in this case
// Returns the model name and version respectively.
put-model: func(model: string, lattice: option<string>) -> result<tuple<string, string>, string>;
/// Store an oam manifest directly for later deploys.
put-manifest: func(manifest: oam-manifest, lattice: option<string>) -> result<tuple<string, string>, string>;
// Retrieves the history of a given model name.
get-model-history: func(model-name: string, lattice: option<string>) -> result<list<version-info>, string>;
// Retrieves the status of a given model by name.
get-model-status: func(model-name: string, lattice: option<string>) -> result<status, string>;
// Retrieves details on a given model.
get-model-details: func(model-name: string, version: option<string>, lattice: option<string>) -> result<oam-manifest, string>;
// Deletes a model version from the WADM system.
delete-model-version: func(model-name: string, version: option<string>, lattice: option<string>) -> result<bool, string>;
// Retrieves all application manifests.
get-models: func(lattice: option<string>) -> result<list<model-summary>, string>;
}
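// Typical flow (illustrative): put-model stores a manifest and returns its name and
// version, deploy-model deploys that stored version, and get-model-status can then be
// polled until the application reports a deployed status.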
interface handler {
use types.{status-update};
// Callback handler invoked when an update is received from an app status subscription
handle-status-update: func(msg: status-update) -> result<_, string>;
}

View File

@ -1,218 +0,0 @@
package wasmcloud:wadm@0.2.0;
interface types {
record model-summary {
name: string,
version: string,
description: option<string>,
deployed-version: option<string>,
status: status-type,
status-message: option<string>
}
record version-info {
version: string,
deployed: bool
}
record status-update {
app: string,
status: status
}
record status {
version: string,
info: status-info,
components: list<component-status>
}
record component-status {
name: string,
component-type: string,
info: status-info,
traits: list<trait-status>
}
record trait-status {
trait-type: string,
info: status-info
}
record status-info {
status-type: status-type,
message: string
}
enum put-result {
error,
created,
new-version
}
enum get-result {
error,
success,
not-found
}
enum status-result {
error,
ok,
not-found
}
enum delete-result {
deleted,
error,
noop
}
enum status-type {
undeployed,
reconciling,
deployed,
failed,
waiting,
unhealthy
}
enum deploy-result {
error,
acknowledged,
not-found
}
// The overall structure of an OAM manifest.
record oam-manifest {
api-version: string,
kind: string,
metadata: metadata,
spec: specification,
}
// Metadata describing the manifest
record metadata {
name: string,
annotations: list<tuple<string, string>>,
labels: list<tuple<string, string>>,
}
// The specification for this manifest
record specification {
components: list<component>,
policies: list<policy>
}
// A component definition
record component {
name: string,
properties: properties,
traits: option<list<trait>>,
}
// Properties that can be defined for a component
variant properties {
component(component-properties),
capability(capability-properties),
}
// Properties for a component
record component-properties {
image: option<string>,
application: option<shared-application-component-properties>,
id: option<string>,
config: list<config-property>,
secrets: list<secret-property>,
}
// Properties for a capability
record capability-properties {
image: option<string>,
application: option<shared-application-component-properties>,
id: option<string>,
config: list<config-property>,
secrets: list<secret-property>,
}
// A policy definition
record policy {
name: string,
properties: list<tuple<string, string>>,
%type: string,
}
// A trait definition
record trait {
trait-type: string,
properties: trait-property,
}
// Properties for defining traits
variant trait-property {
link(link-property),
spreadscaler(spreadscaler-property),
custom(string),
}
// Properties for links
record link-property {
namespace: string,
%package: string,
interfaces: list<string>,
source: option<config-definition>,
target: target-config,
name: option<string>,
}
// Configuration definition
record config-definition {
config: list<config-property>,
secrets: list<secret-property>,
}
// Configuration properties
record config-property {
name: string,
properties: option<list<tuple<string, string>>>,
}
// Secret properties
record secret-property {
name: string,
properties: secret-source-property,
}
// Secret source properties
record secret-source-property {
policy: string,
key: string,
field: option<string>,
version: option<string>,
}
// Shared application component properties
record shared-application-component-properties {
name: string,
component: string
}
// Target configuration
record target-config {
name: string,
config: list<config-property>,
secrets: list<secret-property>,
}
// Properties for spread scalers
record spreadscaler-property {
instances: u32,
spread: list<spread>,
}
// Configuration for various spreading requirements
record spread {
name: string,
requirements: list<tuple<string, string>>,
weight: option<u32>,
}
}

View File

@ -1,7 +0,0 @@
package wasmcloud:wadm-types@0.2.0;
world interfaces {
import wasmcloud:wadm/types@0.2.0;
import wasmcloud:wadm/client@0.2.0;
import wasmcloud:wadm/handler@0.2.0;
}

View File

@ -1,7 +1,7 @@
[package]
name = "wadm"
description = "wasmCloud Application Deployment Manager: A tool for running Wasm applications in wasmCloud"
version.workspace = true
version = "0.12.0"
edition = "2021"
authors = ["wasmCloud Team"]
keywords = ["webassembly", "wasmcloud", "wadm"]
@ -9,29 +9,21 @@ license = "Apache-2.0"
readme = "../../README.md"
repository = "https://github.com/wasmcloud/wadm"
[features]
# Enables clap attributes on the wadm configuration struct
cli = ["clap"]
http_admin = ["http", "http-body-util", "hyper", "hyper-util"]
default = []
[package.metadata.cargo-machete]
ignored = ["cloudevents-sdk"]
[dependencies]
anyhow = { workspace = true }
async-nats = { workspace = true }
async-trait = { workspace = true }
base64 = { workspace = true }
bytes = { workspace = true }
chrono = { workspace = true }
clap = { workspace = true, optional = true, features = ["derive", "cargo", "env"]}
cloudevents-sdk = { workspace = true }
http = { workspace = true, features = ["std"], optional = true }
http-body-util = { workspace = true, optional = true }
hyper = { workspace = true, optional = true }
hyper-util = { workspace = true, features = ["server"], optional = true }
futures = { workspace = true }
indexmap = { workspace = true, features = ["serde"] }
jsonschema = { workspace = true }
lazy_static = { workspace = true }
nkeys = { workspace = true }
rand = { workspace = true, features = ["small_rng"] }
regex = { workspace = true }
semver = { workspace = true, features = ["serde"] }
serde = { workspace = true }
serde_json = { workspace = true }
@ -45,7 +37,6 @@ ulid = { workspace = true, features = ["serde"] }
uuid = { workspace = true }
wadm-types = { workspace = true }
wasmcloud-control-interface = { workspace = true }
wasmcloud-secrets-types = { workspace = true }
[dev-dependencies]
serial_test = "3"
serial_test = "1"

411
crates/wadm/oam.schema.json Normal file
View File

@ -0,0 +1,411 @@
{
"$schema": "http://json-schema.org/draft-07/schema#",
"$id": "http://oam.dev/v1/oam.application_configuration.schema.json",
"title": "Manifest",
"description": "A JSON Schema to validate wasmCloud Application Deployment Manager (WADM) manifests",
"type": "object",
"properties": {
"apiVersion": {
"type": "string",
"description": "The specific version of the Open Application Model specification in use"
},
"kind": {
"type": "string",
"description": "The entity type being described in the manifest"
},
"metadata": {
"type": "object",
"description": "Application configuration metadata.",
"properties": {
"name": {
"type": "string"
},
"annotations": {
"type": "object",
"description": "A set of string key/value pairs used as arbitrary annotations on this application configuration.",
"properties": {
"description": {
"type": "string"
}
},
"additionalProperties": {
"type": "string"
}
}
}
},
"spec": {
"type": "object",
"description": "Configuration attributes for various items in the lattice",
"$ref": "#/definitions/manifestSpec"
}
},
"required": [
"apiVersion",
"kind",
"metadata",
"spec"
],
"additionalProperties": false,
"definitions": {
"manifestSpec": {
"type": "object",
"properties": {
"components": {
"type": "array",
"description": "Component instance definitions.",
"items": {
"type": "object",
"anyOf": [
{
"$ref": "#/definitions/wasmComponent"
},
{
"$ref": "#/definitions/providerComponent"
}
]
}
}
},
"required": [
"components"
],
"additionalProperties": false
},
"opconfigVariable": {
"type": "object",
"description": "The Variables section defines variables that may be used elsewhere in the application configuration. The variable section provides a way for an application operator to specify common values that can be substituted into multiple other locations in this configuration (using the [fromVariable(VARNAME)] syntax).",
"properties": {
"name": {
"type": "string",
"description": "The parameter's name. Must be unique per configuration.",
"$comment": "Some systems have upper bounds for name length. Do we limit here?",
"maxLength": 128
},
"value": {
"type": "string",
"description": "The scalar value."
}
},
"required": [
"name",
"value"
],
"additionalProperties": false
},
"applicationScope": {
"type": "object",
"description": "The scope section defines application scopes that will be created with this application configuration.",
"properties": {
"name": {
"type": "string",
"description": "The name of the application scope. Must be unique to the deployment environment."
},
"type": {
"type": "string",
"description": "The fully-qualified GROUP/VERSION.KIND name of the application scope."
},
"properties": {
"type": "object",
"description": "The properties attached to this scope.",
"$ref": "#/definitions/propertiesObject"
}
},
"required": [
"name",
"type"
],
"additionalProperties": false
},
"wasmComponent": {
"type": "object",
"description": "This section defines the instances of components to create with this application configuration.",
"properties": {
"name": {
"type": "string",
"description": "The name of the component to create an instance of."
},
"type": {
"description": "The type of instance : component.",
"anyOf": [
{
"const": "component"
},
{
"const": "actor",
"$comment": "Deprecated: use 'component' instead"
}
]
},
"properties": {
"type": "object",
"description": "Overrides of parameters that are exposed by the application scope type defined in 'type'.",
"$ref": "#/definitions/componentProperties"
},
"traits": {
"type": "array",
"description": "Specifies the traits to attach to this component instance.",
"items": {
"$ref": "#/definitions/trait"
}
}
},
"required": [
"name",
"type",
"properties"
],
"additionalProperties": true
},
"providerComponent": {
"type": "object",
"description": "This section defines the instances of providers to create with this application configuration.",
"properties": {
"name": {
"type": "string",
"description": "The name of the provider to create an instance of."
},
"type": {
"description": "The type of instance: capability.",
"const": "capability"
},
"properties": {
"type": "object",
"description": "Overrides of parameters that are exposed by the application scope type defined in 'type'.",
"$ref": "#/definitions/providerProperties"
},
"traits": {
"type": "array",
"description": "Specifies the traits to attach to this component instance.",
"items": {
"$ref": "#/definitions/trait"
}
}
},
"required": [
"name",
"type",
"properties"
],
"additionalProperties": true
},
"componentProperties": {
"type": "object",
"description": "Values supplied to parameters that are used to override the parameters exposed by other types.",
"properties": {
"image": {
"type": "string",
"description": "The image reference to use for the component.",
"$comment": "Some systems have upper bounds for name length. Do we limit here?",
"maxLength": 512
},
"id": {
"type": "string",
"description": "The component identifier to use for the component. Will be autogenerated if not supplied.",
"maxLength": 64
},
"config": {
"type": "array",
"items": {
"$ref": "#/definitions/configProperty"
},
"default": [],
"description": "Configuration properties for the provider"
}
},
"required": [
"image"
],
"additionalProperties": false
},
"providerProperties": {
"type": "object",
"description": "Values supplied to parameters that are used to override the parameters exposed by other types.",
"properties": {
"image": {
"type": "string",
"description": "The image reference to use for the provider.",
"$comment": "Some systems have upper bounds for name length. Do we limit here?",
"maxLength": 512
},
"id": {
"type": "string",
"description": "The component identifier to use for the provider.",
"maxLength": 64
},
"config": {
"type": "array",
"items": {
"$ref": "#/definitions/configProperty"
},
"default": [],
"description": "Configuration properties for the provider"
}
},
"required": [
"image"
],
"additionalProperties": false
},
"trait": {
"type": "object",
"description": "The trait section defines traits that will be used in a component instance.",
"properties": {
"type": {
"type": "string",
"description": "The trait type for the instance, whether spreadscaler or link"
},
"properties": {
"type": "object",
"description": "Overrides of parameters that are exposed by the trait type defined in 'type'.",
"anyOf": [
{
"$ref": "#/definitions/linkProperties"
},
{
"$ref": "#/definitions/spreadscalerProperties"
}
]
}
},
"required": [
"type",
"properties"
],
"additionalProperties": false
},
"configProperty": {
"type": "object",
"properties": {
"name": {
"type": "string"
},
"properties": {
"type": "object",
"additionalProperties": {
"type": "string"
}
}
},
"required": [
"name"
],
"additionalProperties": false
},
"linkProperties": {
"target": {
"type": "string",
"description": "The target this link applies to. This should be the name of a component in the manifest"
},
"namespace": {
"type": "string",
"description": "WIT namespace for the link"
},
"package": {
"type": "string",
"description": "WIT package for the link"
},
"interfaces": {
"type": "array",
"items": {
"type": "string"
},
"description": "WIT interfaces for the link"
},
"source_config": {
"type": "array",
"items": {
"$ref": "#/definitions/configProperty"
},
"default": [],
"description": "Configuration properties for the source of the link"
},
"target_config": {
"type": "array",
"items": {
"$ref": "#/definitions/configProperty"
},
"default": [],
"description": "Configuration properties for the target of the link"
},
"name": {
"type": "string",
"description": "The name of this link",
"default": null
},
"required": [
"target",
"namespace",
"package",
"interfaces"
]
},
"spreadscalerProperties": {
"type": "object",
"description": "A properties object (for spreadscaler configuration) is an object whose structure is determined by the spreadscaler property schema. It may be a simple value, or it may be a complex object.",
"properties": {
"instances": {
"anyOf": [
{
"type": "integer",
"title": "instances"
},
{
"type": "integer",
"title": "replicas"
}
]
},
"spread": {
"type": "array",
"items": {
"type": "object",
"description": "A spread object for spreading replicas.",
"properties": {
"name": {
"type": "string"
},
"requirements": {
"additionalProperties": {
"type": "string"
}
},
"weight": {
"type": "integer"
}
},
"required": [
"name",
"requirements"
]
}
}
},
"oneOf": [
{
"required": [
"instances"
]
},
{
"required": [
"replicas"
]
}
]
},
"propertiesObject": {
"anyOf": [
{
"type": "object",
"description": "A properties object (for trait and scope configuration) is an object whose structure is determined by the trait or scope property schema. It may be a simple value, or it may be a complex object.",
"additionalProperties": true
},
{
"type": "string",
"description": "A properties object (for trait and scope configuration) is an object whose structure is determined by the trait or scope property schema. It may be a simple value, or it may be a complex object."
}
]
}
}
}
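A minimal sketch (not part of this diff) of checking a manifest against the schema above using the `jsonschema` crate already listed in wadm's dependencies; the schema and manifest inputs here are hypothetical:
use jsonschema::JSONSchema;
fn manifest_is_valid(schema_raw: &str, manifest_raw: &str) -> anyhow::Result<bool> {
    // Parse both documents into serde_json values
    let schema: serde_json::Value = serde_json::from_str(schema_raw)?;
    let manifest: serde_json::Value = serde_json::from_str(manifest_raw)?;
    // Compile once, then validate any number of manifests against it
    let compiled = JSONSchema::compile(&schema)
        .map_err(|e| anyhow::anyhow!("invalid schema: {e}"))?;
    Ok(compiled.is_valid(&manifest))
}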

View File

@ -2,15 +2,14 @@
use std::{
collections::{BTreeMap, HashMap},
error::Error,
hash::{Hash, Hasher},
};
use serde::{Deserialize, Serialize};
use wasmcloud_control_interface::Link;
use wasmcloud_control_interface::InterfaceLinkDefinition;
use crate::{
events::{ComponentScaleFailed, ComponentScaled, Event, ProviderStartFailed, ProviderStarted},
events::{Event, ProviderStartFailed, ProviderStarted},
workers::insert_managed_annotations,
};
@ -45,14 +44,13 @@ impl Command {
/// # Return
/// - The first element in the tuple corresponds to the "success" event a host would output after completing this command
/// - The second element in the tuple corresponds to an optional "failure" event that a host could output if processing fails
pub fn corresponding_event(&self) -> Option<(Event, Option<Event>)> {
pub fn corresponding_event(&self, model_name: &str) -> Option<(Event, Option<Event>)> {
match self {
Command::StartProvider(StartProvider {
annotations,
reference,
host_id,
provider_id,
model_name,
..
}) => {
let mut annotations = annotations.to_owned();
@ -74,39 +72,6 @@ impl Command {
})),
))
}
Command::ScaleComponent(ScaleComponent {
component_id,
host_id,
count,
reference,
annotations,
model_name,
..
}) => {
let mut annotations = annotations.to_owned();
insert_managed_annotations(&mut annotations, model_name);
Some((
Event::ComponentScaled(ComponentScaled {
component_id: component_id.to_owned(),
host_id: host_id.to_owned(),
max_instances: *count as usize,
image_ref: reference.to_owned(),
annotations: annotations.to_owned(),
// We don't know this field from the command
claims: None,
}),
Some(Event::ComponentScaleFailed(ComponentScaleFailed {
component_id: component_id.to_owned(),
host_id: host_id.to_owned(),
max_instances: *count as usize,
image_ref: reference.to_owned(),
annotations: annotations.to_owned(),
// We don't know these fields from the command
error: String::with_capacity(0),
claims: None,
})),
))
}
_ => None,
}
}
@ -236,20 +201,18 @@ pub struct PutLink {
pub model_name: String,
}
impl TryFrom<PutLink> for Link {
type Error = Box<dyn Error + Send + Sync>;
fn try_from(value: PutLink) -> Result<Link, Self::Error> {
Link::builder()
.source_id(&value.source_id)
.target(&value.target)
.name(&value.name)
.wit_namespace(&value.wit_namespace)
.wit_package(&value.wit_package)
.interfaces(value.interfaces)
.source_config(value.source_config)
.target_config(value.target_config)
.build()
impl From<PutLink> for InterfaceLinkDefinition {
fn from(value: PutLink) -> InterfaceLinkDefinition {
InterfaceLinkDefinition {
source_id: value.source_id,
target: value.target,
name: value.name,
wit_namespace: value.wit_namespace,
wit_package: value.wit_package,
interfaces: value.interfaces,
source_config: value.source_config,
target_config: value.target_config,
}
}
}
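A minimal sketch of the conversion above (all values hypothetical; assumes the PutLink fields shown in this file):
let link: InterfaceLinkDefinition = PutLink {
    source_id: "http-component".to_string(),
    target: "kv-provider".to_string(),
    name: "default".to_string(),
    wit_namespace: "wasi".to_string(),
    wit_package: "keyvalue".to_string(),
    interfaces: vec!["store".to_string()],
    source_config: vec![],
    target_config: vec![],
    model_name: "example-app".to_string(),
}
.into();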

View File

@ -1,306 +0,0 @@
#[cfg(feature = "http_admin")]
use core::net::SocketAddr;
use std::path::PathBuf;
#[cfg(feature = "cli")]
use clap::Parser;
use wadm_types::api::DEFAULT_WADM_TOPIC_PREFIX;
use crate::nats::StreamPersistence;
#[derive(Clone, Debug)]
#[cfg_attr(feature = "cli", derive(Parser))]
#[cfg_attr(feature = "cli", command(name = clap::crate_name!(), version = clap::crate_version!(), about = "wasmCloud Application Deployment Manager", long_about = None))]
pub struct WadmConfig {
/// The ID for this wadm process. Defaults to a random UUIDv4 if none is provided. This is used
/// to help with debugging when identifying which process is doing the work
#[cfg_attr(
feature = "cli",
arg(short = 'i', long = "host-id", env = "WADM_HOST_ID")
)]
pub host_id: Option<String>,
/// Whether or not to use structured log output (as JSON)
#[cfg_attr(
feature = "cli",
arg(
short = 'l',
long = "structured-logging",
default_value = "false",
env = "WADM_STRUCTURED_LOGGING"
)
)]
pub structured_logging: bool,
/// Whether or not to enable opentelemetry tracing
#[cfg_attr(
feature = "cli",
arg(
short = 't',
long = "tracing",
default_value = "false",
env = "WADM_TRACING_ENABLED"
)
)]
pub tracing_enabled: bool,
/// The endpoint to use for tracing. Setting this flag enables tracing, even if --tracing is set
/// to false. Defaults to http://localhost:4318/v1/traces if not set and tracing is enabled
#[cfg_attr(
feature = "cli",
arg(short = 'e', long = "tracing-endpoint", env = "WADM_TRACING_ENDPOINT")
)]
pub tracing_endpoint: Option<String>,
/// The NATS JetStream domain to connect to
#[cfg_attr(feature = "cli", arg(short = 'd', env = "WADM_JETSTREAM_DOMAIN"))]
pub domain: Option<String>,
/// (Advanced) Tweak the maximum number of jobs to run for handling events and commands. Be
/// careful how you use this as it can affect performance
#[cfg_attr(
feature = "cli",
arg(short = 'j', long = "max-jobs", env = "WADM_MAX_JOBS")
)]
pub max_jobs: Option<usize>,
/// The URL of the nats server you want to connect to
#[cfg_attr(
feature = "cli",
arg(
short = 's',
long = "nats-server",
env = "WADM_NATS_SERVER",
default_value = "127.0.0.1:4222"
)
)]
pub nats_server: String,
/// Use the specified nkey file or seed literal for authentication. Must be used in conjunction with --nats-jwt
#[cfg_attr(
feature = "cli",
arg(
long = "nats-seed",
env = "WADM_NATS_NKEY",
conflicts_with = "nats_creds",
requires = "nats_jwt"
)
)]
pub nats_seed: Option<String>,
/// Use the specified jwt file or literal for authentication. Must be used in conjunction with --nats-nkey
#[cfg_attr(
feature = "cli",
arg(
long = "nats-jwt",
env = "WADM_NATS_JWT",
conflicts_with = "nats_creds",
requires = "nats_seed"
)
)]
pub nats_jwt: Option<String>,
/// (Optional) NATS credential file to use when authenticating
#[cfg_attr(
feature = "cli", arg(
long = "nats-creds-file",
env = "WADM_NATS_CREDS_FILE",
conflicts_with_all = ["nats_seed", "nats_jwt"],
))]
pub nats_creds: Option<PathBuf>,
/// (Optional) NATS TLS certificate file to use when authenticating
#[cfg_attr(
feature = "cli",
arg(long = "nats-tls-ca-file", env = "WADM_NATS_TLS_CA_FILE")
)]
pub nats_tls_ca_file: Option<PathBuf>,
/// Name of the bucket used for storage of lattice state
#[cfg_attr(
feature = "cli",
arg(
long = "state-bucket-name",
env = "WADM_STATE_BUCKET_NAME",
default_value = "wadm_state"
)
)]
pub state_bucket: String,
/// The amount of time in seconds to wait before a host that has failed to heartbeat is removed from the
/// store. By default, this is 70s because it is 2x the host heartbeat interval plus a little padding
#[cfg_attr(
feature = "cli",
arg(
long = "cleanup-interval",
env = "WADM_CLEANUP_INTERVAL",
default_value = "70"
)
)]
pub cleanup_interval: u64,
/// The API topic prefix to use. This is an advanced setting that should only be used if you
/// know what you are doing
#[cfg_attr(
feature = "cli", arg(
long = "api-prefix",
env = "WADM_API_PREFIX",
default_value = DEFAULT_WADM_TOPIC_PREFIX
))]
pub api_prefix: String,
/// The prefix to use for the internal streams. When running in a multitenant environment,
/// clients share the same JS domain (since messages need to come from lattices).
/// Setting a stream prefix makes it possible to have a separate stream for different wadms running in a multitenant environment.
/// This is an advanced setting that should only be used if you know what you are doing.
#[cfg_attr(
feature = "cli",
arg(long = "stream-prefix", env = "WADM_STREAM_PREFIX")
)]
pub stream_prefix: Option<String>,
/// Name of the bucket used for storage of manifests
#[cfg_attr(
feature = "cli",
arg(
long = "manifest-bucket-name",
env = "WADM_MANIFEST_BUCKET_NAME",
default_value = "wadm_manifests"
)
)]
pub manifest_bucket: String,
/// Run wadm in multitenant mode. This is for advanced multitenant use cases with segmented NATS
/// account traffic and not simple cases where all lattices use credentials from the same
/// account. See the deployment guide for more information
#[cfg_attr(
feature = "cli",
arg(long = "multitenant", env = "WADM_MULTITENANT", hide = true)
)]
pub multitenant: bool,
//
// Max bytes configuration for streams. Primarily configurable to enable deployment on NATS infra
// with limited resources.
//
/// Maximum bytes to keep for the state bucket
#[cfg_attr(
feature = "cli", arg(
long = "state-bucket-max-bytes",
env = "WADM_STATE_BUCKET_MAX_BYTES",
default_value_t = -1,
hide = true
))]
pub max_state_bucket_bytes: i64,
/// Maximum bytes to keep for the manifest bucket
#[cfg_attr(
feature = "cli", arg(
long = "manifest-bucket-max-bytes",
env = "WADM_MANIFEST_BUCKET_MAX_BYTES",
default_value_t = -1,
hide = true
))]
pub max_manifest_bucket_bytes: i64,
/// NATS stream storage type
#[cfg_attr(
feature = "cli", arg(
long = "stream-persistence",
env = "WADM_STREAM_PERSISTENCE",
default_value_t = StreamPersistence::File
))]
pub stream_persistence: StreamPersistence,
/// Maximum bytes to keep for the command stream
#[cfg_attr(
feature = "cli", arg(
long = "command-stream-max-bytes",
env = "WADM_COMMAND_STREAM_MAX_BYTES",
default_value_t = -1,
hide = true
))]
pub max_command_stream_bytes: i64,
/// Maximum bytes to keep for the event stream
#[cfg_attr(
feature = "cli", arg(
long = "event-stream-max-bytes",
env = "WADM_EVENT_STREAM_MAX_BYTES",
default_value_t = -1,
hide = true
))]
pub max_event_stream_bytes: i64,
/// Maximum bytes to keep for the event consumer stream
#[cfg_attr(
feature = "cli", arg(
long = "event-consumer-stream-max-bytes",
env = "WADM_EVENT_CONSUMER_STREAM_MAX_BYTES",
default_value_t = -1,
hide = true
))]
pub max_event_consumer_stream_bytes: i64,
/// Maximum bytes to keep for the status stream
#[cfg_attr(
feature = "cli", arg(
long = "status-stream-max-bytes",
env = "WADM_STATUS_STREAM_MAX_BYTES",
default_value_t = -1,
hide = true
))]
pub max_status_stream_bytes: i64,
/// Maximum bytes to keep for the notify stream
#[cfg_attr(
feature = "cli", arg(
long = "notify-stream-max-bytes",
env = "WADM_NOTIFY_STREAM_MAX_BYTES",
default_value_t = -1,
hide = true
))]
pub max_notify_stream_bytes: i64,
/// Maximum bytes to keep for the wasmbus event stream
#[cfg_attr(
feature = "cli", arg(
long = "wasmbus-event-stream-max-bytes",
env = "WADM_WASMBUS_EVENT_STREAM_MAX_BYTES",
default_value_t = -1,
hide = true
))]
pub max_wasmbus_event_stream_bytes: i64,
#[cfg(feature = "http_admin")]
#[cfg_attr(feature = "cli", clap(long = "http-admin", env = "WADM_HTTP_ADMIN"))]
/// HTTP administration endpoint address
pub http_admin: Option<SocketAddr>,
}
impl Default for WadmConfig {
fn default() -> Self {
Self {
host_id: None,
domain: None,
max_jobs: None,
nats_server: "127.0.0.1:4222".to_string(),
nats_seed: None,
nats_jwt: None,
nats_creds: None,
nats_tls_ca_file: None,
state_bucket: "wadm_state".to_string(),
cleanup_interval: 70,
api_prefix: DEFAULT_WADM_TOPIC_PREFIX.to_string(),
stream_prefix: None,
manifest_bucket: "wadm_manifests".to_string(),
multitenant: false,
max_state_bucket_bytes: -1,
max_manifest_bucket_bytes: -1,
stream_persistence: StreamPersistence::File,
max_command_stream_bytes: -1,
max_event_stream_bytes: -1,
max_event_consumer_stream_bytes: -1,
max_status_stream_bytes: -1,
max_notify_stream_bytes: -1,
max_wasmbus_event_stream_bytes: -1,
structured_logging: false,
tracing_enabled: false,
tracing_endpoint: None,
#[cfg(feature = "http_admin")]
http_admin: None,
}
}
}
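A minimal sketch: start from the defaults above and override only the fields that differ (the server address here is hypothetical):
let config = WadmConfig {
    nats_server: "nats.example.com:4222".to_string(),
    multitenant: true,
    ..WadmConfig::default()
};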

View File

@ -1,6 +1,5 @@
//! A module for creating and consuming a stream of commands from NATS
use std::collections::HashMap;
use std::pin::Pin;
use std::task::{Context, Poll};
@ -14,7 +13,7 @@ use async_nats::{
use futures::{Stream, TryStreamExt};
use tracing::{error, warn};
use super::{CreateConsumer, ScopedMessage, LATTICE_METADATA_KEY, MULTITENANT_METADATA_KEY};
use super::{CreateConsumer, ScopedMessage};
use crate::commands::*;
/// The name of the durable NATS stream and consumer that contains incoming lattice events
@ -43,19 +42,10 @@ impl CommandConsumer {
return Err(format!("Topic {topic} does not match for lattice ID {lattice_id}").into());
}
let (consumer_name, metadata) = if let Some(prefix) = multitenant_prefix {
(
format!("{COMMANDS_CONSUMER_PREFIX}-{lattice_id}_{prefix}"),
HashMap::from([
(LATTICE_METADATA_KEY.to_string(), lattice_id.to_string()),
(MULTITENANT_METADATA_KEY.to_string(), prefix.to_string()),
]),
)
let consumer_name = if let Some(prefix) = multitenant_prefix {
format!("{COMMANDS_CONSUMER_PREFIX}-{lattice_id}_{prefix}")
} else {
(
format!("{COMMANDS_CONSUMER_PREFIX}-{lattice_id}"),
HashMap::from([(LATTICE_METADATA_KEY.to_string(), lattice_id.to_string())]),
)
format!("{COMMANDS_CONSUMER_PREFIX}-{lattice_id}")
};
let consumer = stream
.get_or_create_consumer(
@ -71,7 +61,6 @@ impl CommandConsumer {
max_deliver: 3,
deliver_policy: async_nats::jetstream::consumer::DeliverPolicy::All,
filter_subject: topic.to_owned(),
metadata,
..Default::default()
},
)

View File

@ -1,6 +1,5 @@
//! A module for creating and consuming a stream of events from a wasmcloud lattice
use std::collections::HashMap;
use std::convert::TryFrom;
use std::pin::Pin;
use std::task::{Context, Poll};
@ -15,11 +14,11 @@ use async_nats::{
use futures::{Stream, TryStreamExt};
use tracing::{debug, error, warn};
use super::{CreateConsumer, ScopedMessage, LATTICE_METADATA_KEY, MULTITENANT_METADATA_KEY};
use super::{CreateConsumer, ScopedMessage};
use crate::events::*;
/// The name of the durable NATS stream and consumer that contains incoming lattice events
pub const EVENTS_CONSUMER_PREFIX: &str = "wadm_event_consumer";
pub const EVENTS_CONSUMER_PREFIX: &str = "wadm_events";
/// A stream of all events of a lattice, consumed from a durable NATS stream and consumer
pub struct EventConsumer {
@ -43,19 +42,10 @@ impl EventConsumer {
if !topic.contains(lattice_id) {
return Err(format!("Topic {topic} does not match for lattice ID {lattice_id}").into());
}
let (consumer_name, metadata) = if let Some(prefix) = multitenant_prefix {
(
format!("{EVENTS_CONSUMER_PREFIX}-{lattice_id}_{prefix}"),
HashMap::from([
(LATTICE_METADATA_KEY.to_string(), lattice_id.to_string()),
(MULTITENANT_METADATA_KEY.to_string(), prefix.to_string()),
]),
)
let consumer_name = if let Some(prefix) = multitenant_prefix {
format!("{EVENTS_CONSUMER_PREFIX}-{lattice_id}_{prefix}")
} else {
(
format!("{EVENTS_CONSUMER_PREFIX}-{lattice_id}"),
HashMap::from([(LATTICE_METADATA_KEY.to_string(), lattice_id.to_string())]),
)
format!("{EVENTS_CONSUMER_PREFIX}-{lattice_id}")
};
let consumer = stream
.get_or_create_consumer(
@ -71,7 +61,6 @@ impl EventConsumer {
max_deliver: 3,
deliver_policy: async_nats::jetstream::consumer::DeliverPolicy::All,
filter_subject: topic.to_owned(),
metadata,
..Default::default()
},
)

View File

@ -9,8 +9,6 @@ use tokio::{
};
use tracing::{error, instrument, trace, warn, Instrument};
use crate::consumers::{LATTICE_METADATA_KEY, MULTITENANT_METADATA_KEY};
use super::{CreateConsumer, ScopedMessage};
/// A convenience type for returning work results
@ -143,24 +141,15 @@ impl<C> ConsumerManager<C> {
}
};
// Now that wadm is using NATS 2.10, the lattice and multitenant prefix are stored in the consumer metadata.
// As a fallback for older versions, we can still extract them from the consumer name, which is of the
// form `<consumer_prefix>-<lattice_prefix>_<multitenant_prefix>`
let (lattice_id, multitenant_prefix) = match (info.config.metadata.get(LATTICE_METADATA_KEY), info.config.metadata.get(MULTITENANT_METADATA_KEY)) {
(Some(lattice), Some(multitenant_prefix)) => {
trace!(%lattice, %multitenant_prefix, "Found lattice and multitenant prefix in consumer metadata");
(lattice.to_owned(), Some(multitenant_prefix.to_owned()))
}
(Some(lattice), None) => {
trace!(%lattice, "Found lattice in consumer metadata");
(lattice.to_owned(), None)
}
_ => {
match extract_lattice_and_multitenant(&info.name) {
(Some(id), prefix) => (id, prefix),
(None, _) => return None,
}
}
// TODO: This is somewhat brittle as we could change naming schemes, but it is
// good enough for now. We are just taking the name (which should be of the
// format `<consumer_prefix>-<lattice_prefix>_<multitenant_prefix>`), but this makes sure
// we are always getting the last thing in case of other underscores
//
// When NATS 2.10 is out, store this as metadata on the stream.
let (lattice_id, multitenant_prefix) = match extract_lattice_and_multitenant(&info.name) {
(Some(id), prefix) => (id, prefix),
(None, _) => return None,
};
// Don't create multitenant consumers if running in single tenant mode, and vice versa

View File

@ -16,9 +16,6 @@ pub mod manager;
/// The default time given for a command to ack. This is longer than events due to the possible need for more processing time
pub const DEFAULT_ACK_TIME: Duration = Duration::from_secs(2);
pub const LATTICE_METADATA_KEY: &str = "lattice";
pub const MULTITENANT_METADATA_KEY: &str = "multitenant_prefix";
pub use commands::*;
pub use events::*;

View File

@ -11,7 +11,9 @@ use std::{
use cloudevents::{AttributesReader, Data, Event as CloudEvent, EventBuilder, EventBuilderV10};
use serde::{Deserialize, Serialize};
use thiserror::Error;
use wasmcloud_control_interface::{ComponentDescription, Link, ProviderDescription};
use wasmcloud_control_interface::{
ComponentDescription, InterfaceLinkDefinition, ProviderDescription,
};
use wadm_types::Manifest;
@ -300,6 +302,7 @@ pub struct ComponentScaled {
pub claims: Option<ComponentClaims>,
pub image_ref: String,
pub max_instances: usize,
// TODO: Once we update to the 1.0 release candidate, this will be component_id
pub component_id: String,
#[serde(default)]
pub host_id: String,
@ -318,6 +321,7 @@ pub struct ComponentScaleFailed {
pub claims: Option<ComponentClaims>,
pub image_ref: String,
pub max_instances: usize,
// TODO: Once we update to the 1.0 release candidate, this will be component_id
pub component_id: String,
#[serde(default)]
pub host_id: String,
@ -422,7 +426,7 @@ event_impl!(
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Eq)]
pub struct LinkdefSet {
#[serde(flatten)]
pub linkdef: Link,
pub linkdef: InterfaceLinkDefinition,
}
event_impl!(LinkdefSet, "com.wasmcloud.lattice.linkdef_set");
@ -459,6 +463,7 @@ event_impl!(ConfigDeleted, "com.wasmcloud.lattice.config_deleted");
pub struct HostStarted {
pub labels: HashMap<String, String>,
pub friendly_name: String,
// TODO: Parse as nkey?
#[serde(default)]
pub id: String,
}
@ -473,6 +478,7 @@ event_impl!(
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Eq)]
pub struct HostStopped {
pub labels: HashMap<String, String>,
// TODO: Parse as nkey?
#[serde(default)]
pub id: String,
}
@ -565,8 +571,7 @@ mod test {
#[test]
fn test_all_supported_events() {
let raw = std::fs::read("../../tests/fixtures/manifests/events.json")
.expect("Unable to load test data");
let raw = std::fs::read("./test/data/events.json").expect("Unable to load test data");
let all_events: Vec<cloudevents::Event> = serde_json::from_slice(&raw).unwrap();

View File

@ -1,40 +1,9 @@
use std::sync::Arc;
use std::time::Duration;
use anyhow::Result;
use async_nats::jetstream::{stream::Stream, Context};
use config::WadmConfig;
use tokio::{sync::Semaphore, task::JoinSet};
use tracing::log::debug;
#[cfg(feature = "http_admin")]
use anyhow::Context as _;
#[cfg(feature = "http_admin")]
use hyper::body::Bytes;
#[cfg(feature = "http_admin")]
use hyper_util::rt::{TokioExecutor, TokioIo};
#[cfg(feature = "http_admin")]
use tokio::net::TcpListener;
use crate::{
connections::ControlClientConstructor,
consumers::{
manager::{ConsumerManager, WorkerCreator},
*,
},
nats_utils::LatticeIdParser,
scaler::manager::{ScalerManager, WADM_NOTIFY_PREFIX},
server::{ManifestNotifier, Server},
storage::{nats_kv::NatsKvStore, reaper::Reaper},
workers::{CommandPublisher, CommandWorker, EventWorker, StatusPublisher},
};
pub use nats::StreamPersistence;
pub mod commands;
pub mod config;
pub mod consumers;
pub mod events;
pub mod mirror;
pub mod nats_utils;
pub mod publisher;
pub mod scaler;
@ -42,10 +11,7 @@ pub mod server;
pub mod storage;
pub mod workers;
mod connections;
pub(crate) mod model;
mod nats;
mod observer;
#[cfg(test)]
pub mod test_util;
@ -60,10 +26,9 @@ pub const DEFAULT_MULTITENANT_EVENTS_TOPIC: &str = "*.wasmbus.evt.*.>";
pub const DEFAULT_COMMANDS_TOPIC: &str = "wadm.cmd.*";
/// Default topic to listen to for all status updates. wadm.status.<lattice_id>.<manifest_name>
pub const DEFAULT_STATUS_TOPIC: &str = "wadm.status.*.*";
/// Default topic to listen to for all wadm event updates
pub const DEFAULT_WADM_EVENTS_TOPIC: &str = "wadm.evt.*.>";
/// Default internal wadm event consumer listen topic for the merged wadm and wasmbus events stream.
pub const DEFAULT_WADM_EVENT_CONSUMER_TOPIC: &str = "wadm_event_consumer.evt.*.>";
/// The default listen topic for the merged wadm events stream. This topic is an amalgamation of
/// wasmbus.evt topics plus the wadm.internal topics
pub const DEFAULT_WADM_EVENTS_TOPIC: &str = "wadm.evt.*";
/// Managed by annotation used for labeling things properly in wadm
pub const MANAGED_BY_ANNOTATION: &str = "wasmcloud.dev/managed-by";
/// Identifier for managed by annotation. This is the value [`MANAGED_BY_ANNOTATION`] is set to
@ -74,406 +39,3 @@ pub const APP_SPEC_ANNOTATION: &str = "wasmcloud.dev/appspec";
pub const SCALER_KEY: &str = "wasmcloud.dev/scaler";
/// The default link name. In the future, this will likely be pulled in from another crate
pub const DEFAULT_LINK_NAME: &str = "default";
/// Default stream name for wadm events
pub const DEFAULT_WADM_EVENT_STREAM_NAME: &str = "wadm_events";
/// Default stream name for wadm event consumer
pub const DEFAULT_WADM_EVENT_CONSUMER_STREAM_NAME: &str = "wadm_event_consumer";
/// Default stream name for wadm commands
pub const DEFAULT_COMMAND_STREAM_NAME: &str = "wadm_commands";
/// Default stream name for wadm status
pub const DEFAULT_STATUS_STREAM_NAME: &str = "wadm_status";
/// Default stream name for wadm notifications
pub const DEFAULT_NOTIFY_STREAM_NAME: &str = "wadm_notify";
/// Default stream name for wasmbus events
pub const DEFAULT_WASMBUS_EVENT_STREAM_NAME: &str = "wasmbus_events";
/// Start wadm with the provided [WadmConfig], returning [JoinSet] with two tasks:
/// 1. The server task that listens for API requests
/// 2. The observer task that listens for events and commands
///
/// When embedding wadm in another application, this function should be called to start the wadm
/// server and observer tasks.
///
/// # Usage
///
/// ```no_run
/// async {
/// let config = wadm::config::WadmConfig::default();
/// let mut wadm = wadm::start_wadm(config).await.expect("should start wadm");
/// tokio::select! {
/// res = wadm.join_next() => {
/// match res {
/// Some(Ok(_)) => {
/// tracing::info!("WADM has exited successfully");
/// std::process::exit(0);
/// }
/// Some(Err(e)) => {
/// tracing::error!("WADM has exited with an error: {:?}", e);
/// std::process::exit(1);
/// }
/// None => {
/// tracing::info!("WADM server did not start");
/// std::process::exit(0);
/// }
/// }
/// }
/// _ = tokio::signal::ctrl_c() => {
/// tracing::info!("Received Ctrl+C, shutting down");
/// std::process::exit(0);
/// }
/// }
/// };
/// ```
pub async fn start_wadm(config: WadmConfig) -> Result<JoinSet<Result<()>>> {
// Build storage adapter for lattice state (on by default)
let (client, context) = nats::get_client_and_context(
config.nats_server.clone(),
config.domain.clone(),
config.nats_seed.clone(),
config.nats_jwt.clone(),
config.nats_creds.clone(),
config.nats_tls_ca_file.clone(),
)
.await?;
// TODO: We will probably need to set up all the flags (like lattice prefix and topic prefix) down the line
let connection_pool = ControlClientConstructor::new(client.clone(), None);
let trimmer: &[_] = &['.', '>', '*'];
let store = nats::ensure_kv_bucket(
&context,
config.state_bucket,
1,
config.max_state_bucket_bytes,
config.stream_persistence.into(),
)
.await?;
let state_storage = NatsKvStore::new(store);
let manifest_storage = nats::ensure_kv_bucket(
&context,
config.manifest_bucket,
1,
config.max_manifest_bucket_bytes,
config.stream_persistence.into(),
)
.await?;
let internal_stream_name = |stream_name: &str| -> String {
match config.stream_prefix.clone() {
Some(stream_prefix) => {
format!(
"{}.{}",
stream_prefix.trim_end_matches(trimmer),
stream_name
)
}
None => stream_name.to_string(),
}
};
debug!("Ensuring wadm event stream");
let event_stream = nats::ensure_limits_stream(
&context,
internal_stream_name(DEFAULT_WADM_EVENT_STREAM_NAME),
vec![DEFAULT_WADM_EVENTS_TOPIC.to_owned()],
Some(
"A stream that stores all events coming in on the wadm.evt subject in a cluster"
.to_string(),
),
config.max_event_stream_bytes,
config.stream_persistence.into(),
)
.await?;
debug!("Ensuring command stream");
let command_stream = nats::ensure_stream(
&context,
internal_stream_name(DEFAULT_COMMAND_STREAM_NAME),
vec![DEFAULT_COMMANDS_TOPIC.to_owned()],
Some("A stream that stores all commands for wadm".to_string()),
config.max_command_stream_bytes,
config.stream_persistence.into(),
)
.await?;
let status_stream = nats::ensure_status_stream(
&context,
internal_stream_name(DEFAULT_STATUS_STREAM_NAME),
vec![DEFAULT_STATUS_TOPIC.to_owned()],
config.max_status_stream_bytes,
config.stream_persistence.into(),
)
.await?;
debug!("Ensuring wasmbus event stream");
// Remove the previous wadm_(multitenant)_mirror streams so that they don't
// prevent us from creating the new wasmbus_(multitenant)_events stream
// TODO(joonas): Remove this some time in the future once we're confident
// enough that there are no more wadm_(multitenant)_mirror streams around.
for mirror_stream_name in &["wadm_mirror", "wadm_multitenant_mirror"] {
if (context.get_stream(mirror_stream_name).await).is_ok() {
context.delete_stream(mirror_stream_name).await?;
}
}
let wasmbus_event_subjects = match config.multitenant {
true => vec![DEFAULT_MULTITENANT_EVENTS_TOPIC.to_owned()],
false => vec![DEFAULT_EVENTS_TOPIC.to_owned()],
};
let wasmbus_event_stream = nats::ensure_limits_stream(
&context,
DEFAULT_WASMBUS_EVENT_STREAM_NAME.to_string(),
wasmbus_event_subjects.clone(),
Some(
"A stream that stores all events coming in on the wasmbus.evt subject in a cluster"
.to_string(),
),
config.max_wasmbus_event_stream_bytes,
config.stream_persistence.into(),
)
.await?;
debug!("Ensuring notify stream");
let notify_stream = nats::ensure_notify_stream(
&context,
DEFAULT_NOTIFY_STREAM_NAME.to_owned(),
vec![format!("{WADM_NOTIFY_PREFIX}.*")],
config.max_notify_stream_bytes,
config.stream_persistence.into(),
)
.await?;
debug!("Ensuring event consumer stream");
let event_consumer_stream = nats::ensure_event_consumer_stream(
&context,
DEFAULT_WADM_EVENT_CONSUMER_STREAM_NAME.to_owned(),
DEFAULT_WADM_EVENT_CONSUMER_TOPIC.to_owned(),
vec![&wasmbus_event_stream, &event_stream],
Some(
"A stream that sources from wadm_events and wasmbus_events for wadm event consumer's use"
.to_string(),
),
config.max_event_consumer_stream_bytes,
config.stream_persistence.into(),
)
.await?;
debug!("Creating event consumer manager");
let permit_pool = Arc::new(Semaphore::new(
config.max_jobs.unwrap_or(Semaphore::MAX_PERMITS),
));
let event_worker_creator = EventWorkerCreator {
state_store: state_storage.clone(),
manifest_store: manifest_storage.clone(),
pool: connection_pool.clone(),
command_topic_prefix: DEFAULT_COMMANDS_TOPIC.trim_matches(trimmer).to_owned(),
publisher: context.clone(),
notify_stream,
status_stream: status_stream.clone(),
};
let events_manager: ConsumerManager<EventConsumer> = ConsumerManager::new(
permit_pool.clone(),
event_consumer_stream,
event_worker_creator.clone(),
config.multitenant,
)
.await;
debug!("Creating command consumer manager");
let command_worker_creator = CommandWorkerCreator {
pool: connection_pool,
};
let commands_manager: ConsumerManager<CommandConsumer> = ConsumerManager::new(
permit_pool.clone(),
command_stream,
command_worker_creator.clone(),
config.multitenant,
)
.await;
// TODO(thomastaylor312): We might want to figure out how not to run this globally. Sending a
// synthetic event to the stream could be nice, but all the wadm processes would still fire
// off that tick, resulting in multiple processes handling it. We could maybe get it to work with the
// right duplicate window, but we have no idea when each process could fire a tick. Worst case
// scenario right now is that multiple fire simultaneously and a few of them just delete nothing
let reaper = Reaper::new(
state_storage.clone(),
Duration::from_secs(config.cleanup_interval / 2),
[],
);
let wadm_event_prefix = DEFAULT_WADM_EVENTS_TOPIC.trim_matches(trimmer);
debug!("Creating lattice observer");
let observer = observer::Observer {
parser: LatticeIdParser::new("wasmbus", config.multitenant),
command_manager: commands_manager,
event_manager: events_manager,
reaper,
client: client.clone(),
command_worker_creator,
event_worker_creator,
};
debug!("Subscribing to API topic");
let server = Server::new(
manifest_storage,
client,
Some(&config.api_prefix),
config.multitenant,
status_stream,
ManifestNotifier::new(wadm_event_prefix, context),
)
.await?;
let mut tasks = JoinSet::new();
#[cfg(feature = "http_admin")]
if let Some(addr) = config.http_admin {
debug!("Setting up HTTP administration endpoint");
let socket = TcpListener::bind(addr)
.await
.context("failed to bind on HTTP administation endpoint")?;
let svc = hyper::service::service_fn(move |req| {
const OK: &str = r#"{"status":"ok"}"#;
async move {
let (http::request::Parts { method, uri, .. }, _) = req.into_parts();
match (method.as_str(), uri.path()) {
("HEAD", "/livez") => Ok(http::Response::default()),
("GET", "/livez") => Ok(http::Response::new(http_body_util::Full::new(
Bytes::from(OK),
))),
(method, "/livez") => http::Response::builder()
.status(http::StatusCode::METHOD_NOT_ALLOWED)
.body(http_body_util::Full::new(Bytes::from(format!(
"method `{method}` not supported for path `/livez`"
)))),
("HEAD", "/readyz") => Ok(http::Response::default()),
("GET", "/readyz") => Ok(http::Response::new(http_body_util::Full::new(
Bytes::from(OK),
))),
(method, "/readyz") => http::Response::builder()
.status(http::StatusCode::METHOD_NOT_ALLOWED)
.body(http_body_util::Full::new(Bytes::from(format!(
"method `{method}` not supported for path `/readyz`"
)))),
(.., path) => http::Response::builder()
.status(http::StatusCode::NOT_FOUND)
.body(http_body_util::Full::new(Bytes::from(format!(
"unknown endpoint `{path}`"
)))),
}
}
});
let srv = hyper_util::server::conn::auto::Builder::new(TokioExecutor::new());
tasks.spawn(async move {
loop {
let stream = match socket.accept().await {
Ok((stream, _)) => stream,
Err(err) => {
tracing::error!(?err, "failed to accept HTTP administration connection");
continue;
}
};
if let Err(err) = srv.serve_connection(TokioIo::new(stream), svc).await {
tracing::error!(?err, "failed to serve HTTP administration connection");
}
}
});
}
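// With the admin endpoint enabled (e.g. WADM_HTTP_ADMIN=127.0.0.1:8080, an
// illustrative address), GET on /livez and /readyz returns {"status":"ok"},
// HEAD returns an empty 200, other methods on those paths return 405, and
// unknown paths return 404.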
// Subscribe and handle API requests
tasks.spawn(server.serve());
// Observe and handle events
tasks.spawn(observer.observe(wasmbus_event_subjects));
Ok(tasks)
}
#[derive(Clone)]
struct CommandWorkerCreator {
pool: ControlClientConstructor,
}
#[async_trait::async_trait]
impl WorkerCreator for CommandWorkerCreator {
type Output = CommandWorker;
async fn create(
&self,
lattice_id: &str,
multitenant_prefix: Option<&str>,
) -> anyhow::Result<Self::Output> {
let client = self.pool.get_connection(lattice_id, multitenant_prefix);
Ok(CommandWorker::new(client))
}
}
#[derive(Clone)]
struct EventWorkerCreator<StateStore> {
state_store: StateStore,
manifest_store: async_nats::jetstream::kv::Store,
pool: ControlClientConstructor,
command_topic_prefix: String,
publisher: Context,
notify_stream: Stream,
status_stream: Stream,
}
#[async_trait::async_trait]
impl<StateStore> WorkerCreator for EventWorkerCreator<StateStore>
where
StateStore: crate::storage::Store + Send + Sync + Clone + 'static,
{
type Output = EventWorker<StateStore, wasmcloud_control_interface::Client, Context>;
async fn create(
&self,
lattice_id: &str,
multitenant_prefix: Option<&str>,
) -> anyhow::Result<Self::Output> {
let client = self.pool.get_connection(lattice_id, multitenant_prefix);
let command_publisher = CommandPublisher::new(
self.publisher.clone(),
&format!("{}.{lattice_id}", self.command_topic_prefix),
);
let status_publisher = StatusPublisher::new(
self.publisher.clone(),
Some(self.status_stream.clone()),
&format!("wadm.status.{lattice_id}"),
);
let manager = ScalerManager::new(
self.publisher.clone(),
self.notify_stream.clone(),
lattice_id,
multitenant_prefix,
self.state_store.clone(),
self.manifest_store.clone(),
command_publisher.clone(),
status_publisher.clone(),
client.clone(),
)
.await?;
Ok(EventWorker::new(
self.state_store.clone(),
client,
command_publisher,
status_publisher,
manager,
))
}
}

View File

@ -0,0 +1,152 @@
//! This is a temporary workaround to let us combine multiple topics into a single stream so
//! consumers work properly. This will be removed once NATS 2.10 is out and we have upgraded to
//! using it in wasmcloud projects
use std::{collections::HashMap, sync::Arc};
use async_nats::{
jetstream::{
consumer::pull::{Config as PullConfig, Stream as MessageStream},
stream::Stream as JsStream,
Context,
},
Error as NatsError, HeaderMap,
};
use bytes::Bytes;
use futures::StreamExt;
use tokio::{sync::RwLock, task::JoinHandle};
use tracing::{error, instrument, trace, warn};
type WorkHandles = Arc<RwLock<HashMap<String, JoinHandle<anyhow::Result<()>>>>>;
/// A simple NATS consumer that takes each incoming message and maps it to `{prefix}.{lattice-id}`
/// (e.g. `wadm.evt.default`)
pub struct Mirror {
stream: JsStream,
prefix: String,
handles: WorkHandles,
}
impl Mirror {
/// Returns a new Mirror for the given stream
pub fn new(stream: JsStream, prefix: &str) -> Mirror {
Mirror {
stream,
prefix: prefix.to_owned(),
handles: Arc::new(RwLock::new(HashMap::new())),
}
}
#[instrument(level = "debug", skip(self))]
pub async fn monitor_lattice(
&self,
subject: &str,
lattice_id: &str,
multitenant_prefix: Option<&str>,
) -> Result<(), NatsError> {
if let Some(handle) = self.handles.read().await.get(lattice_id) {
if !handle.is_finished() {
return Ok(());
}
warn!("Handle was marked as completed. Starting monitor again");
}
let consumer_name = if let Some(prefix) = multitenant_prefix {
format!("wadm_mirror-{lattice_id}_{prefix}")
} else {
format!("wadm_mirror-{lattice_id}")
};
trace!("Creating mirror consumer for lattice");
let consumer = self
.stream
.get_or_create_consumer(
&consumer_name,
PullConfig {
durable_name: Some(consumer_name.clone()),
name: Some(consumer_name.clone()),
description: Some(format!("Durable wadm mirror consumer for {lattice_id}")),
ack_policy: async_nats::jetstream::consumer::AckPolicy::Explicit,
ack_wait: std::time::Duration::from_secs(2),
max_deliver: 3,
deliver_policy: async_nats::jetstream::consumer::DeliverPolicy::All,
filter_subject: subject.to_owned(),
..Default::default()
},
)
.await?;
let messages = consumer
.stream()
.max_messages_per_batch(1)
.messages()
.await?;
let handle = tokio::spawn(mirror_worker(
messages,
format!("{}.{lattice_id}", self.prefix),
));
self.handles
.write()
.await
.insert(lattice_id.to_owned(), handle);
Ok(())
}
}
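// A brief usage sketch (stream acquisition elided; the subject and lattice are illustrative):
//
// let mirror = Mirror::new(events_stream, "wadm.evt");
// mirror.monitor_lattice("wasmbus.evt.default", "default", None).await?;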
#[instrument(level = "info", skip(messages))]
async fn mirror_worker(mut messages: MessageStream, publish_topic: String) -> anyhow::Result<()> {
loop {
match messages.next().await {
Some(Ok(msg)) => {
// NOTE(thomastaylor312): I can't actually consume the payload because I can't ack
// (without copy pasting the ack code) due to the partial move. I am working with
// the async_nats maintainers to see if we can add something to get around this, but
// in the meantime, we're just gonna deal with it
if let Err(e) = republish(
&msg.context,
publish_topic.clone(),
msg.message.headers.clone().unwrap_or_default(),
msg.message.payload.clone(),
)
.await
{
error!(error = ?e, "Unable to republish message. Will nak and retry");
if let Err(e) = msg
.ack_with(async_nats::jetstream::AckKind::Nak(None))
.await
{
warn!(error = ?e, "Unable to nak. This message will timeout and redeliver");
}
} else if let Err(e) = msg.double_ack().await {
// There isn't much we can do if this happens as this means we
// successfully published, but couldn't ack
error!(error = ?e, "Unable to ack. This message will timeout and redeliver a duplicate message");
}
}
Some(Err(e)) => {
warn!(error = ?e, "Error when processing message to mirror");
continue;
}
None => {
error!("Mirror stream stopped processing");
anyhow::bail!("Mirror stream stopped processing")
}
}
}
}
async fn republish(
context: &Context,
topic: String,
headers: HeaderMap,
payload: Bytes,
) -> anyhow::Result<()> {
// NOTE(thomastaylor312): A future improvement could be retries here
let acker = context
.publish_with_headers(topic, headers, payload)
.await
.map_err(|e| anyhow::anyhow!("Unable to republish message: {e:?}"))?;
acker
.await
.map(|_| ())
.map_err(|e| anyhow::anyhow!("Error waiting for message acknowledgement {e:?}"))
}

View File

@ -159,8 +159,7 @@ mod test {
#[test]
fn test_versioning() {
let mut manifest = deserialize_yaml("../../tests/fixtures/manifests/simple2.yaml")
.expect("Should be able to parse");
let mut manifest = deserialize_yaml("./oam/simple2.yaml").expect("Should be able to parse");
let mut stored = StoredManifest::default();
assert!(

View File

@ -19,8 +19,6 @@ use crate::events::{ConfigDeleted, ConfigSet};
use crate::workers::ConfigSource;
use crate::{commands::Command, events::Event, scaler::Scaler};
const CONFIG_SCALER_KIND: &str = "ConfigScaler";
pub struct ConfigScaler<ConfigSource> {
config_bucket: ConfigSource,
id: String,
@ -38,14 +36,6 @@ impl<C: ConfigSource + Send + Sync + Clone> Scaler for ConfigScaler<C> {
&self.id
}
fn kind(&self) -> &str {
CONFIG_SCALER_KIND
}
fn name(&self) -> String {
self.config_name.to_string()
}
async fn status(&self) -> StatusInfo {
let _ = self.reconcile().await;
self.status.read().await.to_owned()

View File

@ -1,780 +0,0 @@
//! Contains code for converting the list of [`Component`]s in an application into a list of [`Scaler`]s
//! that are responsible for monitoring and enforcing the desired state of a lattice
use std::{collections::HashMap, time::Duration};
use anyhow::Result;
use tracing::{error, warn};
use wadm_types::{
api::StatusInfo, CapabilityProperties, Component, ComponentProperties, ConfigProperty,
LinkProperty, Policy, Properties, SecretProperty, SharedApplicationComponentProperties,
SpreadScalerProperty, Trait, TraitProperty, DAEMONSCALER_TRAIT, LINK_TRAIT, SPREADSCALER_TRAIT,
};
use wasmcloud_secrets_types::SECRET_PREFIX;
use crate::{
publisher::Publisher,
scaler::{
spreadscaler::{link::LINK_SCALER_KIND, ComponentSpreadScaler, SPREAD_SCALER_KIND},
statusscaler::StatusScaler,
Scaler,
},
storage::{snapshot::SnapshotStore, ReadStore},
workers::{ConfigSource, LinkSource, SecretSource},
DEFAULT_LINK_NAME,
};
use super::{
configscaler::ConfigScaler,
daemonscaler::{provider::ProviderDaemonScaler, ComponentDaemonScaler},
secretscaler::SecretScaler,
spreadscaler::{
link::{LinkScaler, LinkScalerConfig},
provider::{ProviderSpreadConfig, ProviderSpreadScaler},
},
BackoffWrapper,
};
pub(crate) type BoxedScaler = Box<dyn Scaler + Send + Sync + 'static>;
pub(crate) type ScalerList = Vec<BoxedScaler>;
const EMPTY_TRAIT_VEC: Vec<Trait> = Vec::new();
/// Converts a list of manifest [`Component`]s into a [`ScalerList`], resolving shared application
/// references, links, configuration and secrets as necessary.
///
/// # Arguments
/// * `components` - The list of components to convert
/// * `policies` - The policies to use when creating the scalers so they can access secrets
/// * `lattice_id` - The lattice id the scalers operate on
/// * `manifest_name` - The name of the manifest that the scalers are being created for
/// * `notifier_subject` - The subject to use when creating the scalers so they can report status
/// * `notifier` - The publisher to use when creating the scalers so they can report status
/// * `snapshot_data` - The store to use when creating the scalers so they can access lattice state
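///
/// Components that fail shared-application resolution are not dropped; a [`StatusScaler`] in the
/// failed state is pushed in their place so the failure surfaces in the application's status.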
pub(crate) fn manifest_components_to_scalers<S, P, L>(
components: &[Component],
policies: &HashMap<&String, &Policy>,
lattice_id: &str,
manifest_name: &str,
notifier_subject: &str,
notifier: &P,
snapshot_data: &SnapshotStore<S, L>,
) -> ScalerList
where
S: ReadStore + Send + Sync + Clone + 'static,
P: Publisher + Clone + Send + Sync + 'static,
L: LinkSource + ConfigSource + SecretSource + Clone + Send + Sync + 'static,
{
let mut scalers: ScalerList = Vec::new();
components
.iter()
.for_each(|component| match &component.properties {
Properties::Component { properties } => {
// Determine if this component is contained in this manifest or a shared application
let (application_name, component_name) = match resolve_manifest_component(
manifest_name,
&component.name,
properties.image.as_ref(),
properties.application.as_ref(),
) {
Ok(names) => names,
Err(err) => {
error!(err);
scalers.push(Box::new(StatusScaler::new(
uuid::Uuid::new_v4().to_string(),
SPREAD_SCALER_KIND,
&component.name,
StatusInfo::failed(err),
)) as BoxedScaler);
return;
}
};
component_scalers(
&mut scalers,
components,
properties,
component.traits.as_ref(),
manifest_name,
application_name,
component_name,
lattice_id,
policies,
notifier_subject,
notifier,
snapshot_data,
)
}
Properties::Capability { properties } => {
// Determine if this component is contained in this manifest or a shared application
let (application_name, component_name) = match resolve_manifest_component(
manifest_name,
&component.name,
properties.image.as_ref(),
properties.application.as_ref(),
) {
Ok(names) => names,
Err(err) => {
error!(err);
scalers.push(Box::new(StatusScaler::new(
uuid::Uuid::new_v4().to_string(),
SPREAD_SCALER_KIND,
&component.name,
StatusInfo::failed(err),
)) as BoxedScaler);
return;
}
};
provider_scalers(
&mut scalers,
components,
properties,
component.traits.as_ref(),
manifest_name,
application_name,
component_name,
lattice_id,
policies,
notifier_subject,
notifier,
snapshot_data,
)
}
});
scalers
}
/// Helper function, primarily to remove nesting, that extends a [`ScalerList`] with all scalers
/// from a (Wasm) component [`Component`]
///
/// # Arguments
/// * `scalers` - The list of scalers to extend
/// * `components` - The list of components to convert
/// * `properties` - The properties of the component to convert
/// * `traits` - The traits of the component to convert
/// * `manifest_name` - The name of the manifest that the scalers are being created for
/// * `application_name` - The name of the application that the scalers are being created for
/// * `component_name` - The name of the component to convert
/// * **The following arguments are required to create scalers and are passed through directly:**
/// * `lattice_id` - The lattice id the scalers operate on
/// * `policies` - The policies to use when creating the scalers so they can access secrets
/// * `notifier_subject` - The subject to use when creating the scalers so they can report status
/// * `notifier` - The publisher to use when creating the scalers so they can report status
/// * `snapshot_data` - The store to use when creating the scalers so they can access lattice state
#[allow(clippy::too_many_arguments)]
fn component_scalers<S, P, L>(
scalers: &mut ScalerList,
components: &[Component],
properties: &ComponentProperties,
traits: Option<&Vec<Trait>>,
manifest_name: &str,
application_name: &str,
component_name: &str,
lattice_id: &str,
policies: &HashMap<&String, &Policy>,
notifier_subject: &str,
notifier: &P,
snapshot_data: &SnapshotStore<S, L>,
) where
S: ReadStore + Send + Sync + Clone + 'static,
P: Publisher + Clone + Send + Sync + 'static,
L: LinkSource + ConfigSource + SecretSource + Clone + Send + Sync + 'static,
{
scalers.extend(traits.unwrap_or(&EMPTY_TRAIT_VEC).iter().filter_map(|trt| {
// If an image is specified, then it's a component in the same manifest. Otherwise, it's a shared component
let component_id = if properties.image.is_some() {
compute_component_id(manifest_name, properties.id.as_ref(), component_name)
} else {
compute_component_id(application_name, properties.id.as_ref(), component_name)
};
let (config_scalers, mut config_names) =
config_to_scalers(snapshot_data, manifest_name, &properties.config);
let (secret_scalers, secret_names) = secrets_to_scalers(
snapshot_data,
manifest_name,
&properties.secrets,
policies,
);
config_names.append(&mut secret_names.clone());
// TODO(#451): Consider a way to report on status of a shared component
match (trt.trait_type.as_str(), &trt.properties, &properties.image) {
            // Shared application components already have their own spread/daemon scalers;
            // you cannot modify them from another manifest
(SPREADSCALER_TRAIT, TraitProperty::SpreadScaler(_), None) => {
warn!(
"Unsupported SpreadScaler trait specified for a shared component {component_name}"
);
None
}
(DAEMONSCALER_TRAIT, TraitProperty::SpreadScaler(_), None) => {
warn!(
"Unsupported DaemonScaler trait specified for a shared component {component_name}"
);
None
}
(SPREADSCALER_TRAIT, TraitProperty::SpreadScaler(p), Some(image_ref)) => {
                // The image is specified, so this is a component defined in this manifest
Some(Box::new(BackoffWrapper::new(
ComponentSpreadScaler::new(
snapshot_data.clone(),
image_ref.clone(),
component_id,
lattice_id.to_owned(),
application_name.to_owned(),
p.to_owned(),
component_name,
config_names,
),
notifier.clone(),
config_scalers,
secret_scalers,
notifier_subject,
application_name,
Some(Duration::from_secs(5)),
)) as BoxedScaler)
}
(DAEMONSCALER_TRAIT, TraitProperty::SpreadScaler(p), Some(image_ref)) => {
Some(Box::new(BackoffWrapper::new(
ComponentDaemonScaler::new(
snapshot_data.clone(),
image_ref.to_owned(),
component_id,
lattice_id.to_owned(),
application_name.to_owned(),
p.to_owned(),
component_name,
config_names,
),
notifier.clone(),
config_scalers,
secret_scalers,
notifier_subject,
application_name,
Some(Duration::from_secs(5)),
)) as BoxedScaler)
}
(LINK_TRAIT, TraitProperty::Link(p), _) => {
// Find the target component of the link and create a scaler for it
components
.iter()
.find_map(|component| match &component.properties {
Properties::Capability {
properties:
CapabilityProperties {
id,
application,
image,
..
},
}
| Properties::Component {
properties:
ComponentProperties {
id,
application,
image,
..
},
} if component.name == p.target.name => Some(link_scaler(
p,
lattice_id,
manifest_name,
application_name,
&component.name,
component_id.to_string(),
id.as_ref(),
image.as_ref(),
application.as_ref(),
policies,
notifier_subject,
notifier,
snapshot_data,
)),
_ => None,
})
}
_ => None,
}
}));
}
/// Helper function, primarily to remove nesting, that extends a [`ScalerList`] with all scalers
/// from a capability provider [`Component`]
///
/// # Arguments
/// * `scalers` - The list of scalers to extend
/// * `components` - The list of components to convert
/// * `properties` - The properties of the capability provider to convert
/// * `traits` - The traits of the component to convert
/// * `manifest_name` - The name of the manifest that the scalers are being created for
/// * `application_name` - The name of the application that the scalers are being created for
/// * `component_name` - The name of the component to convert
/// * **The following arguments are required to create scalers and are passed through directly:**
/// * `lattice_id` - The lattice id the scalers operate on
/// * `policies` - The policies to use when creating the scalers so they can access secrets
/// * `notifier_subject` - The subject to use when creating the scalers so they can report status
/// * `notifier` - The publisher to use when creating the scalers so they can report status
/// * `snapshot_data` - The store to use when creating the scalers so they can access lattice state
#[allow(clippy::too_many_arguments)]
fn provider_scalers<S, P, L>(
scalers: &mut ScalerList,
components: &[Component],
properties: &CapabilityProperties,
traits: Option<&Vec<Trait>>,
manifest_name: &str,
application_name: &str,
component_name: &str,
lattice_id: &str,
policies: &HashMap<&String, &Policy>,
notifier_subject: &str,
notifier: &P,
snapshot_data: &SnapshotStore<S, L>,
) where
S: ReadStore + Send + Sync + Clone + 'static,
P: Publisher + Clone + Send + Sync + 'static,
L: LinkSource + ConfigSource + SecretSource + Clone + Send + Sync + 'static,
{
// If an image is specified, then it's a provider in the same manifest. Otherwise, it's a shared component
let provider_id = if properties.image.is_some() {
compute_component_id(manifest_name, properties.id.as_ref(), component_name)
} else {
compute_component_id(application_name, properties.id.as_ref(), component_name)
};
let mut scaler_specified = false;
scalers.extend(traits.unwrap_or(&EMPTY_TRAIT_VEC).iter().filter_map(|trt| {
match (trt.trait_type.as_str(), &trt.properties, &properties.image) {
        // Shared application components already have their own spread/daemon scalers;
        // you cannot modify them from another manifest
(SPREADSCALER_TRAIT, TraitProperty::SpreadScaler(_), None) => {
warn!(
"Unsupported SpreadScaler trait specified for a shared provider {component_name}"
);
None
}
(DAEMONSCALER_TRAIT, TraitProperty::SpreadScaler(_), None) => {
warn!(
"Unsupported DaemonScaler trait specified for a shared provider {component_name}"
);
None
}
(SPREADSCALER_TRAIT, TraitProperty::SpreadScaler(p), Some(image)) => {
scaler_specified = true;
let (config_scalers, mut config_names) =
config_to_scalers(snapshot_data, application_name, &properties.config);
let (secret_scalers, secret_names) = secrets_to_scalers(
snapshot_data,
application_name,
&properties.secrets,
policies,
);
config_names.append(&mut secret_names.clone());
Some(Box::new(BackoffWrapper::new(
ProviderSpreadScaler::new(
snapshot_data.clone(),
ProviderSpreadConfig {
lattice_id: lattice_id.to_owned(),
provider_id: provider_id.to_owned(),
provider_reference: image.to_owned(),
spread_config: p.to_owned(),
model_name: application_name.to_owned(),
provider_config: config_names,
},
component_name,
),
notifier.clone(),
config_scalers,
secret_scalers,
notifier_subject,
application_name,
                // Providers get a longer backoff because downloading a provider binary can take a while
Some(Duration::from_secs(60)),
)) as BoxedScaler)
}
(DAEMONSCALER_TRAIT, TraitProperty::SpreadScaler(p), Some(image)) => {
scaler_specified = true;
let (config_scalers, mut config_names) =
config_to_scalers(snapshot_data, application_name, &properties.config);
let (secret_scalers, secret_names) = secrets_to_scalers(
snapshot_data,
application_name,
&properties.secrets,
policies,
);
config_names.append(&mut secret_names.clone());
Some(Box::new(BackoffWrapper::new(
ProviderDaemonScaler::new(
snapshot_data.clone(),
ProviderSpreadConfig {
lattice_id: lattice_id.to_owned(),
provider_id: provider_id.to_owned(),
provider_reference: image.to_owned(),
spread_config: p.to_owned(),
model_name: application_name.to_owned(),
provider_config: config_names,
},
component_name,
),
notifier.clone(),
config_scalers,
secret_scalers,
notifier_subject,
application_name,
                // Providers get a longer backoff because downloading a provider binary can take a while
Some(Duration::from_secs(60)),
)) as BoxedScaler)
}
// Find the target component of the link and create a scaler for it.
(LINK_TRAIT, TraitProperty::Link(p), _) => {
components
.iter()
.find_map(|component| match &component.properties {
// Providers cannot link to other providers, only components
Properties::Capability { .. } if component.name == p.target.name => {
error!(
"Provider {} cannot link to provider {}, only components",
&component.name, p.target.name
);
None
}
Properties::Component {
properties:
ComponentProperties {
image,
application,
id,
..
},
} if component.name == p.target.name => Some(link_scaler(
p,
lattice_id,
manifest_name,
application_name,
&component.name,
provider_id.to_owned(),
id.as_ref(),
image.as_ref(),
application.as_ref(),
policies,
notifier_subject,
notifier,
snapshot_data,
)),
_ => None,
})
}
_ => None,
}
}));
// Allow providers to omit the spreadscaler entirely for simplicity
if !scaler_specified {
if let Some(image) = &properties.image {
let (config_scalers, mut config_names) =
config_to_scalers(snapshot_data, application_name, &properties.config);
let (secret_scalers, mut secret_names) = secrets_to_scalers(
snapshot_data,
application_name,
&properties.secrets,
policies,
);
config_names.append(&mut secret_names);
scalers.push(Box::new(BackoffWrapper::new(
ProviderSpreadScaler::new(
snapshot_data.clone(),
ProviderSpreadConfig {
lattice_id: lattice_id.to_owned(),
provider_id,
provider_reference: image.to_owned(),
spread_config: SpreadScalerProperty {
instances: 1,
spread: vec![],
},
model_name: application_name.to_owned(),
provider_config: config_names,
},
component_name,
),
notifier.clone(),
config_scalers,
secret_scalers,
notifier_subject,
application_name,
            // Providers get a longer backoff because downloading a provider binary can take a while
Some(Duration::from_secs(60)),
)) as BoxedScaler)
}
}
}
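// Illustrative note: the fallback above means a capability provider declared with only an
// `image` and no spreadscaler/daemonscaler trait behaves as if it carried
// `SpreadScalerProperty { instances: 1, spread: vec![] }`, i.e. a single instance placed on
// an eligible host in the lattice.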
/// Resolves configuration, secrets, and the target of a link to create a boxed [`LinkScaler`]
///
/// # Arguments
/// * `link_property` - The properties of the link to convert
/// * `lattice_id` - The lattice id the scalers operate on
/// * `manifest_name` - The name of the manifest that the scalers are being created for
/// * `application_name` - The name of the application that the scalers are being created for
/// * `component_name` - The name of the component to convert
/// * `source_id` - The ID of the source component
/// * `target_id` - The optional ID of the target component
/// * `image` - The optional image reference of the target component
/// * `shared` - The optional shared application reference of the target component
/// * `policies` - The policies to use when creating the scalers so they can access secrets
/// * `notifier_subject` - The subject to use when creating the scalers so they can report status
/// * `notifier` - The publisher to use when creating the scalers so they can report status
/// * `snapshot_data` - The store to use when creating the scalers so they can access lattice state
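///
/// If the link target cannot be resolved, a [`StatusScaler`] is returned instead; its name
/// encodes the link as `source -(namespace:package)-> target`, e.g. (illustrative)
/// `http-component -(wasi:http)-> missing-target`.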
#[allow(clippy::too_many_arguments)]
fn link_scaler<S, P, L>(
link_property: &LinkProperty,
lattice_id: &str,
manifest_name: &str,
application_name: &str,
component_name: &str,
source_id: String,
target_id: Option<&String>,
image: Option<&String>,
shared: Option<&SharedApplicationComponentProperties>,
policies: &HashMap<&String, &Policy>,
notifier_subject: &str,
notifier: &P,
snapshot_data: &SnapshotStore<S, L>,
) -> BoxedScaler
where
S: ReadStore + Send + Sync + Clone + 'static,
P: Publisher + Clone + Send + Sync + 'static,
L: LinkSource + ConfigSource + SecretSource + Clone + Send + Sync + 'static,
{
let (mut config_scalers, mut source_config) = config_to_scalers(
snapshot_data,
manifest_name,
&link_property
.source
.as_ref()
.unwrap_or(&Default::default())
.config,
);
let (target_config_scalers, mut target_config) =
config_to_scalers(snapshot_data, manifest_name, &link_property.target.config);
let (target_secret_scalers, target_secrets) = secrets_to_scalers(
snapshot_data,
manifest_name,
&link_property.target.secrets,
policies,
);
let (mut source_secret_scalers, source_secrets) = secrets_to_scalers(
snapshot_data,
manifest_name,
&link_property
.source
.as_ref()
.unwrap_or(&Default::default())
.secrets,
policies,
);
config_scalers.extend(target_config_scalers);
source_secret_scalers.extend(target_secret_scalers);
target_config.extend(target_secrets);
source_config.extend(source_secrets);
let (target_manifest_name, target_component_name) =
match resolve_manifest_component(manifest_name, component_name, image, shared) {
Ok(name) => name,
Err(err) => {
error!(err);
return Box::new(StatusScaler::new(
uuid::Uuid::new_v4().to_string(),
LINK_SCALER_KIND,
format!(
"{} -({}:{})-> {}",
component_name,
link_property.namespace,
link_property.package,
link_property.target.name
),
StatusInfo::failed(err),
)) as BoxedScaler;
}
};
let target = compute_component_id(target_manifest_name, target_id, target_component_name);
Box::new(BackoffWrapper::new(
LinkScaler::new(
snapshot_data.clone(),
LinkScalerConfig {
source_id,
target,
wit_namespace: link_property.namespace.to_owned(),
wit_package: link_property.package.to_owned(),
wit_interfaces: link_property.interfaces.to_owned(),
name: link_property
.name
.to_owned()
.unwrap_or_else(|| DEFAULT_LINK_NAME.to_string()),
lattice_id: lattice_id.to_owned(),
model_name: application_name.to_owned(),
source_config,
target_config,
},
snapshot_data.clone(),
),
notifier.clone(),
config_scalers,
source_secret_scalers,
notifier_subject,
application_name,
Some(Duration::from_secs(5)),
)) as BoxedScaler
}
/// Returns a tuple containing a list of scalers and a list of the names of the configs that the
/// scalers use.
///
/// Any input [ConfigProperty] that has a `properties` field will be converted into a [ConfigScaler], and
/// the name of the configuration will be modified to be unique to the model and component. If the properties
/// field is not present, the name is used as-is and assumed to be managed externally to wadm.
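///
/// For example (illustrative names): a config named `http` with inline properties in a manifest
/// named `myapp` is managed by wadm under the unique name `myapp-http`, while a config named
/// `http` without properties is referenced as `http` and left untouched.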
fn config_to_scalers<C: ConfigSource + Send + Sync + Clone>(
config_source: &C,
manifest_name: &str,
configs: &[ConfigProperty],
) -> (Vec<ConfigScaler<C>>, Vec<String>) {
configs
.iter()
.map(|config| {
let name = if config.properties.is_some() {
compute_component_id(manifest_name, None, &config.name)
} else {
config.name.clone()
};
(
ConfigScaler::new(config_source.clone(), &name, config.properties.as_ref()),
name,
)
})
.unzip()
}
fn secrets_to_scalers<S: SecretSource + Send + Sync + Clone>(
secret_source: &S,
manifest_name: &str,
secrets: &[SecretProperty],
policies: &HashMap<&String, &Policy>,
) -> (Vec<SecretScaler<S>>, Vec<String>) {
secrets
.iter()
.map(|s| {
let name = compute_secret_id(manifest_name, None, &s.name);
let policy = *policies.get(&s.properties.policy).unwrap();
(
SecretScaler::new(
name.clone(),
policy.clone(),
s.clone(),
secret_source.clone(),
),
name,
)
})
.unzip()
}
/// Based on the name of the model and the optionally provided ID, returns a unique ID for the
/// component that is a sanitized version of the model name and component name, separated
/// by a dash.
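///
/// # Example (illustrative; mirrors the unit tests below)
/// ```ignore
/// assert_eq!(compute_component_id("My App", None, "echo.wasm"), "my_app-echo_wasm");
/// // A user-supplied ID always takes precedence:
/// assert_eq!(compute_component_id("My App", Some(&"myid".to_string()), "echo.wasm"), "myid");
/// ```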
pub(crate) fn compute_component_id(
manifest_name: &str,
component_id: Option<&String>,
component_name: &str,
) -> String {
if let Some(id) = component_id {
id.to_owned()
} else {
format!(
"{}-{}",
manifest_name
.to_lowercase()
.replace(|c: char| !c.is_ascii_alphanumeric(), "_"),
component_name
.to_lowercase()
.replace(|c: char| !c.is_ascii_alphanumeric(), "_")
)
}
}
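/// Computes the component ID exactly as [compute_component_id] does and prefixes it with the
/// secrets prefix from `wasmcloud_secrets_types`; e.g. (illustrative)
/// `compute_secret_id("My App", None, "api-key")` yields `format!("{SECRET_PREFIX}_my_app-api_key")`.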
pub(crate) fn compute_secret_id(
manifest_name: &str,
component_id: Option<&String>,
component_name: &str,
) -> String {
let name = compute_component_id(manifest_name, component_id, component_name);
format!("{SECRET_PREFIX}_{name}")
}
/// Helper function to resolve a link to a manifest component, returning the name of the manifest
/// and the name of the component where the target resides.
///
/// If the component resides in the same manifest, then the name of the manifest & the name of the
/// component as specified will be returned. In the case that the component resides in a shared
/// application, the name of the shared application & the name of the component in that application
/// will be returned.
///
/// # Arguments
/// * `application_name` - The name of the manifest that the scalers are being created for
/// * `component_name` - The name of the component in the source manifest to target
/// * `component_image_ref` - The image reference for the component
/// * `shared_app_info` - The optional shared application reference for the component
fn resolve_manifest_component<'a>(
application_name: &'a str,
component_name: &'a str,
component_image_ref: Option<&'a String>,
shared_app_info: Option<&'a SharedApplicationComponentProperties>,
) -> Result<(&'a str, &'a str), &'a str> {
match (component_image_ref, shared_app_info) {
(Some(_), None) => Ok((application_name, component_name)),
(None, Some(app)) => Ok((app.name.as_str(), app.component.as_str())),
        // These two cases should both be unreachable, since they are caught by manifest
        // validation before the manifest is stored. Just in case, we log an error and ensure the status is failed
(None, None) => Err("Application did not specify an image or shared application reference"),
(Some(_image), Some(_app)) => {
Err("Application specified both an image and a shared application reference")
}
}
}
#[cfg(test)]
mod test {
use super::compute_component_id;
#[test]
fn compute_proper_component_id() {
// User supplied ID always takes precedence
assert_eq!(
compute_component_id("mymodel", Some(&"myid".to_string()), "echo"),
"myid"
);
assert_eq!(
compute_component_id(
"some model name with spaces cause yaml",
Some(&"myid".to_string()),
" echo "
),
"myid"
);
// Sanitize component reference
assert_eq!(
compute_component_id("mymodel", None, "echo-component"),
"mymodel-echo_component"
);
// Ensure we can support spaces in the model name, because YAML strings
assert_eq!(
compute_component_id("some model name with spaces cause yaml", None, "echo"),
"some_model_name_with_spaces_cause_yaml-echo"
);
// Ensure we can support spaces in the model name, because YAML strings
// Ensure we can support lowercasing the reference as well, just in case
assert_eq!(
compute_component_id("My ThInG", None, "thing.wasm"),
"my_thing-thing_wasm"
);
}
}

View File

@ -7,7 +7,6 @@ use tokio::sync::RwLock;
use tracing::{instrument, trace};
use wadm_types::{api::StatusInfo, Spread, SpreadScalerProperty, TraitProperty};
use crate::events::ConfigSet;
use crate::scaler::spreadscaler::{
compute_ineligible_hosts, eligible_hosts, spreadscaler_annotations,
};
@ -18,19 +17,19 @@ use crate::{
storage::{Component, Host, ReadStore},
};
use super::compute_id_sha256;
use super::compute_config_hash;
pub mod provider;
// Annotation constants
pub const DAEMON_SCALER_KIND: &str = "DaemonScaler";
pub const COMPONENT_DAEMON_SCALER_TYPE: &str = "componentdaemonscaler";
/// Config for a ComponentDaemonScaler
/// Config for an ActorDaemonScaler
#[derive(Clone, Debug)]
struct ComponentSpreadConfig {
/// OCI, Bindle, or File reference for a component
component_reference: String,
/// Unique component identifier for a component
struct ActorSpreadConfig {
/// OCI, Bindle, or File reference for an actor
actor_reference: String,
/// Unique component identifier for an actor
component_id: String,
/// Lattice ID that this DaemonScaler monitors
lattice_id: String,
@ -40,13 +39,13 @@ struct ComponentSpreadConfig {
spread_config: SpreadScalerProperty,
}
/// The ComponentDaemonScaler ensures that a certain number of instances are running on every host, according to a
/// The ActorDaemonScaler ensures that a certain number of instances are running on every host, according to a
/// [SpreadScalerProperty](crate::model::SpreadScalerProperty)
///
/// If no [Spreads](crate::model::Spread) are specified, this Scaler simply maintains the number of instances
/// on every available host.
pub struct ComponentDaemonScaler<S> {
spread_config: ComponentSpreadConfig,
pub struct ActorDaemonScaler<S> {
spread_config: ActorSpreadConfig,
store: S,
id: String,
status: RwLock<StatusInfo>,
@ -54,19 +53,11 @@ pub struct ComponentDaemonScaler<S> {
}
#[async_trait]
impl<S: ReadStore + Send + Sync + Clone> Scaler for ComponentDaemonScaler<S> {
impl<S: ReadStore + Send + Sync + Clone> Scaler for ActorDaemonScaler<S> {
fn id(&self) -> &str {
&self.id
}
fn kind(&self) -> &str {
DAEMON_SCALER_KIND
}
fn name(&self) -> String {
self.spread_config.component_id.to_string()
}
async fn status(&self) -> StatusInfo {
let _ = self.reconcile().await;
self.status.read().await.to_owned()
@ -120,9 +111,6 @@ impl<S: ReadStore + Send + Sync + Clone> Scaler for ComponentDaemonScaler<S> {
Ok(Vec::new())
}
}
Event::ConfigSet(ConfigSet { config_name }) if self.config.contains(config_name) => {
self.reconcile().await
}
// No other event impacts the job of this scaler so we can ignore it
_ => Ok(Vec::new()),
}
@ -153,7 +141,7 @@ impl<S: ReadStore + Send + Sync + Clone> Scaler for ComponentDaemonScaler<S> {
if host.components.contains_key(component_id) {
Some(Command::ScaleComponent(ScaleComponent {
component_id: component_id.to_owned(),
reference: self.spread_config.component_reference.to_owned(),
reference: self.spread_config.actor_reference.to_owned(),
host_id: host_id.to_string(),
count: 0,
model_name: self.spread_config.model_name.to_owned(),
@ -188,14 +176,14 @@ impl<S: ReadStore + Send + Sync + Clone> Scaler for ComponentDaemonScaler<S> {
let eligible_hosts = eligible_hosts(&hosts, spread);
if !eligible_hosts.is_empty() {
// Create a list of (host_id, current_count) tuples
// current_count is the number of component instances that are running for this spread on this host
let components_per_host = eligible_hosts
// current_count is the number of actor instances that are running for this spread on this host
let actors_per_host = eligible_hosts
.into_keys()
.map(|id| {
let count = component
.as_ref()
.and_then(|component| {
component.instances.get(&id.to_string()).map(|instances| {
.and_then(|actor| {
actor.instances.get(&id.to_string()).map(|instances| {
instances
.iter()
.filter_map(|info| {
@ -218,19 +206,19 @@ impl<S: ReadStore + Send + Sync + Clone> Scaler for ComponentDaemonScaler<S> {
.collect::<Vec<(&String, usize)>>();
Some(
components_per_host
actors_per_host
.iter()
.filter_map(|(host_id, current_count)| {
// Here we'll generate commands for the proper host depending on where they are running
match current_count.cmp(&self.spread_config.spread_config.instances)
{
Ordering::Equal => None,
// Scale component can handle both up and down scaling
// Scale actor can handle both up and down scaling
Ordering::Less | Ordering::Greater => {
Some(Command::ScaleComponent(ScaleComponent {
reference: self
.spread_config
.component_reference
.actor_reference
.to_owned(),
component_id: component_id.to_owned(),
host_id: host_id.to_string(),
@ -249,28 +237,23 @@ impl<S: ReadStore + Send + Sync + Clone> Scaler for ComponentDaemonScaler<S> {
.collect::<Vec<Command>>(),
)
} else {
// No hosts were eligible, so we can't attempt to add or remove components
// No hosts were eligible, so we can't attempt to add or remove actors
trace!(?spread.name, "Found no eligible hosts for daemon scaler");
spread_status.push(StatusInfo::failed(&format!(
"Could not satisfy daemonscaler {} for {}, 0 eligible hosts found.",
spread.name, self.spread_config.component_reference
spread.name, self.spread_config.actor_reference
)));
None
}
})
.flatten()
.collect::<Vec<Command>>();
trace!(?commands, "Calculated commands for component daemon scaler");
trace!(?commands, "Calculated commands for actor daemon scaler");
let status = match (spread_status.is_empty(), commands.is_empty()) {
// No failures, no commands, scaler satisfied
(true, true) => StatusInfo::deployed(""),
// No failures, commands generated, scaler is reconciling
(true, false) => {
StatusInfo::reconciling(&format!("Scaling component on {} host(s)", commands.len()))
}
// Failures occurred, scaler is in a failed state
(false, _) => StatusInfo::failed(
(_, false) => StatusInfo::reconciling(""),
(false, true) => StatusInfo::failed(
&spread_status
.into_iter()
.map(|s| s.message)
@ -289,7 +272,7 @@ impl<S: ReadStore + Send + Sync + Clone> Scaler for ComponentDaemonScaler<S> {
let mut config_clone = self.spread_config.clone();
config_clone.spread_config.instances = 0;
let cleanerupper = ComponentDaemonScaler {
let cleanerupper = ActorDaemonScaler {
spread_config: config_clone,
store: self.store.clone(),
id: self.id.clone(),
@ -301,12 +284,12 @@ impl<S: ReadStore + Send + Sync + Clone> Scaler for ComponentDaemonScaler<S> {
}
}
impl<S: ReadStore + Send + Sync> ComponentDaemonScaler<S> {
/// Construct a new ComponentDaemonScaler with specified configuration values
impl<S: ReadStore + Send + Sync> ActorDaemonScaler<S> {
/// Construct a new ActorDaemonScaler with specified configuration values
#[allow(clippy::too_many_arguments)]
pub fn new(
store: S,
component_reference: String,
actor_reference: String,
component_id: String,
lattice_id: String,
model_name: String,
@ -314,18 +297,12 @@ impl<S: ReadStore + Send + Sync> ComponentDaemonScaler<S> {
component_name: &str,
config: Vec<String>,
) -> Self {
// Compute the id of this scaler based on all of the configuration values
// that make it unique. This is used during upgrades to determine if a
// scaler is the same as a previous one.
let mut id_parts = vec![
DAEMON_SCALER_KIND,
&model_name,
component_name,
&component_id,
&component_reference,
];
id_parts.extend(config.iter().map(std::string::String::as_str));
let id = compute_id_sha256(&id_parts);
let mut id =
format!("{COMPONENT_DAEMON_SCALER_TYPE}-{model_name}-{component_name}-{component_id}");
if !config.is_empty() {
id.push('-');
id.push_str(&compute_config_hash(&config));
}
// If no spreads are specified, an empty spread is sufficient to match _every_ host
// in a lattice
let spread_config = if spread_config.spread.is_empty() {
@ -338,8 +315,8 @@ impl<S: ReadStore + Send + Sync> ComponentDaemonScaler<S> {
};
Self {
store,
spread_config: ComponentSpreadConfig {
component_reference,
spread_config: ActorSpreadConfig {
actor_reference,
component_id,
lattice_id,
spread_config,
@ -361,16 +338,16 @@ mod test {
sync::Arc,
};
use anyhow::{anyhow, Result};
use anyhow::Result;
use chrono::Utc;
use wadm_types::{api::StatusType, Spread, SpreadScalerProperty};
use wasmcloud_control_interface::{HostInventory, Link};
use wasmcloud_control_interface::{HostInventory, InterfaceLinkDefinition};
use crate::{
commands::Command,
consumers::{manager::Worker, ScopedMessage},
events::{Event, LinkdefDeleted, LinkdefSet, ProviderStarted, ProviderStopped},
scaler::{daemonscaler::ComponentDaemonScaler, manager::ScalerManager, Scaler},
scaler::{daemonscaler::ActorDaemonScaler, manager::ScalerManager, Scaler},
storage::{Component, Host, Store, WadmComponentInfo},
test_util::{NoopPublisher, TestLatticeSource, TestStore},
workers::{CommandPublisher, EventWorker, StatusPublisher},
@ -381,7 +358,7 @@ mod test {
#[tokio::test]
async fn can_compute_spread_commands() -> Result<()> {
let lattice_id = "one_host";
let component_reference = "fakecloud.azurecr.io/echo:0.3.4".to_string();
let actor_reference = "fakecloud.azurecr.io/echo:0.3.4".to_string();
let component_id = "fakecloud_azurecr_io_echo_0_3_4".to_string();
let host_id = "NASDASDIMAREALHOST";
@ -432,9 +409,9 @@ mod test {
],
};
let daemonscaler = ComponentDaemonScaler::new(
let daemonscaler = ActorDaemonScaler::new(
store.clone(),
component_reference.to_string(),
actor_reference.to_string(),
component_id.to_string(),
lattice_id.to_string(),
MODEL_NAME.to_string(),
@ -447,7 +424,7 @@ mod test {
assert_eq!(cmds.len(), 4);
assert!(cmds.contains(&Command::ScaleComponent(ScaleComponent {
component_id: component_id.to_string(),
reference: component_reference.to_string(),
reference: actor_reference.to_string(),
host_id: host_id.to_string(),
count: 13,
model_name: MODEL_NAME.to_string(),
@ -456,7 +433,7 @@ mod test {
})));
assert!(cmds.contains(&Command::ScaleComponent(ScaleComponent {
component_id: component_id.to_string(),
reference: component_reference.to_string(),
reference: actor_reference.to_string(),
host_id: host_id.to_string(),
count: 13,
model_name: MODEL_NAME.to_string(),
@ -465,7 +442,7 @@ mod test {
})));
assert!(cmds.contains(&Command::ScaleComponent(ScaleComponent {
component_id: component_id.to_string(),
reference: component_reference.to_string(),
reference: actor_reference.to_string(),
host_id: host_id.to_string(),
count: 13,
model_name: MODEL_NAME.to_string(),
@ -474,7 +451,7 @@ mod test {
})));
assert!(cmds.contains(&Command::ScaleComponent(ScaleComponent {
component_id: component_id.to_string(),
reference: component_reference.to_string(),
reference: actor_reference.to_string(),
host_id: host_id.to_string(),
count: 13,
model_name: MODEL_NAME.to_string(),
@ -489,9 +466,9 @@ mod test {
async fn can_scale_up_and_down() -> Result<()> {
let lattice_id = "computing_spread_commands";
let echo_ref = "fakecloud.azurecr.io/echo:0.3.4".to_string();
let echo_id = "MASDASDIAMAREALCOMPONENTECHO";
let echo_id = "MASDASDIAMAREALACTORECHO";
let blobby_ref = "fakecloud.azurecr.io/blobby:0.5.2".to_string();
let blobby_id = "MASDASDIAMAREALCOMPONENTBLOBBY";
let blobby_id = "MASDASDIAMAREALACTORBLOBBY";
let host_id_one = "NASDASDIMAREALHOSTONE";
let host_id_two = "NASDASDIMAREALHOSTTWO";
@ -553,7 +530,7 @@ mod test {
],
};
let echo_daemonscaler = ComponentDaemonScaler::new(
let echo_daemonscaler = ActorDaemonScaler::new(
store.clone(),
echo_ref.to_string(),
echo_id.to_string(),
@ -564,7 +541,7 @@ mod test {
vec![],
);
let blobby_daemonscaler = ComponentDaemonScaler::new(
let blobby_daemonscaler = ActorDaemonScaler::new(
store.clone(),
blobby_ref.to_string(),
blobby_id.to_string(),
@ -670,7 +647,7 @@ mod test {
components: HashMap::from_iter([
(echo_id.to_string(), 1),
(blobby_id.to_string(), 3),
("MSOMEOTHERCOMPONENT".to_string(), 3),
("MSOMEOTHERACTOR".to_string(), 3),
]),
friendly_name: "hey".to_string(),
labels: HashMap::from_iter([
@ -786,7 +763,7 @@ mod test {
async fn can_react_to_host_events() -> Result<()> {
let lattice_id = "computing_spread_commands";
let blobby_ref = "fakecloud.azurecr.io/blobby:0.5.2".to_string();
let blobby_id = "MASDASDIAMAREALCOMPONENTBLOBBY";
let blobby_id = "MASDASDIAMAREALACTORBLOBBY";
let host_id_one = "NASDASDIMAREALHOSTONE";
let host_id_two = "NASDASDIMAREALHOSTTWO";
@ -798,19 +775,20 @@ mod test {
// Inserting for heartbeat handling later
lattice_source.inventory.write().await.insert(
host_id_three.to_string(),
HostInventory::builder()
.friendly_name("hey".into())
.labels(BTreeMap::from_iter([
HostInventory {
components: vec![],
friendly_name: "hey".to_string(),
labels: HashMap::from_iter([
("cloud".to_string(), "purgatory".to_string()),
("location".to_string(), "edge".to_string()),
("region".to_string(), "us-brooks-1".to_string()),
]))
.host_id(host_id_three.into())
.version("1.0.0".into())
.uptime_human("what is time really anyway maaaan".into())
.uptime_seconds(42)
.build()
.map_err(|e| anyhow!("failed to build host inventory: {e}"))?,
]),
providers: vec![],
host_id: host_id_three.to_string(),
version: "1.0.0".to_string(),
uptime_human: "what is time really anyway maaaan".to_string(),
uptime_seconds: 42,
},
);
let command_publisher = CommandPublisher::new(NoopPublisher, "doesntmatter");
let status_publisher = StatusPublisher::new(NoopPublisher, None, "doesntmatter");
@ -840,7 +818,7 @@ mod test {
weight: None,
}],
};
let blobby_daemonscaler = ComponentDaemonScaler::new(
let blobby_daemonscaler = ActorDaemonScaler::new(
store.clone(),
blobby_ref.to_string(),
blobby_id.to_string(),
@ -901,7 +879,7 @@ mod test {
Host {
components: HashMap::from_iter([
(blobby_id.to_string(), 10),
("MSOMEOTHERCOMPONENT".to_string(), 3),
("MSOMEOTHERACTOR".to_string(), 3),
]),
friendly_name: "hey".to_string(),
labels: HashMap::from_iter([
@ -960,7 +938,7 @@ mod test {
.is_empty());
assert!(blobby_daemonscaler
.handle_event(&Event::LinkdefSet(LinkdefSet {
linkdef: Link::default()
linkdef: InterfaceLinkDefinition::default()
}))
.await?
.is_empty());

View File

@ -4,20 +4,15 @@ use anyhow::Result;
use async_trait::async_trait;
use tokio::sync::RwLock;
use tracing::{instrument, trace};
use wadm_types::api::StatusType;
use wadm_types::{api::StatusInfo, Spread, SpreadScalerProperty, TraitProperty};
use crate::commands::StopProvider;
use crate::events::{
ConfigSet, HostHeartbeat, ProviderHealthCheckFailed, ProviderHealthCheckInfo,
ProviderHealthCheckPassed, ProviderInfo, ProviderStarted, ProviderStopped,
};
use crate::scaler::compute_id_sha256;
use crate::events::{HostHeartbeat, ProviderInfo, ProviderStarted, ProviderStopped};
use crate::scaler::compute_config_hash;
use crate::scaler::spreadscaler::{
compute_ineligible_hosts, eligible_hosts, provider::ProviderSpreadConfig,
spreadscaler_annotations,
};
use crate::storage::{Provider, ProviderStatus};
use crate::SCALER_KEY;
use crate::{
commands::{Command, StartProvider},
@ -26,7 +21,8 @@ use crate::{
storage::{Host, ReadStore},
};
use super::DAEMON_SCALER_KIND;
// Annotation constants
pub const PROVIDER_DAEMON_SCALER_TYPE: &str = "providerdaemonscaler";
/// The ProviderDaemonScaler ensures that a provider is running on every host, according to a
/// [SpreadScalerProperty](crate::model::SpreadScalerProperty)
@ -46,14 +42,6 @@ impl<S: ReadStore + Send + Sync + Clone> Scaler for ProviderDaemonScaler<S> {
&self.id
}
fn kind(&self) -> &str {
DAEMON_SCALER_KIND
}
fn name(&self) -> String {
self.config.provider_id.to_string()
}
async fn status(&self) -> StatusInfo {
let _ = self.reconcile().await;
self.status.read().await.to_owned()
@ -102,65 +90,6 @@ impl<S: ReadStore + Send + Sync + Clone> Scaler for ProviderDaemonScaler<S> {
{
self.reconcile().await
}
// perform status updates for health check events
Event::ProviderHealthCheckFailed(ProviderHealthCheckFailed {
data: ProviderHealthCheckInfo { provider_id, .. },
})
| Event::ProviderHealthCheckPassed(ProviderHealthCheckPassed {
data: ProviderHealthCheckInfo { provider_id, .. },
}) if provider_id == &self.config.provider_id => {
let provider = self
.store
.get::<Provider>(&self.config.lattice_id, &self.config.provider_id)
.await?;
let unhealthy_providers = provider.map_or(0, |p| {
p.hosts
.values()
.filter(|s| *s == &ProviderStatus::Failed)
.count()
});
let status = self.status.read().await.to_owned();
// update health status of scaler
if let Some(status) = match (status, unhealthy_providers > 0) {
// scaler is deployed but contains unhealthy providers
(
StatusInfo {
status_type: StatusType::Deployed,
..
},
true,
) => Some(StatusInfo::failed(&format!(
"Unhealthy provider on {} host(s)",
unhealthy_providers
))),
// scaler can become unhealthy only if it was previously deployed
// once scaler becomes healthy again revert back to deployed state
// this is a workaround to detect unhealthy status until
// StatusType::Unhealthy can be used
(
StatusInfo {
status_type: StatusType::Failed,
message,
},
false,
) if message.starts_with("Unhealthy provider on") => {
Some(StatusInfo::deployed(""))
}
// don't update status if scaler is not deployed
_ => None,
} {
*self.status.write().await = status;
}
// only status needs update no new commands required
Ok(Vec::new())
}
Event::ConfigSet(ConfigSet { config_name })
if self.config.provider_config.contains(config_name) =>
{
self.reconcile().await
}
// No other event impacts the job of this scaler so we can ignore it
_ => Ok(Vec::new()),
}
@ -169,6 +98,7 @@ impl<S: ReadStore + Send + Sync + Clone> Scaler for ProviderDaemonScaler<S> {
#[instrument(level = "trace", skip_all, fields(name = %self.config.model_name, scaler_id = %self.id))]
async fn reconcile(&self) -> Result<Vec<Command>> {
let hosts = self.store.list::<Host>(&self.config.lattice_id).await?;
let provider_id = &self.config.provider_id;
let provider_ref = &self.config.provider_reference;
@ -277,14 +207,9 @@ impl<S: ReadStore + Send + Sync + Clone> Scaler for ProviderDaemonScaler<S> {
trace!(?commands, "Calculated commands for provider daemonscaler");
let status = match (spread_status.is_empty(), commands.is_empty()) {
// No failures, no commands, scaler satisfied
(true, true) => StatusInfo::deployed(""),
// No failures, commands generated, scaler is reconciling
(true, false) => {
StatusInfo::reconciling(&format!("Scaling provider on {} host(s)", commands.len()))
}
// Failures occurred, scaler is in a failed state
(false, _) => StatusInfo::failed(
(_, false) => StatusInfo::reconciling(""),
(false, true) => StatusInfo::failed(
&spread_status
.into_iter()
.map(|s| s.message)
@ -317,23 +242,14 @@ impl<S: ReadStore + Send + Sync + Clone> Scaler for ProviderDaemonScaler<S> {
impl<S: ReadStore + Send + Sync> ProviderDaemonScaler<S> {
/// Construct a new ProviderDaemonScaler with specified configuration values
pub fn new(store: S, config: ProviderSpreadConfig, component_name: &str) -> Self {
// Compute the id of this scaler based on all of the configuration values
// that make it unique. This is used during upgrades to determine if a
// scaler is the same as a previous one.
let mut id_parts = vec![
DAEMON_SCALER_KIND,
&config.model_name,
component_name,
&config.provider_id,
&config.provider_reference,
];
id_parts.extend(
config
.provider_config
.iter()
.map(std::string::String::as_str),
let mut id = format!(
"{PROVIDER_DAEMON_SCALER_TYPE}-{}-{component_name}-{}",
config.model_name, config.provider_id,
);
let id = compute_id_sha256(&id_parts);
if !config.provider_config.is_empty() {
id.push('-');
id.push_str(&compute_config_hash(&config.provider_config))
}
// If no spreads are specified, an empty spread is sufficient to match _every_ host
// in a lattice
@ -393,8 +309,16 @@ mod test {
provider_config: vec![],
};
let scaler1 =
let scaler =
ProviderDaemonScaler::new(Arc::new(TestStore::default()), config, "myprovider");
assert_eq!(
scaler.id(),
format!(
"{PROVIDER_DAEMON_SCALER_TYPE}-{}-myprovider-provider_id",
MODEL_NAME
),
"ProviderDaemonScaler ID should be valid"
);
let config = ProviderSpreadConfig {
lattice_id: "lattice".to_string(),
@ -408,12 +332,28 @@ mod test {
provider_config: vec!["foobar".to_string()],
};
let scaler2 =
let scaler =
ProviderDaemonScaler::new(Arc::new(TestStore::default()), config, "myprovider");
assert_ne!(
scaler1.id(),
scaler2.id(),
"ProviderDaemonScaler IDs should be different with different configuration"
assert_eq!(
scaler.id(),
format!(
"{PROVIDER_DAEMON_SCALER_TYPE}-{}-myprovider-provider_id-{}",
MODEL_NAME,
compute_config_hash(&["foobar".to_string()])
),
"ProviderDaemonScaler ID should be valid"
);
let mut scaler_id_tokens = scaler.id().split('-');
scaler_id_tokens.next_back();
let scaler_id_tokens = scaler_id_tokens.collect::<Vec<&str>>().join("-");
assert_eq!(
scaler_id_tokens,
format!(
"{PROVIDER_DAEMON_SCALER_TYPE}-{}-myprovider-provider_id",
MODEL_NAME
),
"ProviderDaemonScaler ID should be valid and depends on provider_config"
);
}
@ -568,274 +508,4 @@ mod test {
Ok(())
}
#[tokio::test]
async fn test_healthy_providers_return_healthy_status() -> Result<()> {
let lattice_id = "test_healthy_providers";
let provider_ref = "fakecloud.azurecr.io/provider:3.2.1".to_string();
let provider_id = "VASDASDIAMAREALPROVIDERPROVIDER";
let host_id_one = "NASDASDIMAREALHOSTONE";
let host_id_two = "NASDASDIMAREALHOSTTWO";
let store = Arc::new(TestStore::default());
store
.store(
lattice_id,
host_id_one.to_string(),
Host {
components: HashMap::new(),
friendly_name: "hey".to_string(),
labels: HashMap::from_iter([
("inda".to_string(), "cloud".to_string()),
("cloud".to_string(), "fake".to_string()),
("region".to_string(), "us-noneofyourbusiness-1".to_string()),
]),
providers: HashSet::from_iter([ProviderInfo {
provider_id: provider_id.to_string(),
provider_ref: provider_ref.to_string(),
annotations: BTreeMap::default(),
}]),
uptime_seconds: 123,
version: None,
id: host_id_one.to_string(),
last_seen: Utc::now(),
},
)
.await?;
store
.store(
lattice_id,
host_id_two.to_string(),
Host {
components: HashMap::new(),
friendly_name: "hey".to_string(),
labels: HashMap::from_iter([
("inda".to_string(), "cloud".to_string()),
("cloud".to_string(), "real".to_string()),
("region".to_string(), "us-yourhouse-1".to_string()),
]),
providers: HashSet::from_iter([ProviderInfo {
provider_id: provider_id.to_string(),
provider_ref: provider_ref.to_string(),
annotations: BTreeMap::default(),
}]),
uptime_seconds: 123,
version: None,
id: host_id_two.to_string(),
last_seen: Utc::now(),
},
)
.await?;
store
.store(
lattice_id,
provider_id.to_string(),
Provider {
id: provider_id.to_string(),
name: "provider".to_string(),
issuer: "issuer".to_string(),
reference: provider_ref.to_string(),
hosts: HashMap::from([
(host_id_one.to_string(), ProviderStatus::Failed),
(host_id_two.to_string(), ProviderStatus::Running),
]),
},
)
.await?;
// Ensure we spread evenly with equal weights, clean division
let multi_spread_even = SpreadScalerProperty {
// instances are ignored so putting an absurd number
instances: 2,
spread: vec![Spread {
name: "SimpleOne".to_string(),
requirements: BTreeMap::from_iter([("inda".to_string(), "cloud".to_string())]),
weight: Some(100),
}],
};
let spreadscaler = ProviderDaemonScaler::new(
store.clone(),
ProviderSpreadConfig {
lattice_id: lattice_id.to_string(),
provider_id: provider_id.to_string(),
provider_reference: provider_ref.to_string(),
spread_config: multi_spread_even,
model_name: MODEL_NAME.to_string(),
provider_config: vec!["foobar".to_string()],
},
"fake_component",
);
spreadscaler.reconcile().await?;
spreadscaler
.handle_event(&Event::ProviderHealthCheckFailed(
ProviderHealthCheckFailed {
data: ProviderHealthCheckInfo {
provider_id: provider_id.to_string(),
host_id: host_id_one.to_string(),
},
},
))
.await?;
store
.store(
lattice_id,
provider_id.to_string(),
Provider {
id: provider_id.to_string(),
name: "provider".to_string(),
issuer: "issuer".to_string(),
reference: provider_ref.to_string(),
hosts: HashMap::from([
(host_id_one.to_string(), ProviderStatus::Pending),
(host_id_two.to_string(), ProviderStatus::Running),
]),
},
)
.await?;
spreadscaler
.handle_event(&Event::ProviderHealthCheckPassed(
ProviderHealthCheckPassed {
data: ProviderHealthCheckInfo {
provider_id: provider_id.to_string(),
host_id: host_id_two.to_string(),
},
},
))
.await?;
assert_eq!(
spreadscaler.status.read().await.to_owned(),
StatusInfo::deployed("")
);
Ok(())
}
#[tokio::test]
async fn test_unhealthy_providers_return_unhealthy_status() -> Result<()> {
let lattice_id = "test_unhealthy_providers";
let provider_ref = "fakecloud.azurecr.io/provider:3.2.1".to_string();
let provider_id = "VASDASDIAMAREALPROVIDERPROVIDER";
let host_id_one = "NASDASDIMAREALHOSTONE";
let host_id_two = "NASDASDIMAREALHOSTTWO";
let store = Arc::new(TestStore::default());
store
.store(
lattice_id,
host_id_one.to_string(),
Host {
components: HashMap::new(),
friendly_name: "hey".to_string(),
labels: HashMap::from_iter([
("inda".to_string(), "cloud".to_string()),
("cloud".to_string(), "fake".to_string()),
("region".to_string(), "us-noneofyourbusiness-1".to_string()),
]),
providers: HashSet::from_iter([ProviderInfo {
provider_id: provider_id.to_string(),
provider_ref: provider_ref.to_string(),
annotations: BTreeMap::default(),
}]),
uptime_seconds: 123,
version: None,
id: host_id_one.to_string(),
last_seen: Utc::now(),
},
)
.await?;
store
.store(
lattice_id,
host_id_two.to_string(),
Host {
components: HashMap::new(),
friendly_name: "hey".to_string(),
labels: HashMap::from_iter([
("inda".to_string(), "cloud".to_string()),
("cloud".to_string(), "real".to_string()),
("region".to_string(), "us-yourhouse-1".to_string()),
]),
providers: HashSet::from_iter([ProviderInfo {
provider_id: provider_id.to_string(),
provider_ref: provider_ref.to_string(),
annotations: BTreeMap::default(),
}]),
uptime_seconds: 123,
version: None,
id: host_id_two.to_string(),
last_seen: Utc::now(),
},
)
.await?;
store
.store(
lattice_id,
provider_id.to_string(),
Provider {
id: provider_id.to_string(),
name: "provider".to_string(),
issuer: "issuer".to_string(),
reference: provider_ref.to_string(),
hosts: HashMap::from([
(host_id_one.to_string(), ProviderStatus::Failed),
(host_id_two.to_string(), ProviderStatus::Running),
]),
},
)
.await?;
// Ensure we spread evenly with equal weights, clean division
let multi_spread_even = SpreadScalerProperty {
// instances are ignored so putting an absurd number
instances: 2,
spread: vec![Spread {
name: "SimpleOne".to_string(),
requirements: BTreeMap::from_iter([("inda".to_string(), "cloud".to_string())]),
weight: Some(100),
}],
};
let spreadscaler = ProviderDaemonScaler::new(
store.clone(),
ProviderSpreadConfig {
lattice_id: lattice_id.to_string(),
provider_id: provider_id.to_string(),
provider_reference: provider_ref.to_string(),
spread_config: multi_spread_even,
model_name: MODEL_NAME.to_string(),
provider_config: vec!["foobar".to_string()],
},
"fake_component",
);
spreadscaler.reconcile().await?;
spreadscaler
.handle_event(&Event::ProviderHealthCheckFailed(
ProviderHealthCheckFailed {
data: ProviderHealthCheckInfo {
provider_id: provider_id.to_string(),
host_id: host_id_one.to_string(),
},
},
))
.await?;
assert_eq!(
spreadscaler.status.read().await.to_owned(),
StatusInfo::failed("Unhealthy provider on 1 host(s)")
);
Ok(())
}
}

View File

@ -1,6 +1,6 @@
//! A struct that manages creating and removing scalers for all manifests
use std::{collections::HashMap, ops::Deref, sync::Arc};
use std::{collections::HashMap, ops::Deref, sync::Arc, time::Duration};
use anyhow::Result;
use async_nats::jetstream::{
@ -18,19 +18,29 @@ use tokio::{
};
use tracing::{debug, error, instrument, trace, warn};
use wadm_types::{
api::{Status, StatusInfo},
Manifest,
api::StatusInfo, CapabilityProperties, Component, ComponentProperties, ConfigProperty,
Manifest, Properties, SpreadScalerProperty, Trait, TraitProperty, DAEMONSCALER_TRAIT,
LINK_TRAIT, SPREADSCALER_TRAIT,
};
use crate::{
events::Event,
publisher::Publisher,
scaler::{Command, Scaler},
scaler::{spreadscaler::ActorSpreadScaler, Command, Scaler},
storage::{snapshot::SnapshotStore, ReadStore},
workers::{CommandPublisher, ConfigSource, LinkSource, SecretSource, StatusPublisher},
workers::{CommandPublisher, ConfigSource, LinkSource, StatusPublisher},
DEFAULT_LINK_NAME,
};
use super::convert::manifest_components_to_scalers;
use super::{
configscaler::ConfigScaler,
daemonscaler::{provider::ProviderDaemonScaler, ActorDaemonScaler},
spreadscaler::{
link::{LinkScaler, LinkScalerConfig},
provider::{ProviderSpreadConfig, ProviderSpreadScaler},
},
BackoffAwareScaler,
};
pub type BoxedScaler = Box<dyn Scaler + Send + Sync + 'static>;
pub type ScalerList = Vec<BoxedScaler>;
@ -123,7 +133,7 @@ impl<StateStore, P, L> ScalerManager<StateStore, P, L>
where
StateStore: ReadStore + Send + Sync + Clone + 'static,
P: Publisher + Clone + Send + Sync + 'static,
L: LinkSource + ConfigSource + SecretSource + Clone + Send + Sync + 'static,
L: LinkSource + ConfigSource + Clone + Send + Sync + 'static,
{
/// Creates a new ScalerManager configured to notify messages to `wadm.notify.{lattice_id}`
/// using the given jetstream client. Also creates an ephemeral consumer for notifications on
@ -170,9 +180,7 @@ where
.list(multitenant_prefix, lattice_id)
.await?
.into_iter()
.map(|summary| {
manifest_store.get(multitenant_prefix, lattice_id, summary.name().to_owned())
});
.map(|summary| manifest_store.get(multitenant_prefix, lattice_id, summary.name));
let all_manifests = futures::future::join_all(futs)
.await
.into_iter()
@ -189,13 +197,12 @@ where
.filter_map(|manifest| {
let data = manifest.get_deployed()?;
let name = manifest.name().to_owned();
let scalers = manifest_components_to_scalers(
let scalers = components_to_scalers(
&data.spec.components,
&data.policy_lookup(),
lattice_id,
&client,
&name,
&subject,
&client,
&snapshot_data,
);
Some((name, scalers))
@ -280,13 +287,12 @@ where
}
pub fn scalers_for_manifest<'a>(&'a self, manifest: &'a Manifest) -> ScalerList {
manifest_components_to_scalers(
components_to_scalers(
&manifest.spec.components,
&manifest.policy_lookup(),
&self.lattice_id,
&self.client,
&manifest.metadata.name,
&self.subject,
&self.client,
&self.snapshot_data,
)
}
@ -332,8 +338,8 @@ where
/// notification or handling commands fails, then this function will reinsert the scalers back into the internal map
/// and return an error (so this function can be called again)
// NOTE(thomastaylor312): This was designed the way it is to avoid race conditions. We only ever
// stop components and providers that have the right annotation. So if for some reason this
// leaves something hanging, we should probably add something to the reaper
// stop actors and providers that have the right annotation. So if for some reason this leaves
// something hanging, we should probably add something to the reaper
#[instrument(level = "debug", skip(self), fields(lattice_id = %self.lattice_id))]
pub async fn remove_scalers(&self, name: &str) -> Option<Result<()>> {
let scalers = match self.remove_scalers_internal(name).await {
@ -379,13 +385,11 @@ where
/// Does everything except sending the notification
#[instrument(level = "debug", skip(self), fields(lattice_id = %self.lattice_id))]
async fn remove_scalers_internal(&self, name: &str) -> Option<Result<ScalerList>> {
// Remove the scalers first to avoid them handling events while we're cleaning up
let scalers = self.remove_raw_scalers(name).await?;
// Always refresh data before cleaning up
// Always refresh data before removing
if let Err(e) = self.refresh_data().await {
return Some(Err(e));
}
let scalers = self.remove_raw_scalers(name).await?;
let commands = match futures::future::join_all(
scalers.iter().map(|scaler| scaler.cleanup()),
)
@ -430,13 +434,12 @@ where
match notification {
Notifications::CreateScalers(manifest) => {
// We don't want to trigger the notification, so just create the scalers and then insert
let scalers = manifest_components_to_scalers(
let scalers = components_to_scalers(
&manifest.spec.components,
&manifest.policy_lookup(),
&self.lattice_id,
&self.client,
&manifest.metadata.name,
&self.subject,
&self.client,
&self.snapshot_data,
);
let num_scalers = scalers.len();
@ -457,10 +460,7 @@ where
// hasn't deleted the scaler yet
if let Err(e) = self
.status_publisher
.publish_status(&name, Status::new(
StatusInfo::undeployed("Manifest has been undeployed"),
Vec::with_capacity(0),
))
.publish_status(&name, StatusInfo::undeployed(""))
.await
{
warn!(error = ?e, "Failed to set status to undeployed");
@ -491,7 +491,7 @@ where
// wrapped ones (which is good from a Rust API point of view). If
// this starts to become a problem, we can revisit how we handle
// this (probably by requiring that this struct always wraps any
// scaler in the backoff wrapper and using custom methods from that
// scaler in the backoff scaler and using custom methods from that
// type)
Notifications::RegisterExpectedEvents{ name, scaler_id, triggering_event } => {
trace!(%name, "Computing and registering expected events for manifest");
@ -553,3 +553,367 @@ where
}
}
}
const EMPTY_TRAIT_VEC: Vec<Trait> = Vec::new();
/// Converts a list of components into a list of scalers
///
/// # Arguments
/// * `components` - The list of components to convert
/// * `lattice_id` - The lattice id the scalers operate on
/// * `notifier` - The publisher to use when creating the scalers so they can send notifications
/// * `name` - The name of the manifest that the scalers are being created for
/// * `notifier_subject` - The subject to use when sending notifications
/// * `snapshot_data` - The store to use when creating the scalers so they can access lattice state
pub(crate) fn components_to_scalers<S, P, L>(
components: &[Component],
lattice_id: &str,
notifier: &P,
name: &str,
notifier_subject: &str,
snapshot_data: &SnapshotStore<S, L>,
) -> ScalerList
where
S: ReadStore + Send + Sync + Clone + 'static,
P: Publisher + Clone + Send + Sync + 'static,
L: LinkSource + ConfigSource + Clone + Send + Sync + 'static,
{
let mut scalers: ScalerList = Vec::new();
for component in components.iter() {
let traits = component.traits.as_ref();
match &component.properties {
Properties::Component { properties: props } => {
scalers.extend(traits.unwrap_or(&EMPTY_TRAIT_VEC).iter().filter_map(|trt| {
let component_id =
compute_component_id(name, props.id.as_ref(), &component.name);
let (config_scalers, config_names) =
config_to_scalers(snapshot_data.clone(), name, &props.config);
match (trt.trait_type.as_str(), &trt.properties) {
(SPREADSCALER_TRAIT, TraitProperty::SpreadScaler(p)) => {
Some(Box::new(BackoffAwareScaler::new(
ActorSpreadScaler::new(
snapshot_data.clone(),
props.image.to_owned(),
component_id,
lattice_id.to_owned(),
name.to_owned(),
p.to_owned(),
&component.name,
config_names,
),
notifier.to_owned(),
config_scalers,
notifier_subject,
name,
Some(Duration::from_secs(5)),
)) as BoxedScaler)
}
(DAEMONSCALER_TRAIT, TraitProperty::SpreadScaler(p)) => {
Some(Box::new(BackoffAwareScaler::new(
ActorDaemonScaler::new(
snapshot_data.clone(),
props.image.to_owned(),
component_id,
lattice_id.to_owned(),
name.to_owned(),
p.to_owned(),
&component.name,
config_names,
),
notifier.to_owned(),
config_scalers,
notifier_subject,
name,
Some(Duration::from_secs(5)),
)) as BoxedScaler)
}
(LINK_TRAIT, TraitProperty::Link(p)) => {
components.iter().find_map(|component| {
let (mut config_scalers, source_config) = config_to_scalers(
snapshot_data.clone(),
name,
&p.source_config,
);
let (target_config_scalers, target_config) = config_to_scalers(
snapshot_data.clone(),
name,
&p.target_config,
);
config_scalers.extend(target_config_scalers);
match &component.properties {
Properties::Capability {
properties: CapabilityProperties { id, .. },
}
| Properties::Component {
properties: ComponentProperties { id, .. },
} if component.name == p.target => {
Some(Box::new(BackoffAwareScaler::new(
LinkScaler::new(
snapshot_data.clone(),
LinkScalerConfig {
source_id: component_id.to_string(),
target: compute_component_id(
name,
id.as_ref(),
&component.name,
),
wit_namespace: p.namespace.to_owned(),
wit_package: p.package.to_owned(),
wit_interfaces: p.interfaces.to_owned(),
name: p.name.to_owned().unwrap_or_else(|| {
DEFAULT_LINK_NAME.to_string()
}),
lattice_id: lattice_id.to_owned(),
model_name: name.to_owned(),
source_config,
target_config,
},
snapshot_data.clone(),
),
notifier.to_owned(),
config_scalers,
notifier_subject,
name,
Some(Duration::from_secs(5)),
))
as BoxedScaler)
}
_ => None,
}
})
}
_ => None,
}
}))
}
Properties::Capability { properties: props } => {
let provider_id = compute_component_id(name, props.id.as_ref(), &component.name);
let mut scaler_specified = false;
if let Some(traits) = traits {
scalers.extend(traits.iter().filter_map(|trt| {
match (trt.trait_type.as_str(), &trt.properties) {
(SPREADSCALER_TRAIT, TraitProperty::SpreadScaler(p)) => {
scaler_specified = true;
let (config_scalers, config_names) =
config_to_scalers(snapshot_data.clone(), name, &props.config);
Some(Box::new(BackoffAwareScaler::new(
ProviderSpreadScaler::new(
snapshot_data.clone(),
ProviderSpreadConfig {
lattice_id: lattice_id.to_owned(),
provider_id: provider_id.to_owned(),
provider_reference: props.image.to_owned(),
spread_config: p.to_owned(),
model_name: name.to_owned(),
provider_config: config_names,
},
&component.name,
),
notifier.to_owned(),
config_scalers,
notifier_subject,
name,
// Providers get a longer timeout because downloading the provider image can take a while
Some(Duration::from_secs(60)),
)) as BoxedScaler)
}
(DAEMONSCALER_TRAIT, TraitProperty::SpreadScaler(p)) => {
scaler_specified = true;
let (config_scalers, config_names) =
config_to_scalers(snapshot_data.clone(), name, &props.config);
Some(Box::new(BackoffAwareScaler::new(
ProviderDaemonScaler::new(
snapshot_data.clone(),
ProviderSpreadConfig {
lattice_id: lattice_id.to_owned(),
provider_id: provider_id.to_owned(),
provider_reference: props.image.to_owned(),
spread_config: p.to_owned(),
model_name: name.to_owned(),
provider_config: config_names,
},
&component.name,
),
notifier.to_owned(),
config_scalers,
notifier_subject,
name,
// Providers get a longer timeout because downloading the provider image can take a while
Some(Duration::from_secs(60)),
)) as BoxedScaler)
}
(LINK_TRAIT, TraitProperty::Link(p)) => {
components.iter().find_map(|component| {
let (mut config_scalers, source_config) = config_to_scalers(
snapshot_data.clone(),
name,
&p.source_config,
);
let (target_config_scalers, target_config) = config_to_scalers(
snapshot_data.clone(),
name,
&p.target_config,
);
config_scalers.extend(target_config_scalers);
match &component.properties {
Properties::Component { properties: cappy }
if component.name == p.target =>
{
Some(Box::new(BackoffAwareScaler::new(
LinkScaler::new(
snapshot_data.clone(),
LinkScalerConfig {
source_id: provider_id.to_string(),
target: compute_component_id(
name,
cappy.id.as_ref(),
&component.name,
),
wit_namespace: p.namespace.to_owned(),
wit_package: p.package.to_owned(),
wit_interfaces: p.interfaces.to_owned(),
name: p.name.to_owned().unwrap_or_else(
|| DEFAULT_LINK_NAME.to_string(),
),
lattice_id: lattice_id.to_owned(),
model_name: name.to_owned(),
source_config,
target_config,
},
snapshot_data.clone(),
),
notifier.to_owned(),
config_scalers,
notifier_subject,
name,
Some(Duration::from_secs(5)),
))
as BoxedScaler)
}
_ => None,
}
})
}
_ => None,
}
}))
}
// Allow providers to omit the scaler entirely for simplicity
if !scaler_specified {
let (config_scalers, config_names) =
config_to_scalers(snapshot_data.clone(), name, &props.config);
scalers.push(Box::new(BackoffAwareScaler::new(
ProviderSpreadScaler::new(
snapshot_data.clone(),
ProviderSpreadConfig {
lattice_id: lattice_id.to_owned(),
provider_id,
provider_reference: props.image.to_owned(),
spread_config: SpreadScalerProperty {
instances: 1,
spread: vec![],
},
model_name: name.to_owned(),
provider_config: config_names,
},
&component.name,
),
notifier.to_owned(),
config_scalers,
notifier_subject,
name,
// Providers get a longer timeout because downloading the provider image can take a while
Some(Duration::from_secs(60)),
)) as BoxedScaler)
}
}
}
}
scalers
}
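For orientation, the large dispatch in `components_to_scalers` reduces to a lookup on (component kind, trait type). The sketch below summarizes it; the trait-type strings and the fallback behavior are read off the constants and branches above, but the function itself is hypothetical and not part of this crate.
// Hypothetical summary of the dispatch above; the string values assumed for
// SPREADSCALER_TRAIT, DAEMONSCALER_TRAIT, and LINK_TRAIT are illustrative
fn scaler_for(kind: &str, trait_type: &str) -> Option<&'static str> {
    match (kind, trait_type) {
        ("component", "spreadscaler") => Some("ActorSpreadScaler"),
        ("component", "daemonscaler") => Some("ActorDaemonScaler"),
        ("capability", "spreadscaler") => Some("ProviderSpreadScaler"),
        ("capability", "daemonscaler") => Some("ProviderDaemonScaler"),
        // Links may originate from either kind and target another component
        (_, "link") => Some("LinkScaler"),
        // A capability with no scaler trait falls back to a single-instance
        // ProviderSpreadScaler (the `scaler_specified` branch above)
        ("capability", _) => Some("ProviderSpreadScaler"),
        _ => None,
    }
}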
/// Returns a tuple which is a list of scalers and a list of the names of the configs that the
/// scalers use.
///
/// Any input [ConfigProperty] is converted into a [ConfigScaler]. If the config has a
/// `properties` field, the name of the configuration is modified to be unique to the model and
/// component. If the `properties` field is not present, the name is used as-is and the
/// configuration is assumed to be managed externally to wadm.
fn config_to_scalers<C: ConfigSource + Send + Sync + Clone>(
config_source: C,
model_name: &str,
configs: &[ConfigProperty],
) -> (Vec<ConfigScaler<C>>, Vec<String>) {
configs
.iter()
.map(|config| {
let name = if config.properties.is_some() {
compute_component_id(model_name, None, &config.name)
} else {
config.name.clone()
};
(
ConfigScaler::new(config_source.clone(), &name, config.properties.as_ref()),
name,
)
})
.unzip()
}
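To make the naming rule concrete, here is a minimal standalone sketch of the scoping behavior using plain strings instead of the real ConfigProperty type; the sanitization mirrors compute_component_id below.
// Minimal sketch of the config-name scoping rule; illustrative only
fn scoped_config_name(model_name: &str, config_name: &str, has_properties: bool) -> String {
    let sanitize = |s: &str| {
        s.to_lowercase()
            .replace(|c: char| !c.is_ascii_alphanumeric(), "_")
    };
    if has_properties {
        // wadm-managed config: scope the name to the model so it stays unique
        format!("{}-{}", sanitize(model_name), sanitize(config_name))
    } else {
        // Externally managed config: reference the name as-is
        config_name.to_string()
    }
}
fn main() {
    assert_eq!(scoped_config_name("my app", "http", true), "my_app-http");
    assert_eq!(scoped_config_name("my app", "shared-config", false), "shared-config");
}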
/// Based on the name of the model and the optionally provided ID, returns a unique ID for the
/// component. If no ID is supplied, it is a sanitized version of the model name and component
/// name, separated by a dash.
pub(crate) fn compute_component_id(
model_name: &str,
component_id: Option<&String>,
component_name: &str,
) -> String {
if let Some(id) = component_id {
id.to_owned()
} else {
format!(
"{}-{}",
model_name
.to_lowercase()
.replace(|c: char| !c.is_ascii_alphanumeric(), "_"),
component_name
.to_lowercase()
.replace(|c: char| !c.is_ascii_alphanumeric(), "_")
)
}
}
#[cfg(test)]
mod test {
use crate::scaler::manager::compute_component_id;
#[test]
fn compute_proper_component_id() {
// User supplied ID always takes precedence
assert_eq!(
compute_component_id("mymodel", Some(&"myid".to_string()), "echo"),
"myid"
);
assert_eq!(
compute_component_id(
"some model name with spaces cause yaml",
Some(&"myid".to_string()),
" echo "
),
"myid"
);
// Sanitize component reference
assert_eq!(
compute_component_id("mymodel", None, "echo-component"),
"mymodel-echo_component"
);
// Ensure we can support spaces in the model name, because YAML strings
assert_eq!(
compute_component_id("some model name with spaces cause yaml", None, "echo"),
"some_model_name_with_spaces_cause_yaml-echo"
);
// Ensure we can support spaces in the model name, because YAML strings
// Ensure we can support lowercasing the reference as well, just in case
assert_eq!(
compute_component_id("My ThInG", None, "thing.wasm"),
"my_thing-thing_wasm"
);
}
}

View File

@ -2,7 +2,7 @@ use std::{sync::Arc, time::Duration};
use anyhow::Result;
use async_trait::async_trait;
use sha2::{Digest, Sha256};
use base64::{engine::general_purpose, Engine as _};
use tokio::{
sync::{Mutex, RwLock},
task::JoinHandle,
@ -12,26 +12,21 @@ use wadm_types::{api::StatusInfo, TraitProperty};
use crate::{
commands::Command,
events::{ComponentScaleFailed, ComponentScaled, Event, ProviderStartFailed, ProviderStarted},
events::{Event, ProviderStartFailed, ProviderStarted},
publisher::Publisher,
workers::{get_commands_and_result, ConfigSource, SecretSource},
workers::{get_commands_and_result, ConfigSource},
};
pub mod configscaler;
mod convert;
pub mod daemonscaler;
pub mod manager;
pub mod secretscaler;
pub mod spreadscaler;
pub mod statusscaler;
use manager::Notifications;
use self::configscaler::ConfigScaler;
use self::secretscaler::SecretScaler;
const DEFAULT_WAIT_TIMEOUT: Duration = Duration::from_secs(30);
const DEFAULT_SCALER_KIND: &str = "Scaler";
/// A trait describing a struct that can be configured to compute the difference between
/// desired state and configured state, returning a set of commands to approach desired state.
@ -46,23 +41,11 @@ const DEFAULT_SCALER_KIND: &str = "Scaler";
#[async_trait]
pub trait Scaler {
/// A unique identifier for this scaler type. This is used for logging and for selecting
/// specific scalers as needed. wadm scalers implement this by computing a sha256 hash of
/// all of the parameters that are used to construct the scaler, therefore ensuring that
/// the ID is unique for each scaler
/// specific scalers as needed. Generally this should be something like
/// `$NAME_OF_SCALER_TYPE-$MODEL_NAME-$OCI_REF`. However, the only requirement is that it can
/// uniquely identify a scaler
fn id(&self) -> &str;
/// An optional human-friendly name for this scaler. This is used for logging and for selecting
/// specific scalers as needed. This is optional and by default returns the same value as `id`,
/// and does not have to be unique
fn name(&self) -> String {
self.id().to_string()
}
/// An optional kind of scaler. This is used for logging and for selecting specific scalers as needed
fn kind(&self) -> &str {
DEFAULT_SCALER_KIND
}
/// Determine the status of this scaler according to reconciliation logic. This is the opportunity
/// for scalers to indicate that they are unhealthy with a message as to what's missing.
async fn status(&self) -> StatusInfo;
@ -88,21 +71,19 @@ pub trait Scaler {
async fn cleanup(&self) -> Result<Vec<Command>>;
}
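For reference, a minimal no-op implementation of this trait might look like the sketch below. The method set is taken from the usages visible in this diff (and assumes the module imports shown above); NoopScaler itself is hypothetical.
// Hypothetical no-op Scaler showing the shape of an implementation
struct NoopScaler {
    id: String,
}
#[async_trait]
impl Scaler for NoopScaler {
    fn id(&self) -> &str {
        &self.id
    }
    async fn status(&self) -> StatusInfo {
        // Report as deployed; a real scaler compares desired vs. actual state
        StatusInfo::deployed("")
    }
    async fn update_config(&mut self, _config: TraitProperty) -> Result<Vec<Command>> {
        Ok(Vec::new())
    }
    async fn handle_event(&self, _event: &Event) -> Result<Vec<Command>> {
        Ok(Vec::new())
    }
    async fn reconcile(&self) -> Result<Vec<Command>> {
        // A real scaler emits commands that move actual state toward desired
        Ok(Vec::new())
    }
    async fn cleanup(&self) -> Result<Vec<Command>> {
        Ok(Vec::new())
    }
}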
/// The BackoffWrapper is a wrapper around a scaler that is responsible for
/// ensuring that a particular scaler doesn't get overwhelmed with events and has the
/// necessary prerequisites to reconcile.
/// The BackoffAwareScaler is a wrapper around a scaler that is responsible for
/// ensuring that a particular [Scaler] has the proper prerequisites in place
/// and should be able to reconcile and issue commands.
///
/// 1. `required_config` & `required_secrets`: With the introduction of configuration
/// for wadm applications, the most necessary prerequisite for components, providers
/// and links to start is that their configuration is available. Scalers will not be
/// able to issue commands until the configuration exists.
/// 2. `expected_events`: For scalers that issue commands that should result in events,
/// the BackoffWrapper is responsible for ensuring that the scaler doesn't continually
/// issue commands that it's already expecting events for. Commonly this will allow a host
/// to download larger images from an OCI repository without being bombarded with repeat requests.
/// 3. `backoff_status`: If a scaler receives an event that it was expecting, but it was a failure
/// event, the scaler should back off exponentially while reporting that failure status. This both
/// allows for diagnosing issues with reconciliation and prevents thrashing.
/// 1. With the introduction of configuration for wadm applications, the most necessary
/// prerequisite for components, providers and links to start is that their
/// configuration is available. Scalers will not be able to issue commands until
/// the configuration exists.
/// 2. For scalers that issue commands that take a long time to complete, like downloading
/// an image for a provider, the BackoffAwareScaler will ensure that the scaler is not
/// overwhelmed with events and will back off until the expected events have been received.
/// 3. In the future (#253) this wrapper should also be responsible for exponential backoff
/// when a scaler is repeatedly issuing the same commands to prevent thrashing.
///
/// All of the above effectively allows the inner Scaler to only worry about the logic around
/// reconciling and handling events, rather than be concerned about whether or not
@ -111,13 +92,12 @@ pub trait Scaler {
/// The `notifier` is used to publish notifications to add, remove, or recompute
/// expected events with scalers on other wadm instances, as only one wadm instance
/// at a time will handle a specific event.
pub(crate) struct BackoffWrapper<T, P, C> {
pub(crate) struct BackoffAwareScaler<T, P, C> {
scaler: T,
notifier: P,
notify_subject: String,
model_name: String,
required_config: Vec<ConfigScaler<C>>,
required_secrets: Vec<SecretScaler<C>>,
/// A list of (success, Option<failure>) events that the scaler is expecting
#[allow(clippy::type_complexity)]
expected_events: Arc<RwLock<Vec<(Event, Option<Event>)>>>,
@ -125,27 +105,20 @@ pub(crate) struct BackoffWrapper<T, P, C> {
event_cleaner: Mutex<Option<JoinHandle<()>>>,
/// The amount of time to wait before cleaning up the expected events list
cleanup_timeout: std::time::Duration,
/// The status of the scaler, set when the scaler is backing off due to a
/// failure event.
backoff_status: Arc<RwLock<Option<StatusInfo>>>,
// TODO(#253): Figure out where/when/how to store the backoff and exponentially repeat it
/// Responsible for cleaning up the backoff status after a specified duration
status_cleaner: Mutex<Option<JoinHandle<()>>>,
}
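The expected-event gating described in the doc comment can be hard to see inline, so here is a drastically simplified sketch of just the control flow; strings stand in for the structured (success, failure) Event pairs, and none of this is the real implementation.
// Illustration only: a toy version of the expected-event gate
struct EventGate {
    expected: Vec<String>,
}
impl EventGate {
    /// Returns true when the wrapped scaler should handle the event itself
    fn observe(&mut self, event: &str) -> bool {
        if let Some(idx) = self.expected.iter().position(|e| e == event) {
            // An event we triggered came back: consume it and stay quiet
            self.expected.remove(idx);
            false
        } else {
            // Unrelated event: act only if we are not still waiting on others
            self.expected.is_empty()
        }
    }
}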
impl<T, P, C> BackoffWrapper<T, P, C>
impl<T, P, C> BackoffAwareScaler<T, P, C>
where
T: Scaler + Send + Sync,
P: Publisher + Send + Sync + 'static,
C: ConfigSource + SecretSource + Send + Sync + Clone + 'static,
C: ConfigSource + Send + Sync + Clone + 'static,
{
/// Wraps the given scaler in a new BackoffWrapper. `cleanup_timeout` can be set to a
/// Wraps the given scaler in a new backoff aware scaler. `cleanup_timeout` can be set to a
/// desired waiting time, otherwise it will default to 30s
pub fn new(
scaler: T,
notifier: P,
required_config: Vec<ConfigScaler<C>>,
required_secrets: Vec<SecretScaler<C>>,
notify_subject: &str,
model_name: &str,
cleanup_timeout: Option<Duration>,
@ -154,14 +127,11 @@ where
scaler,
notifier,
required_config,
required_secrets,
notify_subject: notify_subject.to_owned(),
model_name: model_name.to_string(),
expected_events: Arc::new(RwLock::new(Vec::new())),
event_cleaner: Mutex::new(None),
cleanup_timeout: cleanup_timeout.unwrap_or(DEFAULT_WAIT_TIMEOUT),
backoff_status: Arc::new(RwLock::new(None)),
status_cleaner: Mutex::new(None),
}
}
@ -183,32 +153,25 @@ where
expected_events.clear();
}
expected_events.extend(events);
self.set_timed_event_cleanup().await;
self.set_timed_cleanup().await;
}
/// Removes an event pair from the expected events list if one matches the given event
/// Returns a tuple of bools, the first indicating if the event was removed, and the second
/// indicating if the event was the failure event
async fn remove_event(&self, event: &Event) -> Result<(bool, bool)> {
/// Returns true if the event was removed, false otherwise
async fn remove_event(&self, event: &Event) -> Result<bool> {
let mut expected_events = self.expected_events.write().await;
let before_count = expected_events.len();
let mut failed_event = false;
expected_events.retain(|(success, fail)| {
let matches_success = evt_matches_expected(success, event);
let matches_failure = fail
.as_ref()
.map_or(false, |f| evt_matches_expected(f, event));
// Update failed_event if the event matches the failure event
failed_event |= matches_failure;
// Retain the event if it doesn't match either the success or failure event
!(matches_success || matches_failure)
// Retain the event if it doesn't match either the success or optional failure event.
// Most events have a possibility of seeing a failure and either one means we saw the
// event we were expecting
!evt_matches_expected(success, event)
&& !fail
.as_ref()
.map(|f| evt_matches_expected(f, event))
.unwrap_or(false)
});
Ok((expected_events.len() < before_count, failed_event))
Ok(expected_events.len() != before_count)
}
/// Handles an incoming event for the given scaler.
@ -230,24 +193,8 @@ where
#[instrument(level = "trace", skip_all, fields(scaler_id = %self.id()))]
async fn handle_event_internal(&self, event: &Event) -> anyhow::Result<Vec<Command>> {
let model_name = &self.model_name;
let (expected_event, failed_event) = self.remove_event(event).await?;
let commands: Vec<Command> = if expected_event {
// If we receive a failure event that we were "expecting", then we know the
// scaler status is essentially failed and should retry. So we tell the other
// scalers to remove the event, and scalers in the process of removing that
// event will know that it failed.
trace!(failed_event, "Scaler received event that it was expecting");
if failed_event {
let failed_message = match event {
Event::ProviderStartFailed(evt) => evt.error.clone(),
Event::ComponentScaleFailed(evt) => evt.error.clone(),
_ => format!("Received a failed event of type '{}'", event.raw_type()),
};
*self.backoff_status.write().await = Some(StatusInfo::failed(&failed_message));
// TODO(#253): Here we could refer to a stored previous duration and increase it
self.set_timed_status_cleanup(std::time::Duration::from_secs(5))
.await;
}
let commands: Vec<Command> = if self.remove_event(event).await? {
trace!("Scaler received event that it was expecting");
let data = serde_json::to_vec(&Notifications::RemoveExpectedEvent {
name: model_name.to_owned(),
scaler_id: self.scaler.id().to_owned(),
@ -264,12 +211,9 @@ where
// If a scaler is expecting events still, don't have it handle events. This is effectively
// the backoff mechanism within wadm
Vec::with_capacity(0)
} else if self.backoff_status.read().await.is_some() {
trace!("Scaler received event but is in backoff, ignoring");
Vec::with_capacity(0)
} else {
trace!("Scaler is not backing off, checking configuration");
let (mut config_commands, res) = get_commands_and_result(
let (commands, res) = get_commands_and_result(
self.required_config
.iter()
.map(|config| async { config.handle_event(event).await }),
@ -284,25 +228,8 @@ where
);
}
let (mut secret_commands, res) = get_commands_and_result(
self.required_secrets
.iter()
.map(|secret| async { secret.handle_event(event).await }),
"Errors occurred while handling event with secret scalers",
)
.await;
if let Err(e) = res {
error!(
"Error occurred while handling event with secret scalers: {}",
e
);
}
// If the config scalers or secret scalers have commands to send, return them
if !config_commands.is_empty() || !secret_commands.is_empty() {
config_commands.append(&mut secret_commands);
return Ok(config_commands);
if !commands.is_empty() {
return Ok(commands);
}
trace!("Scaler required configuration is present, handling event");
@ -310,7 +237,9 @@ where
// Based on the commands, compute the events that we expect to see for this scaler. The scaler
// will then ignore incoming events until all of the expected events have been received.
let expected_events = commands.iter().filter_map(|cmd| cmd.corresponding_event());
let expected_events = commands
.iter()
.filter_map(|cmd| cmd.corresponding_event(model_name));
self.add_events(expected_events, false).await;
@ -338,11 +267,7 @@ where
// If we're already in backoff, return an empty list
let current_event_count = self.event_count().await;
if current_event_count > 0 {
trace!(%current_event_count, "Scaler is awaiting an event, not reconciling");
return Ok(Vec::with_capacity(0));
}
if self.backoff_status.read().await.is_some() {
tracing::info!(%current_event_count, "Scaler is backing off, not reconciling");
trace!(%current_event_count, "Scaler is backing off, not reconciling");
return Ok(Vec::with_capacity(0));
}
@ -353,14 +278,6 @@ where
});
}
let mut secret_commands = Vec::new();
for secret in &self.required_secrets {
secret.reconcile().await?.into_iter().for_each(|cmd| {
secret_commands.push(cmd);
});
}
commands.append(secret_commands.as_mut());
if !commands.is_empty() {
return Ok(commands);
}
@ -372,7 +289,7 @@ where
self.add_events(
commands
.iter()
.filter_map(|command| command.corresponding_event()),
.filter_map(|command| command.corresponding_event(&self.model_name)),
true,
)
.await;
@ -412,23 +329,11 @@ where
}
}
}
for secret in self.required_secrets.iter() {
match secret.cleanup().await {
Ok(cmds) => commands.extend(cmds),
// Explicitly logging, but continuing, in the case of an error to make sure
// we don't prevent other cleanup tasks from running
Err(e) => {
error!("Error occurred while cleaning up secret scalers: {}", e);
}
}
}
Ok(commands)
}
/// Sets a timed cleanup task to clear the expected events list after a timeout
async fn set_timed_event_cleanup(&self) {
async fn set_timed_cleanup(&self) {
let mut event_cleaner = self.event_cleaner.lock().await;
// Clear any existing handle
if let Some(handle) = event_cleaner.take() {
@ -446,30 +351,12 @@ where
.instrument(tracing::trace_span!("event_cleaner", scaler_id = %self.id())),
));
}
/// Sets a timed cleanup task to clear the backoff status after a timeout
async fn set_timed_status_cleanup(&self, timeout: Duration) {
let mut status_cleaner = self.status_cleaner.lock().await;
// Clear any existing handle
if let Some(handle) = status_cleaner.take() {
handle.abort();
}
let backoff_status = self.backoff_status.clone();
*status_cleaner = Some(tokio::spawn(
async move {
tokio::time::sleep(timeout).await;
trace!("Reached status cleanup timeout, clearing backoff status");
backoff_status.write().await.take();
}
.instrument(tracing::trace_span!("status_cleaner", scaler_id = %self.id())),
));
}
}
#[async_trait]
/// The [`Scaler`] trait implementation for the [`BackoffWrapper`] is mostly a simple wrapper,
/// with three exceptions, which allow scalers to sync state between different wadm instances.
/// The [Scaler](Scaler) trait implementation for the [BackoffAwareScaler](BackoffAwareScaler)
/// is mostly a simple wrapper, with two exceptions, which allow scalers to sync expected
/// events between different wadm instances.
///
/// * `handle_event` calls an internal method that uses a notifier to publish notifications to
/// all Scalers, even running on different wadm instances, to handle that event. The resulting
@ -477,35 +364,19 @@ where
/// * `reconcile` calls an internal method that uses a notifier to ensure all Scalers, even
/// running on different wadm instances, compute their expected events in response to the
/// reconciliation commands in order to "back off".
/// * `status` will first check to see if the scaler is in a backing off state, and if so, return
/// the backoff status. Otherwise, it will return the status of the scaler.
impl<T, P, C> Scaler for BackoffWrapper<T, P, C>
impl<T, P, C> Scaler for BackoffAwareScaler<T, P, C>
where
T: Scaler + Send + Sync,
P: Publisher + Send + Sync + 'static,
C: ConfigSource + SecretSource + Send + Sync + Clone + 'static,
C: ConfigSource + Send + Sync + Clone + 'static,
{
fn id(&self) -> &str {
// Pass through the ID of the wrapped scaler
self.scaler.id()
}
fn kind(&self) -> &str {
// Pass through the kind of the wrapped scaler
self.scaler.kind()
}
fn name(&self) -> String {
self.scaler.name()
}
async fn status(&self) -> StatusInfo {
// If the scaler has a backoff status, return that, otherwise return the status of the scaler
if let Some(status) = self.backoff_status.read().await.clone() {
status
} else {
self.scaler.status().await
}
self.scaler.status().await
}
async fn update_config(&mut self, config: TraitProperty) -> Result<Vec<Command>> {
@ -553,59 +424,23 @@ fn evt_matches_expected(incoming: &Event, expected: &Event) -> bool {
(
Event::ProviderStartFailed(ProviderStartFailed {
provider_id: p1,
provider_ref: i1,
host_id: h1,
..
}),
Event::ProviderStartFailed(ProviderStartFailed {
provider_id: p2,
provider_ref: i2,
host_id: h2,
..
}),
) => p1 == p2 && h1 == h2 && i1 == i2,
(
Event::ComponentScaled(ComponentScaled {
annotations: a1,
image_ref: i1,
component_id: c1,
host_id: h1,
..
}),
Event::ComponentScaled(ComponentScaled {
annotations: a2,
image_ref: i2,
component_id: c2,
host_id: h2,
..
}),
) => a1 == a2 && i1 == i2 && c1 == c2 && h1 == h2,
(
Event::ComponentScaleFailed(ComponentScaleFailed {
annotations: a1,
image_ref: i1,
component_id: c1,
host_id: h1,
..
}),
Event::ComponentScaleFailed(ComponentScaleFailed {
annotations: a2,
image_ref: i2,
component_id: c2,
host_id: h2,
..
}),
) => a1 == a2 && i1 == i2 && c1 == c2 && h1 == h2,
) => p1 == p2 && h1 == h2,
_ => false,
}
}
/// Computes the sha256 digest of the given parameters to form a unique ID for a scaler
pub(crate) fn compute_id_sha256(params: &[&str]) -> String {
let mut hasher = Sha256::new();
for param in params {
hasher.update(param.as_bytes())
}
let hash = hasher.finalize();
format!("{hash:x}")
/// Hash the named configurations to generate a unique identifier for the scaler
///
/// This is only called when the config is not empty so we don't need to worry about
/// returning empty strings.
pub(crate) fn compute_config_hash(config: &[String]) -> String {
general_purpose::STANDARD.encode(config.join("_"))
}
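The two ID schemes on either side of this diff behave quite differently: main derives a fixed-width hex digest from every identifying part, while client-v0.1.1 base64-encodes the joined config names, so the output grows with the input and is trivially reversible. A runnable comparison with made-up values:
use base64::{engine::general_purpose, Engine as _};
use sha2::{Digest, Sha256};
fn main() {
    // main: sha256 over every identifying part, rendered as lowercase hex
    let parts = ["SpreadScaler", "mymodel", "echo", "echo_id"]; // example values
    let mut hasher = Sha256::new();
    for part in parts {
        hasher.update(part.as_bytes());
    }
    let hash = hasher.finalize();
    println!("{hash:x}"); // always 64 hex chars, regardless of input size
    // client-v0.1.1: base64 of the underscore-joined config names
    let config = ["httpconfig".to_string(), "kvconfig".to_string()];
    println!("{}", general_purpose::STANDARD.encode(config.join("_")));
}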

View File

@ -1,316 +0,0 @@
use anyhow::{Context, Result};
use async_trait::async_trait;
use tokio::sync::RwLock;
use tracing::{debug, error, instrument, trace};
use wadm_types::{
api::{StatusInfo, StatusType},
Policy, SecretProperty, TraitProperty,
};
use wasmcloud_secrets_types::SecretConfig;
use crate::{
commands::{Command, DeleteConfig, PutConfig},
events::{ConfigDeleted, ConfigSet, Event},
scaler::Scaler,
workers::SecretSource,
};
use super::compute_id_sha256;
const SECRET_SCALER_KIND: &str = "SecretScaler";
pub struct SecretScaler<SecretSource> {
secret_source: SecretSource,
/// The key to use in the configdata bucket for this secret
secret_name: String,
secret_config: SecretConfig,
id: String,
status: RwLock<StatusInfo>,
}
impl<S: SecretSource> SecretScaler<S> {
pub fn new(
secret_name: String,
policy: Policy,
secret_property: SecretProperty,
secret_source: S,
) -> Self {
// Compute the id of this scaler based on all of the values that make it unique.
// This is used during upgrades to determine if a scaler is the same as a previous one.
let mut id_parts = vec![
secret_name.as_str(),
policy.name.as_str(),
policy.policy_type.as_str(),
secret_property.name.as_str(),
secret_property.properties.policy.as_str(),
secret_property.properties.key.as_str(),
];
if let Some(version) = secret_property.properties.version.as_ref() {
id_parts.push(version.as_str());
}
id_parts.extend(
policy
.properties
.iter()
.flat_map(|(k, v)| vec![k.as_str(), v.as_str()]),
);
let id = compute_id_sha256(&id_parts);
let secret_config = config_from_manifest_structures(policy, secret_property)
.expect("failed to create secret config from policy and secret properties");
Self {
id,
secret_name,
secret_config,
secret_source,
status: RwLock::new(StatusInfo::reconciling("")),
}
}
}
#[async_trait]
impl<S: SecretSource + Send + Sync + Clone> Scaler for SecretScaler<S> {
fn id(&self) -> &str {
&self.id
}
fn kind(&self) -> &str {
SECRET_SCALER_KIND
}
fn name(&self) -> String {
self.secret_config.name.to_string()
}
async fn status(&self) -> StatusInfo {
let _ = self.reconcile().await;
self.status.read().await.to_owned()
}
async fn update_config(&mut self, _config: TraitProperty) -> Result<Vec<Command>> {
debug!("SecretScaler does not support updating config, ignoring");
Ok(vec![])
}
async fn handle_event(&self, event: &Event) -> Result<Vec<Command>> {
match event {
Event::ConfigSet(ConfigSet { config_name })
| Event::ConfigDeleted(ConfigDeleted { config_name }) => {
if config_name == &self.secret_name {
return self.reconcile().await;
}
}
// This is a workaround to ensure that the config has a chance to periodically
// update itself if it is out of sync. For efficiency, we only fetch configuration
// again if the status is not deployed.
Event::HostHeartbeat(_) => {
if !matches!(self.status.read().await.status_type, StatusType::Deployed) {
return self.reconcile().await;
}
}
_ => {
trace!("SecretScaler does not support this event, ignoring");
}
}
Ok(Vec::new())
}
#[instrument(level = "trace", skip_all, fields(scaler_id = %self.id))]
async fn reconcile(&self) -> Result<Vec<Command>> {
debug!(self.secret_name, "Fetching configuration");
match self.secret_source.get_secret(&self.secret_name).await {
// If configuration matches what's supplied, this scaler is deployed
Ok(Some(config)) if config == self.secret_config => {
*self.status.write().await = StatusInfo::deployed("");
Ok(Vec::new())
}
// If configuration is out of sync, we put the configuration
Ok(_config) => {
debug!(self.secret_name, "Putting secret");
match self.secret_config.clone().try_into() {
Ok(config) => {
*self.status.write().await = StatusInfo::reconciling("Secret out of sync");
Ok(vec![Command::PutConfig(PutConfig {
config_name: self.secret_name.clone(),
config,
})])
}
Err(e) => {
*self.status.write().await = StatusInfo::failed(&format!(
"Failed to convert secret config to map: {}.",
e
));
Ok(vec![])
}
}
}
Err(e) => {
error!(error = %e, "SecretScaler failed to fetch configuration");
*self.status.write().await = StatusInfo::failed(&e.to_string());
Ok(Vec::new())
}
}
}
#[instrument(level = "trace", skip_all)]
async fn cleanup(&self) -> Result<Vec<Command>> {
Ok(vec![Command::DeleteConfig(DeleteConfig {
config_name: self.secret_name.clone(),
})])
}
}
/// Merge policy and properties into a [`SecretConfig`] for later use.
fn config_from_manifest_structures(
policy: Policy,
reference: SecretProperty,
) -> anyhow::Result<SecretConfig> {
let mut policy_properties = policy.properties.clone();
let backend = policy_properties
.remove("backend")
.context("policy did not have a backend property")?;
Ok(SecretConfig::new(
reference.name.clone(),
backend,
reference.properties.key.clone(),
reference.properties.field.clone(),
reference.properties.version.clone(),
policy_properties
.into_iter()
.map(|(k, v)| (k, v.into()))
.collect(),
))
}
#[cfg(test)]
mod test {
use super::config_from_manifest_structures;
use crate::{
commands::{Command, PutConfig},
events::{ConfigDeleted, Event, HostHeartbeat},
scaler::Scaler,
test_util::TestLatticeSource,
};
use std::collections::{BTreeMap, HashMap};
use wadm_types::{api::StatusType, Policy, SecretProperty, SecretSourceProperty};
#[tokio::test]
async fn test_secret_scaler() {
let lattice = TestLatticeSource {
claims: HashMap::new(),
inventory: Default::default(),
links: Vec::new(),
config: HashMap::new(),
};
let policy = Policy {
name: "nats-kv".to_string(),
policy_type: "secrets-backend".to_string(),
properties: BTreeMap::from([("backend".to_string(), "nats-kv".to_string())]),
};
let secret = SecretProperty {
name: "test".to_string(),
properties: SecretSourceProperty {
policy: "nats-kv".to_string(),
key: "test".to_string(),
field: None,
version: None,
},
};
let secret_scaler = super::SecretScaler::new(
secret.name.clone(),
policy.clone(),
secret.clone(),
lattice.clone(),
);
assert_eq!(
secret_scaler.status().await.status_type,
StatusType::Reconciling
);
let cfg = config_from_manifest_structures(policy, secret.clone())
.expect("failed to merge policy");
assert_eq!(
secret_scaler
.reconcile()
.await
.expect("reconcile did not succeed"),
vec![Command::PutConfig(PutConfig {
config_name: secret.name.clone(),
config: cfg.clone().try_into().expect("should convert to map"),
})],
);
assert_eq!(
secret_scaler.status().await.status_type,
StatusType::Reconciling
);
// Configuration deleted, relevant
assert_eq!(
secret_scaler
.handle_event(&Event::ConfigDeleted(ConfigDeleted {
config_name: secret.name.clone()
}))
.await
.expect("handle_event should succeed"),
vec![Command::PutConfig(PutConfig {
config_name: secret.name.clone(),
config: cfg.clone().try_into().expect("should convert to map"),
})]
);
assert_eq!(
secret_scaler.status().await.status_type,
StatusType::Reconciling
);
// Configuration deleted, irrelevant
assert_eq!(
secret_scaler
.handle_event(&Event::ConfigDeleted(ConfigDeleted {
config_name: "some_other_config".to_string()
}))
.await
.expect("handle_event should succeed"),
vec![]
);
assert_eq!(
secret_scaler.status().await.status_type,
StatusType::Reconciling
);
// Periodic reconcile with host heartbeat
assert_eq!(
secret_scaler
.handle_event(&Event::HostHeartbeat(HostHeartbeat {
components: Vec::new(),
providers: Vec::new(),
host_id: String::default(),
issuer: String::default(),
friendly_name: String::default(),
labels: HashMap::new(),
version: semver::Version::new(0, 0, 0),
uptime_human: String::default(),
uptime_seconds: 0,
}))
.await
.expect("handle_event should succeed"),
vec![Command::PutConfig(PutConfig {
config_name: secret.name.clone(),
config: cfg.clone().try_into().expect("should convert to map"),
})]
);
assert_eq!(
secret_scaler.status().await.status_type,
StatusType::Reconciling
);
}
}

View File

@ -1,3 +1,5 @@
use std::hash::{Hash, Hasher};
use anyhow::Result;
use async_trait::async_trait;
use tokio::sync::RwLock;
@ -10,12 +12,12 @@ use crate::{
Event, LinkdefDeleted, LinkdefSet, ProviderHealthCheckInfo, ProviderHealthCheckPassed,
ProviderHealthCheckStatus,
},
scaler::{compute_id_sha256, Scaler},
scaler::Scaler,
storage::ReadStore,
workers::LinkSource,
};
pub const LINK_SCALER_KIND: &str = "LinkScaler";
pub const LINK_SCALER_TYPE: &str = "linkdefscaler";
/// Config for a LinkScaler
pub struct LinkScalerConfig {
@ -62,20 +64,6 @@ where
&self.id
}
fn kind(&self) -> &str {
LINK_SCALER_KIND
}
fn name(&self) -> String {
format!(
"{} -({}:{})-> {}",
self.config.source_id,
self.config.wit_namespace,
self.config.wit_package,
self.config.target
)
}
async fn status(&self) -> StatusInfo {
let _ = self.reconcile().await;
self.status.read().await.to_owned()
@ -90,8 +78,8 @@ where
#[instrument(level = "trace", skip_all, fields(scaler_id = %self.id))]
async fn handle_event(&self, event: &Event) -> Result<Vec<Command>> {
match event {
// Trigger linkdef creation if this component starts and belongs to this model
Event::ComponentScaled(evt) if evt.component_id == self.config.source_id || evt.component_id == self.config.target => {
// Trigger linkdef creation if this actor starts and belongs to this model
Event::ComponentScaled(evt) if evt.component_id == self.config.source_id => {
self.reconcile().await
}
Event::ProviderHealthCheckPassed(ProviderHealthCheckPassed {
@ -122,9 +110,9 @@ where
self.reconcile().await
}
Event::LinkdefSet(LinkdefSet { linkdef })
if linkdef.source_id() == self.config.source_id
&& linkdef.target() == self.config.target
&& linkdef.name() == self.config.name =>
if linkdef.source_id == self.config.source_id
&& linkdef.target == self.config.target
&& linkdef.name == self.config.name =>
{
*self.status.write().await = StatusInfo::deployed("");
Ok(Vec::new())
@ -141,28 +129,27 @@ where
let (exists, _config_different) = linkdefs
.into_iter()
.find(|linkdef| {
linkdef.source_id() == source_id
&& linkdef.target() == target
&& linkdef.name() == self.config.name
&linkdef.source_id == source_id
&& &linkdef.target == target
&& linkdef.name == self.config.name
})
.map(|linkdef| {
(
true,
// TODO(#88): reverse compare too
// Ensure all supplied configs (both source and target) are the same
linkdef
.source_config()
.iter()
.eq(self.config.source_config.iter())
&& linkdef
.target_config()
.iter()
.eq(self.config.target_config.iter()),
// TODO: reverse compare too
// Ensure all named configs are the same
linkdef.source_config.iter().all(|config_name| {
self.config.source_config.iter().any(|c| c == config_name)
}) || linkdef.target_config.iter().all(|config_name| {
self.config.target_config.iter().any(|c| c == config_name)
}),
)
})
.unwrap_or((false, false));
// TODO(#88)
// TODO(brooksmtownsend): Now that links are ID based not public key based, we should be able to reenable this
// TODO: Reenable this functionality once we figure out https://github.com/wasmCloud/wadm/issues/123
// If it already exists, but values are different, we need to have a delete event first
// and recreate it with the correct values second
// let mut commands = values_different
@ -228,37 +215,14 @@ where
impl<S: ReadStore + Send + Sync, L: LinkSource> LinkScaler<S, L> {
/// Construct a new LinkScaler with specified configuration values
pub fn new(store: S, link_config: LinkScalerConfig, ctl_client: L) -> Self {
// Compute the id of this scaler based on all of the configuration values
// that make it unique. This is used during upgrades to determine if a
// scaler is the same as a previous one.
let mut id_parts = vec![
LINK_SCALER_KIND,
&link_config.model_name,
&link_config.name,
&link_config.source_id,
&link_config.target,
&link_config.wit_namespace,
&link_config.wit_package,
];
id_parts.extend(
link_config
.wit_interfaces
.iter()
.map(std::string::String::as_str),
// NOTE(thomastaylor312): Yep, this is gnarly, but it was all the information that would be
// useful to have if uniquely identifying a link scaler
let linkscaler_config_hash =
compute_linkscaler_config_hash(&link_config.source_config, &link_config.target_config);
let id = format!(
"{LINK_SCALER_TYPE}-{}-{}-{}-{}-{linkscaler_config_hash}",
link_config.model_name, link_config.name, link_config.source_id, link_config.target,
);
id_parts.extend(
link_config
.source_config
.iter()
.map(std::string::String::as_str),
);
id_parts.extend(
link_config
.target_config
.iter()
.map(std::string::String::as_str),
);
let id = compute_id_sha256(&id_parts);
Self {
store,
@ -270,6 +234,18 @@ impl<S: ReadStore + Send + Sync, L: LinkSource> LinkScaler<S, L> {
}
}
fn compute_linkscaler_config_hash(source: &[String], target: &[String]) -> u64 {
let mut linkscaler_config_hasher = std::collections::hash_map::DefaultHasher::new();
// Hash each of the names in the source and target
source.iter().for_each(|s| {
s.hash(&mut linkscaler_config_hasher);
});
target.iter().for_each(|t| {
t.hash(&mut linkscaler_config_hasher);
});
linkscaler_config_hasher.finish()
}
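A quick check of this helper's behavior; note that std documents DefaultHasher's algorithm as unspecified across releases, so IDs derived this way are stable within a build but not guaranteed across toolchain upgrades.
use std::collections::hash_map::DefaultHasher;
use std::hash::{Hash, Hasher};
// Standalone copy of the helper above, for illustration
fn hash_names(source: &[String], target: &[String]) -> u64 {
    let mut hasher = DefaultHasher::new();
    source.iter().for_each(|s| s.hash(&mut hasher));
    target.iter().for_each(|t| t.hash(&mut hasher));
    hasher.finish()
}
fn main() {
    let http = vec!["default-http".to_string()];
    let cert = vec!["outbound-cert".to_string()];
    // Different config names produce different hashes (in practice)
    assert_ne!(hash_names(&http, &cert), hash_names(&[], &[]));
    // The hash is order-sensitive: swapping source and target configs
    // changes the resulting scaler ID
    assert_ne!(hash_names(&http, &cert), hash_names(&cert, &http));
}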
#[cfg(test)]
mod test {
use std::{
@ -278,7 +254,7 @@ mod test {
vec,
};
use wasmcloud_control_interface::Link;
use wasmcloud_control_interface::InterfaceLinkDefinition;
use chrono::Utc;
@ -291,20 +267,20 @@ mod test {
APP_SPEC_ANNOTATION,
};
async fn create_store(lattice_id: &str, component_ref: &str, provider_ref: &str) -> TestStore {
async fn create_store(lattice_id: &str, actor_ref: &str, provider_ref: &str) -> TestStore {
let store = TestStore::default();
store
.store(
lattice_id,
"component".to_string(),
"actor".to_string(),
Component {
id: "component".to_string(),
reference: component_ref.to_owned(),
id: "actor".to_string(),
reference: actor_ref.to_owned(),
..Default::default()
},
)
.await
.expect("Couldn't store component");
.expect("Couldn't store actor");
store
.store(
lattice_id,
@ -316,14 +292,14 @@ mod test {
},
)
.await
.expect("Couldn't store component");
.expect("Couldn't store actor");
store
}
#[tokio::test]
async fn test_different_ids() {
async fn test_id_generator() {
let lattice_id = "id_generator".to_string();
let component_ref = "component_ref".to_string();
let actor_ref = "actor_ref".to_string();
let component_id = "component_id".to_string();
let provider_ref = "provider_ref".to_string();
let provider_id = "provider_id".to_string();
@ -332,7 +308,7 @@ mod test {
let target_config = vec!["target_config".to_string()];
let scaler = LinkScaler::new(
create_store(&lattice_id, &component_ref, &provider_ref).await,
create_store(&lattice_id, &actor_ref, &provider_ref).await,
LinkScalerConfig {
source_id: provider_id.clone(),
target: component_id.clone(),
@ -348,59 +324,87 @@ mod test {
TestLatticeSource::default(),
);
let other_same_scaler = LinkScaler::new(
create_store(&lattice_id, &component_ref, &provider_ref).await,
LinkScalerConfig {
source_id: provider_id.clone(),
target: component_id.clone(),
wit_namespace: "wit_namespace".to_string(),
wit_package: "wit_package".to_string(),
wit_interfaces: vec!["wit_interface".to_string()],
name: "default".to_string(),
lattice_id: lattice_id.clone(),
model_name: "model".to_string(),
source_config: source_config.clone(),
target_config: target_config.clone(),
},
TestLatticeSource::default(),
let id = format!(
"{LINK_SCALER_TYPE}-{model_name}-{link_name}-{provider_id}-{component_id}-{linkscaler_values_hash}",
LINK_SCALER_TYPE = LINK_SCALER_TYPE,
model_name = "model",
link_name = "default",
linkscaler_values_hash = compute_linkscaler_config_hash(&source_config, &target_config)
);
assert_eq!(scaler.id(), other_same_scaler.id(), "LinkScaler ID should be the same when scalers have the same type, model name, provider link name, component reference, provider reference, and values");
assert_eq!(scaler.id(), id, "LinkScaler ID should be the same when scalers have the same type, model name, provider link name, actor reference, provider reference, and values");
let different_scaler = LinkScaler::new(
create_store(&lattice_id, &component_ref, &provider_ref).await,
LinkScalerConfig {
source_id: provider_id.clone(),
target: component_id.clone(),
wit_namespace: "wit_namespace".to_string(),
wit_package: "wit_package".to_string(),
wit_interfaces: vec!["wit_interface".to_string()],
name: "default".to_string(),
lattice_id: lattice_id.clone(),
model_name: "model".to_string(),
source_config: vec!["foo".to_string()],
target_config: vec!["bar".to_string()],
},
TestLatticeSource::default(),
let id = format!(
"{LINK_SCALER_TYPE}-{model_name}-{link_name}-{component_id}-{provider_id}-{linkscaler_values_hash}",
LINK_SCALER_TYPE = LINK_SCALER_TYPE,
model_name = "model",
link_name = "default",
linkscaler_values_hash = compute_linkscaler_config_hash(&["foo".to_string()], &["bar".to_string()])
);
assert_ne!(
scaler.id(),
different_scaler.id(),
id,
"LinkScaler ID should be different when scalers have different configured values"
);
let scaler = LinkScaler::new(
create_store(&lattice_id, &actor_ref, &provider_ref).await,
LinkScalerConfig {
source_id: component_id.clone(),
target: provider_id.clone(),
wit_namespace: "contr".to_string(),
wit_package: "act".to_string(),
wit_interfaces: vec!["interface".to_string()],
name: "default".to_string(),
lattice_id: lattice_id.clone(),
model_name: "model".to_string(),
source_config: vec![],
target_config: vec![],
},
TestLatticeSource::default(),
);
let id = format!(
"{LINK_SCALER_TYPE}-{model_name}-{link_name}-{component_id}-{provider_id}-{linkscaler_values_hash}",
LINK_SCALER_TYPE = LINK_SCALER_TYPE,
model_name = "model",
link_name = "default",
linkscaler_values_hash = compute_linkscaler_config_hash(&[], &[])
);
assert_eq!(scaler.id(), id, "LinkScaler ID should be the same when their type, model name, provider link name, actor reference, and provider reference are the same and they both have no values configured");
let scaler = LinkScaler::new(
create_store(&lattice_id, &actor_ref, &provider_ref).await,
LinkScalerConfig {
source_id: component_id.clone(),
target: provider_id.clone(),
wit_namespace: "contr".to_string(),
wit_package: "act".to_string(),
wit_interfaces: vec!["interface".to_string()],
name: "default".to_string(),
lattice_id: lattice_id.clone(),
model_name: "model".to_string(),
source_config: vec!["default-http".to_string()],
target_config: vec!["outbound-cert".to_string()],
},
TestLatticeSource::default(),
);
assert_ne!(scaler.id(), id, "Expected LinkScaler values hash to differentiate scalers with the same type, model name, provider link name, actor reference, and provider reference");
}
#[tokio::test]
async fn test_no_linkdef() {
let lattice_id = "no-linkdef".to_string();
let component_ref = "component_ref".to_string();
let component_id = "component".to_string();
let actor_ref = "actor_ref".to_string();
let component_id = "actor".to_string();
let provider_ref = "provider_ref".to_string();
let provider_id = "provider".to_string();
let scaler = LinkScaler::new(
create_store(&lattice_id, &component_ref, &provider_ref).await,
create_store(&lattice_id, &actor_ref, &provider_ref).await,
LinkScalerConfig {
source_id: component_id.clone(),
target: provider_id.clone(),
@ -422,33 +426,72 @@ mod test {
assert!(matches!(commands[0], Command::PutLink(_)));
}
// TODO: Uncomment once https://github.com/wasmCloud/wadm/issues/123 is fixed
// #[tokio::test]
// async fn test_different_values() {
// let lattice_id = "different-values".to_string();
// let actor_ref = "actor_ref".to_string();
// let provider_ref = "provider_ref".to_string();
// let values = HashMap::from([("foo".to_string(), "bar".to_string())]);
// let mut linkdef = LinkDefinition::default();
// linkdef.component_id = "actor".to_string();
// linkdef.provider_id = "provider".to_string();
// linkdef.contract_id = "contract".to_string();
// linkdef.link_name = "default".to_string();
// linkdef.values = [("foo".to_string(), "nope".to_string())].into();
// let scaler = LinkScaler::new(
// create_store(&lattice_id, &actor_ref, &provider_ref).await,
// actor_ref,
// provider_ref,
// "contract".to_string(),
// None,
// lattice_id.clone(),
// "model".to_string(),
// Some(values),
// TestLatticeSource {
// links: vec![linkdef],
// ..Default::default()
// },
// );
// let commands = scaler.reconcile().await.expect("Couldn't reconcile");
// assert_eq!(commands.len(), 2);
// assert!(matches!(commands[0], Command::DeleteLinkdef(_)));
// assert!(matches!(commands[1], Command::PutLinkdef(_)));
// }
#[tokio::test]
async fn test_existing_linkdef() {
let lattice_id = "existing-linkdef".to_string();
let component_ref = "component_ref".to_string();
let component_id = "component".to_string();
let actor_ref = "actor_ref".to_string();
let component_id = "actor".to_string();
let provider_ref = "provider_ref".to_string();
let provider_id = "provider".to_string();
let linkdef = Link::builder()
.source_id(&component_id)
.target(&provider_id)
.wit_namespace("namespace")
.wit_package("package")
.interfaces(vec!["interface".to_string()])
.name("default")
.build()
.unwrap();
let linkdef = InterfaceLinkDefinition {
source_id: component_id.to_string(),
target: provider_id.to_string(),
wit_namespace: "namespace".to_string(),
wit_package: "package".to_string(),
interfaces: vec!["interface".to_string()],
name: "default".to_string(),
source_config: vec![],
target_config: vec![],
};
let scaler = LinkScaler::new(
create_store(&lattice_id, &component_ref, &provider_ref).await,
create_store(&lattice_id, &actor_ref, &provider_ref).await,
LinkScalerConfig {
source_id: linkdef.source_id().to_string(),
target: linkdef.target().to_string(),
wit_namespace: linkdef.wit_namespace().to_string(),
wit_package: linkdef.wit_package().to_string(),
wit_interfaces: linkdef.interfaces().clone(),
name: linkdef.name().to_string(),
source_id: linkdef.source_id.clone(),
target: linkdef.target.clone(),
wit_namespace: linkdef.wit_namespace.clone(),
wit_package: linkdef.wit_package.clone(),
wit_interfaces: linkdef.interfaces.clone(),
name: linkdef.name.clone(),
source_config: vec![],
target_config: vec![],
lattice_id: lattice_id.clone(),
@ -472,7 +515,7 @@ mod test {
async fn can_put_linkdef_from_triggering_events() {
let lattice_id = "can_put_linkdef_from_triggering_events";
let echo_ref = "fakecloud.azurecr.io/echo:0.3.4".to_string();
let echo_id = "MASDASDIAMAREALCOMPONENTECHO";
let echo_id = "MASDASDIAMAREALACTORECHO";
let httpserver_ref = "fakecloud.azurecr.io/httpserver:0.5.2".to_string();
let host_id_one = "NASDASDIMAREALHOSTONE";
@ -549,7 +592,7 @@ mod test {
// Since no link exists, we should expect a put link command
assert_eq!(commands.len(), 1);
// Component starts, put into state and then handle event
// Actor starts, put into state and then handle event
store
.store(
lattice_id,
@ -561,7 +604,7 @@ mod test {
},
)
.await
.expect("should be able to store component");
.expect("should be able to store actor");
let commands = link_scaler
.handle_event(&Event::ComponentScaled(ComponentScaled {
@ -576,21 +619,23 @@ mod test {
host_id: host_id_one.to_string(),
}))
.await
.expect("should be able to handle components started event");
.expect("should be able to handle actors started event");
assert_eq!(commands.len(), 1);
let commands = link_scaler
.handle_event(&Event::LinkdefSet(LinkdefSet {
linkdef: Link::builder()
// NOTE: contract, link, and provider id matches but the component is different
.source_id("nm0001772")
.target("VASDASD")
.wit_namespace("wasmcloud")
.wit_package("httpserver")
.name("default")
.build()
.unwrap(),
linkdef: InterfaceLinkDefinition {
// NOTE: contract, link, and provider id matches but the actor is different
source_id: "nm0001772".to_string(),
target: "VASDASD".to_string(),
wit_namespace: "wasmcloud".to_string(),
wit_package: "httpserver".to_string(),
interfaces: vec![],
name: "default".to_string(),
source_config: vec![],
target_config: vec![],
},
}))
.await
.expect("");

File diff suppressed because it is too large

View File

@ -7,30 +7,26 @@ use anyhow::Result;
use async_trait::async_trait;
use tokio::sync::{OnceCell, RwLock};
use tracing::{instrument, trace};
use wadm_types::{
api::{StatusInfo, StatusType},
Spread, SpreadScalerProperty, TraitProperty,
};
use wadm_types::{api::StatusInfo, Spread, SpreadScalerProperty, TraitProperty};
use crate::{
commands::{Command, StartProvider, StopProvider},
events::{
ConfigSet, Event, HostHeartbeat, HostStarted, HostStopped, ProviderHealthCheckFailed,
ProviderHealthCheckInfo, ProviderHealthCheckPassed, ProviderInfo, ProviderStarted,
Event, HostHeartbeat, HostStarted, HostStopped, ProviderInfo, ProviderStarted,
ProviderStopped,
},
scaler::{
compute_id_sha256,
compute_config_hash,
spreadscaler::{
compute_ineligible_hosts, compute_spread, eligible_hosts, spreadscaler_annotations,
},
Scaler,
},
storage::{Host, Provider, ProviderStatus, ReadStore},
storage::{Host, ReadStore},
SCALER_KEY,
};
use super::SPREAD_SCALER_KIND;
pub const PROVIDER_SPREAD_SCALER_TYPE: &str = "providerspreadscaler";
/// Config for a ProviderSpreadScaler
#[derive(Clone)]
@ -71,14 +67,6 @@ impl<S: ReadStore + Send + Sync + Clone> Scaler for ProviderSpreadScaler<S> {
&self.id
}
fn kind(&self) -> &str {
SPREAD_SCALER_KIND
}
fn name(&self) -> String {
self.config.provider_id.to_string()
}
async fn status(&self) -> StatusInfo {
let _ = self.reconcile().await;
self.status.read().await.to_owned()
@ -119,65 +107,6 @@ impl<S: ReadStore + Send + Sync + Clone> Scaler for ProviderSpreadScaler<S> {
{
self.reconcile().await
}
// perform status updates for health check events
Event::ProviderHealthCheckFailed(ProviderHealthCheckFailed {
data: ProviderHealthCheckInfo { provider_id, .. },
})
| Event::ProviderHealthCheckPassed(ProviderHealthCheckPassed {
data: ProviderHealthCheckInfo { provider_id, .. },
}) if provider_id == &self.config.provider_id => {
let provider = self
.store
.get::<Provider>(&self.config.lattice_id, &self.config.provider_id)
.await?;
let unhealthy_providers = provider.map_or(0, |p| {
p.hosts
.values()
.filter(|s| *s == &ProviderStatus::Failed)
.count()
});
let status = self.status.read().await.to_owned();
// update health status of scaler
if let Some(status) = match (status, unhealthy_providers > 0) {
// scaler is deployed but contains unhealthy providers
(
StatusInfo {
status_type: StatusType::Deployed,
..
},
true,
) => Some(StatusInfo::failed(&format!(
"Unhealthy provider on {} host(s)",
unhealthy_providers
))),
// A scaler can become unhealthy only if it was previously deployed.
// Once the scaler becomes healthy again, revert back to the deployed state.
// This is a workaround to detect unhealthy status until
// StatusType::Unhealthy can be used
(
StatusInfo {
status_type: StatusType::Failed,
message,
},
false,
) if message.starts_with("Unhealthy provider on") => {
Some(StatusInfo::deployed(""))
}
// don't update status if scaler is not deployed
_ => None,
} {
*self.status.write().await = status;
}
// Only the status needs an update; no new commands are required
Ok(Vec::new())
}
Event::ConfigSet(ConfigSet { config_name })
if self.config.provider_config.contains(config_name) =>
{
self.reconcile().await
}
// No other event impacts the job of this scaler so we can ignore it
_ => Ok(Vec::new()),
}
@ -248,11 +177,11 @@ impl<S: ReadStore + Send + Sync + Clone> Scaler for ProviderSpreadScaler<S> {
let (running, other): (HashMap<&String, &Host>, HashMap<&String, &Host>) =
eligible_hosts.into_iter().partition(|(_host_id, host)| {
host.providers
.contains(&ProviderInfo {
.get(&ProviderInfo {
provider_id: provider_id.to_string(),
provider_ref: provider_ref.to_string(),
annotations: BTreeMap::default(),
})
}).is_some()
});
// Get the count of all running providers
let current_running = running.len();
@ -347,21 +276,16 @@ impl<S: ReadStore + Send + Sync + Clone> Scaler for ProviderSpreadScaler<S> {
trace!(?commands, "Calculated commands for provider scaler");
let status = match (spread_status.is_empty(), commands.is_empty()) {
// No failures, no commands, scaler satisfied
(true, true) => StatusInfo::deployed(""),
// No failures, commands generated, scaler is reconciling
(true, false) => {
StatusInfo::reconciling(&format!("Scaling provider on {} host(s)", commands.len()))
}
// Failures occurred, scaler is in a failed state
(false, _) => StatusInfo::failed(
let status = if spread_status.is_empty() {
StatusInfo::deployed("")
} else {
StatusInfo::failed(
&spread_status
.into_iter()
.map(|s| s.message)
.collect::<Vec<String>>()
.join(" "),
),
)
};
trace!(?status, "Updating scaler status");
*self.status.write().await = status;
@ -391,23 +315,14 @@ impl<S: ReadStore + Send + Sync + Clone> Scaler for ProviderSpreadScaler<S> {
impl<S: ReadStore + Send + Sync> ProviderSpreadScaler<S> {
/// Construct a new ProviderSpreadScaler with specified configuration values
pub fn new(store: S, config: ProviderSpreadConfig, component_name: &str) -> Self {
// Compute the id of this scaler based on all of the configuration values
// that make it unique. This is used during upgrades to determine if a
// scaler is the same as a previous one.
let mut id_parts = vec![
SPREAD_SCALER_KIND,
&config.model_name,
component_name,
&config.provider_id,
&config.provider_reference,
];
id_parts.extend(
config
.provider_config
.iter()
.map(std::string::String::as_str),
let mut id = format!(
"{PROVIDER_SPREAD_SCALER_TYPE}-{}-{component_name}-{}",
config.model_name, config.provider_id,
);
let id = compute_id_sha256(&id_parts);
if !config.provider_config.is_empty() {
id.push('-');
id.push_str(&compute_config_hash(&config.provider_config))
}
Self {
store,
@ -447,7 +362,7 @@ mod test {
const MODEL_NAME: &str = "test_provider_spreadscaler";
#[test]
fn test_different_ids() {
fn test_id_generator() {
let config = ProviderSpreadConfig {
lattice_id: "lattice".to_string(),
provider_reference: "provider_ref".to_string(),
@ -460,8 +375,15 @@ mod test {
provider_config: vec![],
};
let scaler1 =
ProviderSpreadScaler::new(Arc::new(TestStore::default()), config, "component");
let scaler = ProviderSpreadScaler::new(Arc::new(TestStore::default()), config, "component");
assert_eq!(
scaler.id(),
format!(
"{PROVIDER_SPREAD_SCALER_TYPE}-{}-component-provider_id",
MODEL_NAME
),
"ProviderSpreadScaler ID should be valid"
);
let config = ProviderSpreadConfig {
lattice_id: "lattice".to_string(),
@ -475,12 +397,27 @@ mod test {
provider_config: vec!["foobar".to_string()],
};
let scaler2 =
ProviderSpreadScaler::new(Arc::new(TestStore::default()), config, "component");
assert_ne!(
scaler1.id(),
scaler2.id(),
"ProviderSpreadScaler IDs should be different with different configuration"
let scaler = ProviderSpreadScaler::new(Arc::new(TestStore::default()), config, "component");
assert_eq!(
scaler.id(),
format!(
"{PROVIDER_SPREAD_SCALER_TYPE}-{}-component-provider_id-{}",
MODEL_NAME,
compute_config_hash(&["foobar".to_string()])
),
"ProviderSpreadScaler ID should be valid"
);
let mut scaler_id_tokens = scaler.id().split('-');
scaler_id_tokens.next_back();
let scaler_id_tokens = scaler_id_tokens.collect::<Vec<&str>>().join("-");
assert_eq!(
scaler_id_tokens,
format!(
"{PROVIDER_SPREAD_SCALER_TYPE}-{}-component-provider_id",
MODEL_NAME
),
"ProviderSpreadScaler ID should be valid and depends on provider_config"
);
}
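On the main side, the scaler id is no longer a readable concatenation but a digest over every identity-relevant configuration value, which keeps ids stable across upgrades while still changing whenever any input changes. A rough sketch of that idea (assumed to approximate compute_id_sha256; the real helper lives elsewhere in wadm and its exact encoding may differ):

    use sha2::{Digest, Sha256};

    // Hedged sketch: fold every id part into one digest so any configuration
    // change yields a different scaler id.
    fn compute_id(parts: &[&str]) -> String {
        let mut hasher = Sha256::new();
        for part in parts {
            hasher.update(part.as_bytes());
        }
        format!("{:x}", hasher.finalize())
    }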
@ -1269,274 +1206,4 @@ mod test {
Ok(())
}
#[tokio::test]
async fn test_healthy_providers_return_healthy_status() -> Result<()> {
let lattice_id = "test_healthy_providers";
let provider_ref = "fakecloud.azurecr.io/provider:3.2.1".to_string();
let provider_id = "VASDASDIAMAREALPROVIDERPROVIDER";
let host_id_one = "NASDASDIMAREALHOSTONE";
let host_id_two = "NASDASDIMAREALHOSTTWO";
let store = Arc::new(TestStore::default());
store
.store(
lattice_id,
host_id_one.to_string(),
Host {
components: HashMap::new(),
friendly_name: "hey".to_string(),
labels: HashMap::from_iter([
("cloud".to_string(), "fake".to_string()),
("region".to_string(), "us-noneofyourbusiness-1".to_string()),
]),
providers: HashSet::from_iter([ProviderInfo {
provider_id: provider_id.to_string(),
provider_ref: provider_ref.to_string(),
annotations: BTreeMap::default(),
}]),
uptime_seconds: 123,
version: None,
id: host_id_one.to_string(),
last_seen: Utc::now(),
},
)
.await?;
// Ensure we spread evenly with equal weights, clean division
let multi_spread_even = SpreadScalerProperty {
instances: 2,
spread: vec![Spread {
name: "SimpleOne".to_string(),
requirements: BTreeMap::from_iter([("cloud".to_string(), "fake".to_string())]),
weight: Some(100),
}],
};
let spreadscaler = ProviderSpreadScaler::new(
store.clone(),
ProviderSpreadConfig {
lattice_id: lattice_id.to_string(),
provider_reference: provider_ref.to_string(),
provider_id: provider_id.to_string(),
spread_config: multi_spread_even,
model_name: MODEL_NAME.to_string(),
provider_config: vec!["foobar".to_string()],
},
"fake_component",
);
store
.store(
lattice_id,
host_id_two.to_string(),
Host {
components: HashMap::new(),
friendly_name: "hey".to_string(),
labels: HashMap::from_iter([
("cloud".to_string(), "fake".to_string()),
("region".to_string(), "us-yourhouse-1".to_string()),
]),
providers: HashSet::from_iter([ProviderInfo {
provider_id: provider_id.to_string(),
provider_ref: provider_ref.to_string(),
annotations: spreadscaler_annotations("SimpleOne", spreadscaler.id()),
}]),
uptime_seconds: 123,
version: None,
id: host_id_two.to_string(),
last_seen: Utc::now(),
},
)
.await?;
store
.store(
lattice_id,
provider_id.to_string(),
Provider {
id: provider_id.to_string(),
name: "provider".to_string(),
issuer: "issuer".to_string(),
reference: provider_ref.to_string(),
hosts: HashMap::from([
(host_id_one.to_string(), ProviderStatus::Failed),
(host_id_two.to_string(), ProviderStatus::Running),
]),
},
)
.await?;
spreadscaler.reconcile().await?;
spreadscaler
.handle_event(&Event::ProviderHealthCheckFailed(
ProviderHealthCheckFailed {
data: ProviderHealthCheckInfo {
provider_id: provider_id.to_string(),
host_id: host_id_one.to_string(),
},
},
))
.await?;
store
.store(
lattice_id,
provider_id.to_string(),
Provider {
id: provider_id.to_string(),
name: "provider".to_string(),
issuer: "issuer".to_string(),
reference: provider_ref.to_string(),
hosts: HashMap::from([
(host_id_one.to_string(), ProviderStatus::Pending),
(host_id_two.to_string(), ProviderStatus::Running),
]),
},
)
.await?;
spreadscaler
.handle_event(&Event::ProviderHealthCheckPassed(
ProviderHealthCheckPassed {
data: ProviderHealthCheckInfo {
provider_id: provider_id.to_string(),
host_id: host_id_two.to_string(),
},
},
))
.await?;
assert_eq!(
spreadscaler.status.read().await.to_owned(),
StatusInfo::deployed("")
);
Ok(())
}
#[tokio::test]
async fn test_unhealthy_providers_return_unhealthy_status() -> Result<()> {
let lattice_id = "test_unhealthy_providers";
let provider_ref = "fakecloud.azurecr.io/provider:3.2.1".to_string();
let provider_id = "VASDASDIAMAREALPROVIDERPROVIDER";
let host_id_one = "NASDASDIMAREALHOSTONE";
let host_id_two = "NASDASDIMAREALHOSTTWO";
let store = Arc::new(TestStore::default());
store
.store(
lattice_id,
host_id_one.to_string(),
Host {
components: HashMap::new(),
friendly_name: "hey".to_string(),
labels: HashMap::from_iter([
("cloud".to_string(), "fake".to_string()),
("region".to_string(), "us-noneofyourbusiness-1".to_string()),
]),
providers: HashSet::from_iter([ProviderInfo {
provider_id: provider_id.to_string(),
provider_ref: provider_ref.to_string(),
annotations: BTreeMap::default(),
}]),
uptime_seconds: 123,
version: None,
id: host_id_one.to_string(),
last_seen: Utc::now(),
},
)
.await?;
// Ensure we spread evenly with equal weights, clean division
let multi_spread_even = SpreadScalerProperty {
instances: 2,
spread: vec![Spread {
name: "SimpleOne".to_string(),
requirements: BTreeMap::from_iter([("cloud".to_string(), "fake".to_string())]),
weight: Some(100),
}],
};
let spreadscaler = ProviderSpreadScaler::new(
store.clone(),
ProviderSpreadConfig {
lattice_id: lattice_id.to_string(),
provider_reference: provider_ref.to_string(),
provider_id: provider_id.to_string(),
spread_config: multi_spread_even,
model_name: MODEL_NAME.to_string(),
provider_config: vec!["foobar".to_string()],
},
"fake_component",
);
store
.store(
lattice_id,
host_id_two.to_string(),
Host {
components: HashMap::new(),
friendly_name: "hey".to_string(),
labels: HashMap::from_iter([
("cloud".to_string(), "fake".to_string()),
("region".to_string(), "us-yourhouse-1".to_string()),
]),
providers: HashSet::from_iter([ProviderInfo {
provider_id: provider_id.to_string(),
provider_ref: provider_ref.to_string(),
annotations: spreadscaler_annotations("SimpleOne", spreadscaler.id()),
}]),
uptime_seconds: 123,
version: None,
id: host_id_two.to_string(),
last_seen: Utc::now(),
},
)
.await?;
store
.store(
lattice_id,
provider_id.to_string(),
Provider {
id: provider_id.to_string(),
name: "provider".to_string(),
issuer: "issuer".to_string(),
reference: provider_ref.to_string(),
hosts: HashMap::from([
(host_id_one.to_string(), ProviderStatus::Failed),
(host_id_two.to_string(), ProviderStatus::Running),
]),
},
)
.await?;
spreadscaler.reconcile().await?;
spreadscaler
.handle_event(&Event::ProviderHealthCheckFailed(
ProviderHealthCheckFailed {
data: ProviderHealthCheckInfo {
provider_id: provider_id.to_string(),
host_id: host_id_one.to_string(),
},
},
))
.await?;
assert_eq!(
spreadscaler.status.read().await.to_owned(),
StatusInfo::failed("Unhealthy provider on 1 host(s)")
);
Ok(())
}
}

View File

@ -1,66 +0,0 @@
use anyhow::Result;
use async_trait::async_trait;
use wadm_types::{api::StatusInfo, TraitProperty};
use crate::{commands::Command, events::Event, scaler::Scaler};
/// The StatusScaler is a scaler that only reports a predefined status and does not perform any actions.
/// It's primarily used as a placeholder for a scaler that wadm failed to initialize for reasons that
/// couldn't be caught during deployment; the failure won't be resolved until a new version of the app is deployed.
pub struct StatusScaler {
id: String,
kind: String,
name: String,
status: StatusInfo,
}
#[async_trait]
impl Scaler for StatusScaler {
fn id(&self) -> &str {
&self.id
}
fn kind(&self) -> &str {
&self.kind
}
fn name(&self) -> String {
self.name.to_string()
}
async fn status(&self) -> StatusInfo {
self.status.clone()
}
async fn update_config(&mut self, _config: TraitProperty) -> Result<Vec<Command>> {
Ok(vec![])
}
async fn handle_event(&self, _event: &Event) -> Result<Vec<Command>> {
Ok(Vec::with_capacity(0))
}
async fn reconcile(&self) -> Result<Vec<Command>> {
Ok(Vec::with_capacity(0))
}
async fn cleanup(&self) -> Result<Vec<Command>> {
Ok(Vec::with_capacity(0))
}
}
impl StatusScaler {
pub fn new(
id: impl AsRef<str>,
kind: impl AsRef<str>,
name: impl AsRef<str>,
status: StatusInfo,
) -> Self {
StatusScaler {
id: id.as_ref().to_string(),
kind: kind.as_ref().to_string(),
name: name.as_ref().to_string(),
status,
}
}
}
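As a usage sketch (hedged; the call site and id value here are illustrative, not taken from wadm), a scaler that failed to initialize can be parked behind a StatusScaler so the app still reports why:

    // Hypothetical call site: surface a permanent failure as a status-only scaler
    let scaler = StatusScaler::new(
        "scaler-id",           // id: stable identifier for this scaler
        "statusscaler",        // kind
        "my-component-scaler", // name
        StatusInfo::failed("failed to resolve provider reference"),
    );
    // reconcile() and handle_event() are no-ops; only status() carries information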

File diff suppressed because it is too large

View File

@ -118,12 +118,7 @@ impl<P: Publisher> Server<P> {
category: "model",
operation: "list",
object_name: None,
} => {
warn!("Received deprecated subject: model.list. Please use model.get instead");
self.handler
.list_models_deprecated(msg, account_id, lattice_id)
.await
}
} => self.handler.list_models(msg, account_id, lattice_id).await,
ParsedSubject {
account_id,
lattice_id,
@ -135,13 +130,6 @@ impl<P: Publisher> Server<P> {
.get_model(msg, account_id, lattice_id, name)
.await
}
ParsedSubject {
account_id,
lattice_id,
category: "model",
operation: "get",
object_name: None,
} => self.handler.list_models(msg, account_id, lattice_id).await,
ParsedSubject {
account_id,
lattice_id,

View File

@ -25,19 +25,14 @@ impl<P: Publisher> ManifestNotifier<P> {
}
#[instrument(level = "trace", skip(self))]
async fn send_event(
&self,
lattice_id: &str,
event_subject_key: &str,
event: Event,
) -> anyhow::Result<()> {
async fn send_event(&self, lattice_id: &str, event: Event) -> anyhow::Result<()> {
let event: CloudEvent = event.try_into()?;
// NOTE(thomastaylor312): A future improvement could be retries here
trace!("Sending notification event");
self.publisher
.publish(
serde_json::to_vec(&event)?,
Some(&format!("{}.{lattice_id}.{event_subject_key}", self.prefix)),
Some(&format!("{}.{lattice_id}", self.prefix)),
)
.await
}
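The diff above moves manifest notifications from one shared per-lattice subject to per-event subjects, so subscribers can filter by event type instead of parsing every payload. Roughly (the wadm.evt prefix is an assumed default for illustration; the actual prefix is whatever the notifier was constructed with):

    // Old: {prefix}.{lattice_id}                      e.g. "wadm.evt.default"
    // New: {prefix}.{lattice_id}.{event_subject_key}  e.g. "wadm.evt.default.manifest_published"
    let subject = format!("{prefix}.{lattice_id}.{event_subject_key}");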
@ -45,7 +40,6 @@ impl<P: Publisher> ManifestNotifier<P> {
pub async fn deployed(&self, lattice_id: &str, manifest: Manifest) -> anyhow::Result<()> {
self.send_event(
lattice_id,
"manifest_published",
Event::ManifestPublished(ManifestPublished { manifest }),
)
.await
@ -54,7 +48,6 @@ impl<P: Publisher> ManifestNotifier<P> {
pub async fn undeployed(&self, lattice_id: &str, name: &str) -> anyhow::Result<()> {
self.send_event(
lattice_id,
"manifest_unpublished",
Event::ManifestUnpublished(ManifestUnpublished {
name: name.to_owned(),
}),

View File

@ -3,6 +3,7 @@ use std::collections::BTreeSet;
use anyhow::Result;
use async_nats::jetstream::kv::{Operation, Store};
use tracing::{debug, instrument, trace};
use wadm_types::api::{ModelSummary, StatusType};
use crate::model::StoredManifest;
@ -93,13 +94,13 @@ impl ModelStorage {
.await
}
/// Fetches a summary of all manifests for the given lattice.
/// Fetches a summary of all models in the given lattice.
#[instrument(level = "debug", skip(self))]
pub async fn list(
&self,
account_id: Option<&str>,
lattice_id: &str,
) -> Result<Vec<StoredManifest>> {
) -> Result<Vec<ModelSummary>> {
debug!("Fetching list of models from storage");
let futs = self
.get_model_set(account_id, lattice_id)
@ -108,11 +109,23 @@ impl ModelStorage {
.0
.into_iter()
// We can't use filter map with futures, but we can use map and then flatten it below
.map(|model_name| async move {
match self.get(account_id, lattice_id, &model_name).await {
Ok(Some((manifest, _))) => Some(Ok(manifest)),
Ok(None) => None,
Err(e) => Some(Err(e)),
.map(|model_name| {
async {
let manifest = match self.get(account_id, lattice_id, &model_name).await {
Ok(Some((manifest, _))) => manifest,
Ok(None) => return None,
Err(e) => return Some(Err(e)),
};
Some(Ok(ModelSummary {
name: model_name,
version: manifest.current_version().to_owned(),
description: manifest.get_current().description().map(|s| s.to_owned()),
deployed_version: manifest.get_deployed().map(|m| m.version().to_owned()),
// TODO(thomastaylor312): Actually fetch the status info from the stored
// manifest once we figure it out
status: StatusType::default(),
status_message: None,
}))
}
});
@ -121,7 +134,7 @@ impl ModelStorage {
.await
.into_iter()
.flatten()
.collect::<Result<Vec<StoredManifest>>>()
.collect::<Result<Vec<ModelSummary>>>()
}
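The comment about filter_map is worth unpacking: each model name is mapped to a future returning Option<Result<T>>, the futures are awaited together, and flattening the options drops missing models while still propagating errors through collect. A standalone sketch of the same idiom (hypothetical lookup function, not wadm's):

    use futures::future::join_all;

    async fn lookup(id: u32) -> Option<Result<u32, String>> {
        if id == 1 { None } else { Some(Ok(id * 2)) } // pretend id 1 was deleted
    }

    async fn demo() -> Result<Vec<u32>, String> {
        // Vec<Option<Result<_, _>>> -> flatten drops the Nones -> collect surfaces errors
        join_all((0..4).map(lookup)).await.into_iter().flatten().collect()
    }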
/// Deletes the given model from storage. This also removes the model from the list of all

View File

@ -52,7 +52,7 @@ pub trait Store: ReadStore {
/// By default this will just call [`Store::store_many`] with a single item in the list of data
async fn store<T>(&self, lattice_id: &str, id: String, data: T) -> Result<(), Self::Error>
where
T: Serialize + DeserializeOwned + StateKind + Send + Sync + Clone, // Needs to be clone in order to retry updates
T: Serialize + DeserializeOwned + StateKind + Send,
{
self.store_many(lattice_id, [(id, data)]).await
}
@ -62,7 +62,7 @@ pub trait Store: ReadStore {
///
/// The given data can be anything that can be turned into an iterator of (key, value). This
/// means you can pass a [`HashMap`](std::collections::HashMap) or something like
/// `["key".to_string(), Component{...}]`
/// `["key".to_string(), Actor{...}]`
///
/// This function has several required bounds. It needs to be serialize and deserialize because
/// some implementations will need to deserialize the current data before modifying it.
@ -71,7 +71,7 @@ pub trait Store: ReadStore {
/// sendable between threads
async fn store_many<T, D>(&self, lattice_id: &str, data: D) -> Result<(), Self::Error>
where
T: Serialize + DeserializeOwned + StateKind + Send + Sync + Clone, // Needs to be clone in order to retry updates
T: Serialize + DeserializeOwned + StateKind + Send,
D: IntoIterator<Item = (String, T)> + Send;
/// Delete a state entry
@ -79,7 +79,7 @@ pub trait Store: ReadStore {
/// By default this will just call [`Store::delete_many`] with a single item in the list of data
async fn delete<T>(&self, lattice_id: &str, id: &str) -> Result<(), Self::Error>
where
T: Serialize + DeserializeOwned + StateKind + Send + Sync,
T: Serialize + DeserializeOwned + StateKind + Send,
{
self.delete_many::<T, _, _>(lattice_id, [id]).await
}
@ -97,7 +97,7 @@ pub trait Store: ReadStore {
/// sendable between threads
async fn delete_many<T, D, K>(&self, lattice_id: &str, data: D) -> Result<(), Self::Error>
where
T: Serialize + DeserializeOwned + StateKind + Send + Sync,
T: Serialize + DeserializeOwned + StateKind + Send,
D: IntoIterator<Item = K> + Send,
K: AsRef<str>;
}
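The new Clone bound annotated above ("Needs to be clone in order to retry updates") exists because a retried compare-and-set has to re-apply the caller's data on every attempt, so each attempt needs its own copy. A minimal illustration of the constraint (hypothetical helper, not the trait itself):

    // Each attempt consumes a fresh copy of the input, hence T: Clone
    fn retry_until<T: Clone>(data: &[(String, T)], mut attempt: impl FnMut(Vec<(String, T)>) -> bool) {
        while !attempt(data.to_vec()) {} // to_vec() is what requires Clone
    }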
@ -107,7 +107,7 @@ pub trait Store: ReadStore {
impl<S: Store + Send + Sync> Store for std::sync::Arc<S> {
async fn store_many<T, D>(&self, lattice_id: &str, data: D) -> Result<(), Self::Error>
where
T: Serialize + DeserializeOwned + StateKind + Send + Sync + Clone,
T: Serialize + DeserializeOwned + StateKind + Send,
D: IntoIterator<Item = (String, T)> + Send,
{
self.as_ref().store_many(lattice_id, data).await
@ -115,7 +115,7 @@ impl<S: Store + Send + Sync> Store for std::sync::Arc<S> {
async fn delete_many<T, D, K>(&self, lattice_id: &str, data: D) -> Result<(), Self::Error>
where
T: Serialize + DeserializeOwned + StateKind + Send + Sync,
T: Serialize + DeserializeOwned + StateKind + Send,
D: IntoIterator<Item = K> + Send,
K: AsRef<str>,
{
@ -210,7 +210,7 @@ impl<S: Store + Sync> ScopedStore<S> {
/// Store a piece of state. This should overwrite existing state entries
pub async fn store<T>(&self, id: String, data: T) -> Result<(), S::Error>
where
T: Serialize + DeserializeOwned + StateKind + Send + Sync + Clone,
T: Serialize + DeserializeOwned + StateKind + Send,
{
self.inner.store(&self.lattice_id, id, data).await
}
@ -219,7 +219,7 @@ impl<S: Store + Sync> ScopedStore<S> {
/// allows for stores to perform multiple writes simultaneously or to leverage transactions
pub async fn store_many<T, D>(&self, data: D) -> Result<(), S::Error>
where
T: Serialize + DeserializeOwned + StateKind + Send + Sync + Clone,
T: Serialize + DeserializeOwned + StateKind + Send,
D: IntoIterator<Item = (String, T)> + Send,
{
self.inner.store_many(&self.lattice_id, data).await
@ -228,7 +228,7 @@ impl<S: Store + Sync> ScopedStore<S> {
/// Delete a state entry
pub async fn delete<T>(&self, id: &str) -> Result<(), S::Error>
where
T: Serialize + DeserializeOwned + StateKind + Send + Sync,
T: Serialize + DeserializeOwned + StateKind + Send,
{
self.inner.delete::<T>(&self.lattice_id, id).await
}
@ -237,7 +237,7 @@ impl<S: Store + Sync> ScopedStore<S> {
/// simultaneously or to leverage transactions
pub async fn delete_many<T, D, K>(&self, data: D) -> Result<(), S::Error>
where
T: Serialize + DeserializeOwned + StateKind + Send + Sync,
T: Serialize + DeserializeOwned + StateKind + Send,
D: IntoIterator<Item = K> + Send,
K: AsRef<str>,
{

View File

@ -9,19 +9,17 @@
//! the encoding in the future. Because of this, DO NOT depend on accessing this data other than
//! through this module
//!
//! All data is currently stored in a single encoded map per type (host, component, provider), where
//! the keys are the ID as given by [`StateId::id`]. Once again, we reserve the right to change this
//! All data is currently stored in a single encoded map per type (host, actor, provider), where the
//! keys are the ID as given by [`StateId::id`]. Once again, we reserve the right to change this
//! structure in the future
use std::collections::HashMap;
use std::io::Error as IoError;
use std::time::Duration;
use async_nats::{
jetstream::kv::{Operation, Store as KvStore},
Error as NatsError,
};
use async_trait::async_trait;
use futures::Future;
use serde::{de::DeserializeOwned, Serialize};
use tracing::{debug, error, field::Empty, instrument, trace};
use tracing_futures::Instrument;
@ -91,73 +89,6 @@ impl NatsKvStore {
Err(e) => Err(NatsStoreError::Nats(e.into())),
}
}
/// Helper that retries update operations
// NOTE(thomastaylor312): We could probably make this even better with some exponential backoff,
// but this is easy enough for now since generally there isn't a ton of competition for updating
// a single lattice
async fn update_with_retries<T, F, Fut>(
&self,
lattice_id: &str,
key: &str,
timeout: Duration,
updater: F,
) -> Result<(), NatsStoreError>
where
T: Serialize + DeserializeOwned + StateKind + Send,
F: Fn(HashMap<String, T>) -> Fut,
Fut: Future<Output = Result<Vec<u8>, NatsStoreError>>,
{
let res = tokio::time::timeout(timeout, async {
loop {
let (current_data, revision) = self
.internal_list::<T>(lattice_id)
.in_current_span()
.await?;
debug!(revision, "Updating data in store");
let updated_data = updater(current_data).await?;
trace!("Writing bytes to store");
// If the function doesn't return any data (such as for deletes), just return early.
// Everything is an update (right now), even for deletes so the only case we'd have
// an empty vec is if we aren't updating anything
if updated_data.is_empty() {
return Ok(())
}
match self.store.update(key, updated_data.into(), revision).await {
Ok(_) => return Ok(()),
Err(e) => {
if e.to_string().contains("wrong last sequence") {
debug!(%key, %lattice_id, "Got wrong last sequence when trying to update state. Retrying update operation");
continue;
}
return Err(NatsStoreError::Nats(e.into()));
}
// TODO(#316): Uncomment this code once we can update to the latest
// async-nats, which actually allows us to access the inner source of the error
// Err(e) => {
// let source = match e.source() {
// Some(s) => s,
// None => return Err(NatsStoreError::Nats(e.into())),
// };
// match source.downcast_ref::<PublishError>() {
// Some(e) if matches!(e.kind(), PublishErrorKind::WrongLastSequence) => {
// debug!(%key, %lattice_id, "Got wrong last sequence when trying to update state. Retrying update operation");
// continue;
// },
// _ => return Err(NatsStoreError::Nats(e.into())),
// }
// }
}
}
})
.await;
match res {
Err(_e) => Err(NatsStoreError::Other(
"Timed out while retrying updates to key".to_string(),
)),
Ok(res2) => res2,
}
}
}
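The removed helper above is a textbook optimistic-concurrency loop: read the value together with its KV revision, apply the update, and start over whenever the conditional write reports a revision conflict ("wrong last sequence" in NATS terms). A generic sketch of the pattern, with a hypothetical Kv trait standing in for the async-nats store:

    enum CasError { WrongRevision, Other(String) }

    trait Kv {
        fn get(&self, key: &str) -> (Vec<u8>, u64); // (value, revision)
        fn cas(&self, key: &str, value: Vec<u8>, revision: u64) -> Result<(), CasError>;
    }

    fn update_with_retries(kv: &impl Kv, key: &str, update: impl Fn(Vec<u8>) -> Vec<u8>) -> Result<(), CasError> {
        loop {
            let (current, revision) = kv.get(key);
            match kv.cas(key, update(current), revision) {
                Ok(()) => return Ok(()),
                Err(CasError::WrongRevision) => continue, // lost the race; re-read and retry
                Err(other) => return Err(other),
            }
        }
    }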
// NOTE(thomastaylor312): This implementation should be good enough to start. If we need to optimize
@ -210,7 +141,7 @@ impl Store for NatsKvStore {
///
/// The given data can be anything that can be turned into an iterator of (key, value). This
/// means you can pass a [`HashMap`](std::collections::HashMap) or something like
/// `["key".to_string(), Component{...}]`
/// `["key".to_string(), Actor{...}]`
///
/// This function has several required bounds. It needs to be serialize and deserialize because
/// some implementations will need to deserialize the current data before modifying it.
@ -220,76 +151,79 @@ impl Store for NatsKvStore {
#[instrument(level = "debug", skip(self, data), fields(key = Empty))]
async fn store_many<T, D>(&self, lattice_id: &str, data: D) -> Result<(), Self::Error>
where
T: Serialize + DeserializeOwned + StateKind + Send + Sync + Clone,
T: Serialize + DeserializeOwned + StateKind + Send,
D: IntoIterator<Item = (String, T)> + Send,
{
let key = generate_key::<T>(lattice_id);
tracing::Span::current().record("key", &key);
let data: Vec<(String, T)> = data.into_iter().collect();
self.update_with_retries(
lattice_id,
&key,
Duration::from_millis(1500),
|mut current_data| async {
let cloned = data.clone();
async move {
for (id, item) in cloned.into_iter() {
if current_data.insert(id, item).is_some() {
// NOTE: We may want to return the old data in the future. For now, keeping it simple
trace!("Replaced existing data");
} else {
trace!("Inserted new entry");
};
}
serde_json::to_vec(&current_data).map_err(NatsStoreError::SerDe)
}
.await
},
)
.in_current_span()
.await
let (mut current_data, revision) = self
.internal_list::<T>(lattice_id)
.in_current_span()
.await?;
debug!("Updating data in store");
for (id, item) in data.into_iter() {
if current_data.insert(id, item).is_some() {
// NOTE: We may want to return the old data in the future. For now, keeping it simple
trace!("Replaced existing data");
} else {
trace!("Inserted new entry");
};
}
let serialized = serde_json::to_vec(&current_data)?;
// NOTE(thomastaylor312): This may not matter much, but because this is JSON and not consuming
// the data it serializes, we would otherwise be holding both the serialized vec and the
// actual struct in memory. So drop the struct immediately to help keep memory usage down
// on busy servers
drop(current_data);
trace!(len = serialized.len(), "Writing bytes to store");
self.store
.update(key, serialized.into(), revision)
.await
.map(|_| ())
.map_err(|e| NatsStoreError::Nats(e.into()))
}
#[instrument(level = "debug", skip(self, data), fields(key = Empty))]
async fn delete_many<T, D, K>(&self, lattice_id: &str, data: D) -> Result<(), Self::Error>
where
T: Serialize + DeserializeOwned + StateKind + Send + Sync,
T: Serialize + DeserializeOwned + StateKind + Send,
D: IntoIterator<Item = K> + Send,
K: AsRef<str>,
{
let key = generate_key::<T>(lattice_id);
tracing::Span::current().record("key", &key);
let (mut current_data, revision) = self
.internal_list::<T>(lattice_id)
.in_current_span()
.await?;
debug!("Updating data in store");
let mut updated = false;
for id in data.into_iter() {
if current_data.remove(id.as_ref()).is_some() {
// NOTE: We may want to return the old data in the future. For now, keeping it simple
trace!(id = %id.as_ref(), "Removing existing data");
updated = true;
} else {
trace!(id = %id.as_ref(), "ID doesn't exist in store, ignoring");
};
}
// If we updated nothing, return early
if !updated {
return Ok(());
}
let data: Vec<String> = data.into_iter().map(|s| s.as_ref().to_string()).collect();
self.update_with_retries(
lattice_id,
&key,
Duration::from_millis(1500),
|mut current_data: HashMap<String, T>| async {
let cloned = data.clone();
async move {
let mut updated = false;
for id in cloned.into_iter() {
if current_data.remove(&id).is_some() {
// NOTE: We may want to return the old data in the future. For now, keeping it simple
trace!(%id, "Removing existing data");
updated = true;
} else {
trace!(%id, "ID doesn't exist in store, ignoring");
};
}
// If we updated nothing, return early
if !updated {
return Ok(Vec::with_capacity(0));
}
serde_json::to_vec(&current_data).map_err(NatsStoreError::SerDe)
}
.await
},
)
.in_current_span()
.await
let serialized = serde_json::to_vec(&current_data)?;
// NOTE(thomastaylor312): This may not matter much, but because this is JSON and not consuming
// the data it serializes, we would otherwise be holding both the serialized vec and the
// actual struct in memory. So drop the struct immediately to help keep memory usage down
// on busy servers
drop(current_data);
trace!(len = serialized.len(), "Writing bytes to store");
self.store
.update(key, serialized.into(), revision)
.await
.map(|_| ())
.map_err(|e| NatsStoreError::Nats(e.into()))
}
}

View File

@ -1,5 +1,5 @@
//! Contains helpers for reaping Hosts that haven't received a heartbeat within a configured amount
//! of time and components and providers on hosts that no longer exist
//! of time and actors and providers on hosts that no longer exist
use std::collections::HashMap;
@ -102,7 +102,7 @@ impl<S: Store + Clone + Send + Sync + 'static> Undertaker<S> {
loop {
ticker.tick().await;
trace!("Tick fired, running reap tasks");
// We want to reap hosts first so that the state is up to date for reaping components and providers
// We want to reap hosts first so that the state is up to date for reaping actors and providers
self.reap_hosts().await;
// Now get the current list of hosts
let hosts = match self.store.list::<Host>(&self.lattice_id).await {
@ -112,8 +112,8 @@ impl<S: Store + Clone + Send + Sync + 'static> Undertaker<S> {
continue;
}
};
// Reap components and providers
self.reap_components(&hosts).await;
// Reap actors and providers
self.reap_actors(&hosts).await;
self.reap_providers(&hosts).await;
trace!("Completed reap tasks");
}
@ -152,49 +152,49 @@ impl<S: Store + Clone + Send + Sync + 'static> Undertaker<S> {
}
#[instrument(level = "debug", skip(self, hosts), fields(lattice_id = %self.lattice_id))]
async fn reap_components(&self, hosts: &HashMap<String, Host>) {
let components = match self.store.list::<Component>(&self.lattice_id).await {
async fn reap_actors(&self, hosts: &HashMap<String, Host>) {
let actors = match self.store.list::<Component>(&self.lattice_id).await {
Ok(n) => n,
Err(e) => {
error!(error = %e, "Error when fetching components from store. Will retry on next tick");
error!(error = %e, "Error when fetching actors from store. Will retry on next tick");
return;
}
};
let (components_to_remove, components_to_update): (
let (actors_to_remove, actors_to_update): (
HashMap<String, Component>,
HashMap<String, Component>,
) = components
) = actors
.into_iter()
.map(|(id, mut component)| {
// Only keep the instances where the host exists and the component is in its map
component.instances.retain(|host_id, _| {
.map(|(id, mut actor)| {
// Only keep the instances where the host exists and the actor is in its map
actor.instances.retain(|host_id, _| {
hosts
.get(host_id)
.map(|host| host.components.contains_key(&component.id))
.map(|host| host.components.contains_key(&actor.id))
.unwrap_or(false)
});
(id, component)
(id, actor)
})
.partition(|(_, component)| component.instances.is_empty());
.partition(|(_, actor)| actor.instances.is_empty());
debug!(to_remove = %components_to_remove.len(), to_update = %components_to_update.len(), "Filtered out list of components to update and reap");
debug!(to_remove = %actors_to_remove.len(), to_update = %actors_to_update.len(), "Filtered out list of actors to update and reap");
if let Err(e) = self
.store
.store_many(&self.lattice_id, components_to_update)
.store_many(&self.lattice_id, actors_to_update)
.await
{
warn!(error = %e, "Error when storing updated components. Will retry on next tick");
warn!(error = %e, "Error when storing updated actors. Will retry on next tick");
return;
}
if let Err(e) = self
.store
.delete_many::<Component, _, _>(&self.lattice_id, components_to_remove.keys())
.delete_many::<Component, _, _>(&self.lattice_id, actors_to_remove.keys())
.await
{
warn!(error = %e, "Error when deleting components from store. Will retry on next tick")
warn!(error = %e, "Error when deleting actors from store. Will retry on next tick")
}
}
@ -203,7 +203,7 @@ impl<S: Store + Clone + Send + Sync + 'static> Undertaker<S> {
let providers = match self.store.list::<Provider>(&self.lattice_id).await {
Ok(n) => n,
Err(e) => {
error!(error = %e, "Error when fetching components from store. Will retry on next tick");
error!(error = %e, "Error when fetching actors from store. Will retry on next tick");
return;
}
};
@ -263,7 +263,7 @@ mod test {
let store = Arc::new(TestStore::default());
let lattice_id = "reaper";
let component_id = "testcomponent";
let component_id = "testactor";
let host1_id = "host1";
let host2_id = "host2";
@ -365,18 +365,14 @@ mod test {
// Wait for first node to be reaped (two ticks)
tokio::time::sleep(wait * 2).await;
// Now check that the providers, components, and hosts were reaped
// Now check that the providers, actors, and hosts were reaped
let hosts = store.list::<Host>(lattice_id).await.unwrap();
assert_eq!(hosts.len(), 1, "Only one host should be left");
let components = store.list::<Component>(lattice_id).await.unwrap();
assert_eq!(
components.len(),
1,
"Only one component should remain in the store"
);
components
let actors = store.list::<Component>(lattice_id).await.unwrap();
assert_eq!(actors.len(), 1, "Only one actor should remain in the store");
actors
.get(component_id)
.expect("Should have the correct component in the store");
.expect("Should have the correct actor in the store");
assert!(
store.list::<Provider>(lattice_id).await.unwrap().is_empty(),
@ -385,11 +381,11 @@ mod test {
}
#[tokio::test]
async fn test_stale_component() {
async fn test_stale_actor() {
let store = Arc::new(TestStore::default());
let lattice_id = "reaper";
let component_id = "testcomponent";
let component_id = "testactor";
let host1_id = "host1";
let host2_id = "host2";
@ -459,11 +455,11 @@ mod test {
// Wait for first tick
tokio::time::sleep(wait).await;
// Make sure we only have one instance of the component left
let components = store.list::<Component>(lattice_id).await.unwrap();
let component = components
// Make sure we only have one instance of the actor left
let actors = store.list::<Component>(lattice_id).await.unwrap();
let component = actors
.get(component_id)
.expect("Should have the correct component in the store");
.expect("Should have the correct actor in the store");
assert_eq!(
component.instances.len(),
1,

View File

@ -2,12 +2,10 @@ use std::collections::HashMap;
use std::sync::Arc;
use tokio::sync::RwLock;
use tracing::debug;
use wasmcloud_control_interface::Link;
use wasmcloud_secrets_types::SecretConfig;
use wasmcloud_control_interface::InterfaceLinkDefinition;
use crate::storage::{Component, Host, Provider, ReadStore, StateKind};
use crate::workers::{ConfigSource, LinkSource, SecretSource};
use crate::workers::{ConfigSource, LinkSource};
// NOTE(thomastaylor312): This type is really ugly and we should probably find a better way to
// structure the ReadStore trait so it doesn't have the generic T we have to work around here. This
@ -28,7 +26,7 @@ pub struct SnapshotStore<S, L> {
lattice_source: L,
lattice_id: String,
stored_state: Arc<RwLock<InMemoryData>>,
links: Arc<RwLock<Vec<Link>>>,
links: Arc<RwLock<Vec<InterfaceLinkDefinition>>>,
}
impl<S, L> Clone for SnapshotStore<S, L>
@ -50,7 +48,7 @@ where
impl<S, L> SnapshotStore<S, L>
where
S: ReadStore,
L: LinkSource + ConfigSource + SecretSource,
L: LinkSource + ConfigSource,
{
/// Creates a new snapshot store that is scoped to the given lattice ID
pub fn new(store: S, lattice_source: L, lattice_id: String) -> Self {
@ -73,7 +71,7 @@ where
.into_iter()
.map(|(key, val)| (key, serde_json::to_value(val).unwrap()))
.collect::<HashMap<_, _>>();
let components = self
let actors = self
.store
.list::<Component>(&self.lattice_id)
.await?
@ -87,22 +85,17 @@ where
.into_iter()
.map(|(key, val)| (key, serde_json::to_value(val).unwrap()))
.collect::<HashMap<_, _>>();
// If we fail to get the links, that likely just means the lattice source is down, so we
// just fall back on what we have cached
if let Ok(links) = self.lattice_source.get_links().await {
*self.links.write().await = links;
} else {
debug!("Failed to get links from lattice source, using cached links");
};
let links = self.lattice_source.get_links().await?;
{
let mut stored_state = self.stored_state.write().await;
stored_state.insert(Provider::KIND.to_owned(), providers);
stored_state.insert(Component::KIND.to_owned(), components);
stored_state.insert(Component::KIND.to_owned(), actors);
stored_state.insert(Host::KIND.to_owned(), hosts);
}
*self.links.write().await = links;
Ok(())
}
}
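Note the failure mode chosen above: a refresh that cannot reach the lattice no longer propagates the error, it keeps serving the last cached links. Condensed, the pattern is (hypothetical types, for illustration):

    // Prefer fresh data, but degrade to the cached value when the source is down
    async fn refresh_links(
        cache: &tokio::sync::RwLock<Vec<String>>,
        fetch: impl std::future::Future<Output = anyhow::Result<Vec<String>>>,
    ) {
        if let Ok(fresh) = fetch.await {
            *cache.write().await = fresh;
        } // on Err: leave the cache untouched and continue with stale links
    }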
@ -165,7 +158,7 @@ where
S: Send + Sync,
L: Send + Sync,
{
async fn get_links(&self) -> anyhow::Result<Vec<Link>> {
async fn get_links(&self) -> anyhow::Result<Vec<InterfaceLinkDefinition>> {
Ok(self.links.read().await.clone())
}
}
@ -180,14 +173,3 @@ where
self.lattice_source.get_config(name).await
}
}
#[async_trait::async_trait]
impl<S, L> SecretSource for SnapshotStore<S, L>
where
S: Send + Sync,
L: SecretSource + Send + Sync,
{
async fn get_secret(&self, name: &str) -> anyhow::Result<Option<SecretConfig>> {
self.lattice_source.get_secret(name).await
}
}

View File

@ -1,6 +1,8 @@
use std::borrow::{Borrow, ToOwned};
use std::collections::{BTreeMap, HashMap, HashSet};
use std::hash::{Hash, Hasher};
use std::{
borrow::Borrow,
collections::{BTreeMap, HashMap, HashSet},
hash::{Hash, Hasher},
};
use chrono::{DateTime, Utc};
use semver::Version;
@ -31,7 +33,7 @@ pub struct Provider {
pub hosts: HashMap<String, ProviderStatus>,
}
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
#[derive(Debug, Serialize, Deserialize, Clone)]
pub enum ProviderStatus {
/// The provider is starting and hasn't returned a heartbeat yet
Pending,
@ -40,7 +42,6 @@ pub enum ProviderStatus {
/// The provider failed to start
// TODO(thomastaylor312): In the future, we'll probably want to decay out a provider from state
// if it hasn't had a heartbeat
// if it fails a recent health check
Failed,
}
@ -50,17 +51,13 @@ impl Default for ProviderStatus {
}
}
impl std::fmt::Display for ProviderStatus {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
write!(
f,
"{}",
match self {
Self::Pending => "pending".to_string(),
Self::Running => "running".to_string(),
Self::Failed => "failed".to_string(),
}
)
impl ToString for ProviderStatus {
fn to_string(&self) -> String {
match self {
Self::Pending => "pending".to_string(),
Self::Running => "running".to_string(),
Self::Failed => "failed".to_string(),
}
}
}
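The replacement of the hand-written ToString impl with Display follows the standard-library guidance: implementing Display yields to_string() for free through the blanket impl<T: fmt::Display> ToString for T, and additionally makes the type usable in format! and friends. A tiny standalone illustration:

    use std::fmt;

    enum Status { Pending }

    impl fmt::Display for Status {
        fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
            f.write_str(match self { Self::Pending => "pending" })
        }
    }

    // Status::Pending.to_string() == "pending" via the blanket ToString impl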
@ -150,7 +147,7 @@ pub struct Component {
}
impl Component {
/// A helper method that returns the total count of running copies of this component, regardless of
/// A helper method that returns the total count of running copies of this actor, regardless of
/// which host they are running on
pub fn count(&self) -> usize {
self.instances
@ -159,7 +156,7 @@ impl Component {
.sum()
}
/// A helper method that returns the total count of running copies of this component on a specific
/// A helper method that returns the total count of running copies of this actor on a specific
/// host
pub fn count_for_host(&self, host_id: &str) -> usize {
self.instances
@ -280,13 +277,13 @@ impl From<&HostStarted> for Host {
impl From<HostHeartbeat> for Host {
fn from(value: HostHeartbeat) -> Self {
let components = value
let actors = value
.components
.into_iter()
.map(|component| {
.map(|actor| {
(
component.id().into(), // SAFETY: Unlikely to not fit into a usize, but fallback just in case
component.max_instances().try_into().unwrap_or(usize::MAX),
actor.id, // SAFETY: Unlikely to not fit into a usize, but fallback just in case
actor.max_instances.try_into().unwrap_or(usize::MAX),
)
})
.collect();
@ -295,19 +292,18 @@ impl From<HostHeartbeat> for Host {
.providers
.into_iter()
.map(|provider| ProviderInfo {
provider_id: provider.id().to_string(),
provider_id: provider.id,
// NOTE: Provider should _always_ have an image ref. The control interface type should be updated.
provider_ref: provider.image_ref().map(String::from).unwrap_or_default(),
provider_ref: provider.image_ref.unwrap_or_default(),
annotations: provider
.annotations()
.map(ToOwned::to_owned)
.map(BTreeMap::from_iter)
.annotations
.map(|a| a.into_iter().collect())
.unwrap_or_default(),
})
.collect();
Host {
components,
components: actors,
friendly_name: value.friendly_name,
labels: value.labels,
providers,
@ -321,14 +317,14 @@ impl From<HostHeartbeat> for Host {
impl From<&HostHeartbeat> for Host {
fn from(value: &HostHeartbeat) -> Self {
let components = value
let actors = value
.components
.iter()
.map(|component| {
.map(|actor| {
(
component.id().to_owned(),
actor.id.to_owned(),
// SAFETY: Unlikely to not fit into a usize, but fallback just in case
component.max_instances().try_into().unwrap_or(usize::MAX),
actor.max_instances.try_into().unwrap_or(usize::MAX),
)
})
.collect();
@ -337,18 +333,18 @@ impl From<&HostHeartbeat> for Host {
.providers
.iter()
.map(|provider| ProviderInfo {
provider_id: provider.id().to_owned(),
provider_ref: provider.image_ref().map(String::from).unwrap_or_default(),
provider_id: provider.id.to_owned(),
provider_ref: provider.image_ref.to_owned().unwrap_or_default(),
annotations: provider
.annotations()
.map(ToOwned::to_owned)
.map(BTreeMap::from_iter)
.annotations
.clone()
.map(|a| a.into_iter().collect())
.unwrap_or_default(),
})
.collect();
Host {
components,
components: actors,
friendly_name: value.friendly_name.clone(),
labels: value.labels.clone(),
providers,

View File

@ -3,15 +3,11 @@ use std::{collections::HashMap, sync::Arc};
use serde::{de::DeserializeOwned, Serialize};
use tokio::sync::RwLock;
use wasmcloud_control_interface::{HostInventory, Link};
use wasmcloud_secrets_types::SecretConfig;
use wasmcloud_control_interface::{HostInventory, InterfaceLinkDefinition};
use crate::publisher::Publisher;
use crate::storage::StateKind;
use crate::workers::{
secret_config_from_map, Claims, ClaimsSource, ConfigSource, InventorySource, LinkSource,
SecretSource,
};
use crate::workers::{Claims, ClaimsSource, ConfigSource, InventorySource, LinkSource};
fn generate_key<T: StateKind>(lattice_id: &str) -> String {
format!("{}_{lattice_id}", T::KIND)
@ -62,7 +58,7 @@ impl crate::storage::ReadStore for TestStore {
impl crate::storage::Store for TestStore {
async fn store_many<T, D>(&self, lattice_id: &str, data: D) -> Result<(), Self::Error>
where
T: Serialize + DeserializeOwned + StateKind + Send + Sync + Clone,
T: Serialize + DeserializeOwned + StateKind + Send,
D: IntoIterator<Item = (String, T)> + Send,
{
let key = generate_key::<T>(lattice_id);
@ -83,7 +79,7 @@ impl crate::storage::Store for TestStore {
async fn delete_many<T, D, K>(&self, lattice_id: &str, data: D) -> Result<(), Self::Error>
where
T: Serialize + DeserializeOwned + StateKind + Send + Sync,
T: Serialize + DeserializeOwned + StateKind + Send,
D: IntoIterator<Item = K> + Send,
K: AsRef<str>,
{
@ -111,7 +107,7 @@ impl crate::storage::Store for TestStore {
pub struct TestLatticeSource {
pub claims: HashMap<String, Claims>,
pub inventory: Arc<RwLock<HashMap<String, HostInventory>>>,
pub links: Vec<Link>,
pub links: Vec<InterfaceLinkDefinition>,
pub config: HashMap<String, HashMap<String, String>>,
}
@ -131,7 +127,7 @@ impl InventorySource for TestLatticeSource {
#[async_trait::async_trait]
impl LinkSource for TestLatticeSource {
async fn get_links(&self) -> anyhow::Result<Vec<Link>> {
async fn get_links(&self) -> anyhow::Result<Vec<InterfaceLinkDefinition>> {
Ok(self.links.clone())
}
}
@ -143,18 +139,6 @@ impl ConfigSource for TestLatticeSource {
}
}
#[async_trait::async_trait]
impl SecretSource for TestLatticeSource {
async fn get_secret(&self, name: &str) -> anyhow::Result<Option<SecretConfig>> {
let secret_config = self
.get_config(format!("secret_{name}").as_str())
.await
.map_err(|e| anyhow::anyhow!("{e:?}"))?;
secret_config.map(secret_config_from_map).transpose()
}
}
/// A publisher that does nothing
#[derive(Clone, Default)]
pub struct NoopPublisher;

View File

@ -30,19 +30,19 @@ impl Worker for CommandWorker {
#[instrument(level = "trace", skip_all)]
async fn do_work(&self, mut message: ScopedMessage<Self::Message>) -> WorkResult<()> {
let res = match message.as_ref() {
Command::ScaleComponent(component) => {
trace!(command = ?component, "Handling scale component command");
Command::ScaleComponent(actor) => {
trace!(command = ?actor, "Handling scale actor command");
// Order here is intentional to prevent scalers from overwriting managed annotations
let mut annotations = component.annotations.clone();
insert_managed_annotations(&mut annotations, &component.model_name);
let mut annotations = actor.annotations.clone();
insert_managed_annotations(&mut annotations, &actor.model_name);
self.client
.scale_component(
&component.host_id,
&component.reference,
&component.component_id,
component.count,
&actor.host_id,
&actor.reference,
&actor.component_id,
actor.count,
Some(annotations.into_iter().collect()),
component.config.clone(),
actor.config.clone(),
)
.await
}
@ -74,7 +74,7 @@ impl Worker for CommandWorker {
trace!(command = ?ld, "Handling put linkdef command");
// TODO(thomastaylor312): We should probably change ScopedMessage to allow us `pub`
// access to the inner type so we don't have to clone, but no need to worry for now
self.client.put_link(ld.clone().try_into()?).await
self.client.put_link(ld.clone().into()).await
}
Command::DeleteLink(ld) => {
trace!(command = ?ld, "Handling delete linkdef command");
@ -101,11 +101,9 @@ impl Worker for CommandWorker {
.map_err(|e| anyhow::anyhow!("{e:?}"));
match res {
Ok(ack) if !ack.succeeded() => {
Ok(ack) if !ack.success => {
message.nack().await;
Err(WorkError::Other(
anyhow::anyhow!("{}", ack.message()).into(),
))
Err(WorkError::Other(anyhow::anyhow!("{}", ack.message).into()))
}
Ok(_) => message.ack().await.map_err(WorkError::from),
Err(e) => {

File diff suppressed because it is too large

View File

@ -1,12 +1,10 @@
use anyhow::{bail, Context};
use async_nats::jetstream::stream::Stream;
use std::collections::{BTreeMap, HashMap};
use std::fmt::Debug;
use wasmcloud_secrets_types::SecretConfig;
use tracing::{debug, instrument, trace, warn};
use wadm_types::api::Status;
use wasmcloud_control_interface::{HostInventory, Link};
use wadm_types::api::StatusInfo;
use wasmcloud_control_interface::{CtlResponse, HostInventory, InterfaceLinkDefinition};
use crate::{commands::Command, publisher::Publisher, APP_SPEC_ANNOTATION};
@ -18,7 +16,7 @@ pub struct Claims {
pub issuer: String,
}
/// A trait for anything that can fetch a set of claims information about components.
/// A trait for anything that can fetch a set of claims information about actors.
///
/// NOTE: This trait right now exists as a convenience for two things: First, testing. Without
/// something like this we require a network connection to unit test. Second, there is no concrete
@ -31,7 +29,7 @@ pub trait ClaimsSource {
/// NOTE(brooksmtownsend): This trait exists in order to query the hosts inventory
/// upon receiving a heartbeat since the heartbeat doesn't contain enough
/// information to properly update the stored data for components
/// information to properly update the stored data for actors
#[async_trait::async_trait]
pub trait InventorySource {
async fn get_inventory(&self, host_id: &str) -> anyhow::Result<HostInventory>;
@ -43,7 +41,7 @@ pub trait InventorySource {
/// due to testing, but it does allow us to abstract away the concrete type of the client
#[async_trait::async_trait]
pub trait LinkSource {
async fn get_links(&self) -> anyhow::Result<Vec<Link>>;
async fn get_links(&self) -> anyhow::Result<Vec<InterfaceLinkDefinition>>;
}
/// A trait for anything that can fetch a piece of named configuration
@ -56,54 +54,22 @@ pub trait ConfigSource {
async fn get_config(&self, name: &str) -> anyhow::Result<Option<HashMap<String, String>>>;
}
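Because these source traits exist largely so tests can avoid a live NATS connection, a static in-memory implementation is enough for unit tests, in the spirit of the TestLatticeSource shown earlier. A hedged sketch:

    // Hypothetical test double backed by a plain map
    struct StaticConfig(HashMap<String, HashMap<String, String>>);

    #[async_trait::async_trait]
    impl ConfigSource for StaticConfig {
        async fn get_config(&self, name: &str) -> anyhow::Result<Option<HashMap<String, String>>> {
            Ok(self.0.get(name).cloned())
        }
    }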
/// A trait for anything that can fetch a secret.
#[async_trait::async_trait]
pub trait SecretSource {
async fn get_secret(&self, name: &str) -> anyhow::Result<Option<SecretConfig>>;
}
/// Converts the configuration map of strings to a secret config
pub fn secret_config_from_map(map: HashMap<String, String>) -> anyhow::Result<SecretConfig> {
match (
map.get("name"),
map.get("backend"),
map.get("key"),
map.get("policy"),
map.get("type"),
) {
(None, _, _, _, _) => bail!("missing name field in secret config"),
(_, None, _, _, _) => bail!("missing backend field in secret config"),
(_, _, None, _, _) => bail!("missing key field in secret config"),
(_, _, _, None, _) => bail!("missing policy field in secret config"),
(_, _, _, _, None) => bail!("missing type field in secret config"),
(Some(name), Some(backend), Some(key), Some(policy), Some(secret_type)) => {
Ok(SecretConfig {
name: name.to_string(),
backend: backend.to_string(),
key: key.to_string(),
field: map.get("field").map(|f| f.to_string()),
version: map.get("version").map(|v| v.to_string()),
policy: serde_json::from_str(policy)
.context("failed to deserialize policy from string")?,
secret_type: secret_type.to_string(),
})
}
}
}
#[async_trait::async_trait]
impl ClaimsSource for wasmcloud_control_interface::Client {
async fn get_claims(&self) -> anyhow::Result<HashMap<String, Claims>> {
match self.get_claims().await.map_err(|e| anyhow::anyhow!("{e}")) {
Ok(ctl_resp) if ctl_resp.succeeded() => {
let claims = ctl_resp.data().context("missing claims data")?.to_owned();
Ok(CtlResponse {
success: true,
response: Some(claims),
..
}) => {
Ok(claims
.into_iter()
.filter_map(|mut claim| {
// NOTE(thomastaylor312): I'm removing instead of getting since we own the data and I
// don't want to clone every time we do this
// If we don't find a subject, we can't actually get the component ID, so skip this one
// If we don't find a subject, we can't actually get the actor ID, so skip this one
Some((
claim.remove("sub")?,
Claims {
@ -131,12 +97,13 @@ impl InventorySource for wasmcloud_control_interface::Client {
.await
.map_err(|e| anyhow::anyhow!("{e:?}"))?
{
ctl_resp if ctl_resp.succeeded() && ctl_resp.data().is_some() => Ok(ctl_resp
.into_data()
.context("missing host inventory data")?),
ctl_resp => Err(anyhow::anyhow!(
"Failed to get inventory for host {host_id}, {}",
ctl_resp.message()
CtlResponse {
success: true,
response: Some(host_inventory),
..
} => Ok(host_inventory),
CtlResponse { message, .. } => Err(anyhow::anyhow!(
"Failed to get inventory for host {host_id}, {message}"
)),
}
}
@ -148,19 +115,18 @@ impl InventorySource for wasmcloud_control_interface::Client {
// links
#[async_trait::async_trait]
impl LinkSource for wasmcloud_control_interface::Client {
async fn get_links(&self) -> anyhow::Result<Vec<Link>> {
async fn get_links(&self) -> anyhow::Result<Vec<InterfaceLinkDefinition>> {
match self
.get_links()
.await
.map_err(|e| anyhow::anyhow!("{e:?}"))?
{
ctl_resp if ctl_resp.succeeded() && ctl_resp.data().is_some() => {
Ok(ctl_resp.into_data().context("missing link data")?)
}
ctl_resp => Err(anyhow::anyhow!(
"Failed to get links, {}",
ctl_resp.message()
)),
CtlResponse {
success: true,
response: Some(links),
..
} => Ok(links),
CtlResponse { message, .. } => Err(anyhow::anyhow!("Failed to get links, {message}")),
}
}
}
@ -173,37 +139,15 @@ impl ConfigSource for wasmcloud_control_interface::Client {
.await
.map_err(|e| anyhow::anyhow!("{e:?}"))?
{
ctl_resp if ctl_resp.succeeded() && ctl_resp.data().is_some() => {
Ok(ctl_resp.into_data())
}
CtlResponse {
success: true,
response: Some(config),
..
} => Ok(Some(config)),
// TODO(https://github.com/wasmCloud/wasmCloud/issues/1906): The control interface should return a None when config isn't found
// instead of returning an error.
ctl_resp => {
debug!("Failed to get config for {name}, {}", ctl_resp.message());
Ok(None)
}
}
}
}
#[async_trait::async_trait]
impl SecretSource for wasmcloud_control_interface::Client {
async fn get_secret(&self, name: &str) -> anyhow::Result<Option<SecretConfig>> {
match self
.get_config(name)
.await
.map_err(|e| anyhow::anyhow!("{e:?}"))?
{
ctl_resp if ctl_resp.succeeded() && ctl_resp.data().is_some() => {
secret_config_from_map(ctl_resp.into_data().context("missing secret data")?)
.map(Some)
}
ctl_resp if ctl_resp.data().is_none() => {
debug!("Failed to get secret for {name}, {}", ctl_resp.message());
Ok(None)
}
ctl_resp => {
debug!("Failed to get secret for {name}, {}", ctl_resp.message());
CtlResponse { message, .. } => {
debug!("Failed to get config for {name}, {message}");
Ok(None)
}
}
@ -238,7 +182,7 @@ impl<Pub> StatusPublisher<Pub> {
impl<Pub: Publisher> StatusPublisher<Pub> {
#[instrument(level = "trace", skip(self))]
pub async fn publish_status(&self, name: &str, status: Status) -> anyhow::Result<()> {
pub async fn publish_status(&self, name: &str, status: StatusInfo) -> anyhow::Result<()> {
let topic = format!("{}.{name}", self.topic_prefix);
// NOTE(brooksmtownsend): This direct get may not always query the jetstream leader. In the
@ -248,7 +192,7 @@ impl<Pub: Publisher> StatusPublisher<Pub> {
status_stream
.direct_get_last_for_subject(&topic)
.await
.map(|m| serde_json::from_slice::<Status>(&m.payload).ok())
.map(|m| serde_json::from_slice::<StatusInfo>(&m.payload).ok())
.ok()
.flatten()
} else {

BIN deployjenkins.png (new binary file, 677 B; contents not shown)

View File

@ -1,704 +0,0 @@
{
"nodes": {
"advisory-db": {
"flake": false,
"locked": {
"lastModified": 1737565911,
"narHash": "sha256-WxIWw1mSPJVU1JfIcTdIubU5UoIwwR8h7UcXop/6htg=",
"owner": "rustsec",
"repo": "advisory-db",
"rev": "ffa26704690a3dc403edcd94baef103ee48f66eb",
"type": "github"
},
"original": {
"owner": "rustsec",
"repo": "advisory-db",
"type": "github"
}
},
"advisory-db_2": {
"flake": false,
"locked": {
"lastModified": 1730464311,
"narHash": "sha256-9xJoP1766XJSO1Qr0Lxg2P6dwPncTr3BJYlFMSXBd/E=",
"owner": "rustsec",
"repo": "advisory-db",
"rev": "f3460e5ed91658ab94fa41908cfa44991f9f4f02",
"type": "github"
},
"original": {
"owner": "rustsec",
"repo": "advisory-db",
"type": "github"
}
},
"crane": {
"locked": {
"lastModified": 1737689766,
"narHash": "sha256-ivVXYaYlShxYoKfSo5+y5930qMKKJ8CLcAoIBPQfJ6s=",
"owner": "ipetkov",
"repo": "crane",
"rev": "6fe74265bbb6d016d663b1091f015e2976c4a527",
"type": "github"
},
"original": {
"owner": "ipetkov",
"repo": "crane",
"type": "github"
}
},
"crane_2": {
"locked": {
"lastModified": 1730652660,
"narHash": "sha256-+XVYfmVXAiYA0FZT7ijHf555dxCe+AoAT5A6RU+6vSo=",
"owner": "ipetkov",
"repo": "crane",
"rev": "a4ca93905455c07cb7e3aca95d4faf7601cba458",
"type": "github"
},
"original": {
"owner": "ipetkov",
"repo": "crane",
"type": "github"
}
},
"crane_3": {
"inputs": {
"flake-compat": "flake-compat",
"flake-utils": [
"wasmcloud",
"nixify",
"nix-log",
"nixify",
"flake-utils"
],
"nixpkgs": [
"wasmcloud",
"nixify",
"nix-log",
"nixify",
"nixpkgs"
],
"rust-overlay": [
"wasmcloud",
"nixify",
"nix-log",
"nixify",
"rust-overlay"
]
},
"locked": {
"lastModified": 1679255352,
"narHash": "sha256-nkGwGuNkhNrnN33S4HIDV5NzkzMLU5mNStRn9sZwq8c=",
"owner": "rvolosatovs",
"repo": "crane",
"rev": "cec65880599a4ec6426186e24342e663464f5933",
"type": "github"
},
"original": {
"owner": "rvolosatovs",
"ref": "feat/wit",
"repo": "crane",
"type": "github"
}
},
"fenix": {
"inputs": {
"nixpkgs": [
"nixpkgs"
],
"rust-analyzer-src": []
},
"locked": {
"lastModified": 1738132439,
"narHash": "sha256-7q5vsyPQf6/aQEKAOgZ4ggv++Z2ppPSuPCGKlbPcM88=",
"owner": "nix-community",
"repo": "fenix",
"rev": "f94e521c1922784c377a2cace90aa89a6b8a1011",
"type": "github"
},
"original": {
"owner": "nix-community",
"repo": "fenix",
"type": "github"
}
},
"fenix_2": {
"inputs": {
"nixpkgs": [
"wasmcloud",
"nixify",
"nixpkgs-nixos"
],
"rust-analyzer-src": "rust-analyzer-src"
},
"locked": {
"lastModified": 1731047492,
"narHash": "sha256-F4h8YtTzPWv0/1Z6fc8fMSqKpn7YhOjlgp66cr15tEo=",
"owner": "nix-community",
"repo": "fenix",
"rev": "da6332e801fbb0418f80f20cefa947c5fe5c18c9",
"type": "github"
},
"original": {
"owner": "nix-community",
"repo": "fenix",
"type": "github"
}
},
"fenix_3": {
"inputs": {
"nixpkgs": [
"wasmcloud",
"nixify",
"nix-log",
"nixify",
"nixpkgs"
],
"rust-analyzer-src": "rust-analyzer-src_2"
},
"locked": {
"lastModified": 1679552560,
"narHash": "sha256-L9Se/F1iLQBZFGrnQJO8c9wE5z0Mf8OiycPGP9Y96hA=",
"owner": "nix-community",
"repo": "fenix",
"rev": "fb49a9f5605ec512da947a21cc7e4551a3950397",
"type": "github"
},
"original": {
"owner": "nix-community",
"repo": "fenix",
"type": "github"
}
},
"flake-compat": {
"flake": false,
"locked": {
"lastModified": 1673956053,
"narHash": "sha256-4gtG9iQuiKITOjNQQeQIpoIB6b16fm+504Ch3sNKLd8=",
"owner": "edolstra",
"repo": "flake-compat",
"rev": "35bb57c0c8d8b62bbfd284272c928ceb64ddbde9",
"type": "github"
},
"original": {
"owner": "edolstra",
"repo": "flake-compat",
"type": "github"
}
},
"flake-utils": {
"inputs": {
"systems": "systems"
},
"locked": {
"lastModified": 1731533236,
"narHash": "sha256-l0KFg5HjrsfsO/JpG+r7fRrqm12kzFHyUHqHCVpMMbI=",
"owner": "numtide",
"repo": "flake-utils",
"rev": "11707dc2f618dd54ca8739b309ec4fc024de578b",
"type": "github"
},
"original": {
"owner": "numtide",
"repo": "flake-utils",
"type": "github"
}
},
"flake-utils_2": {
"inputs": {
"systems": "systems_2"
},
"locked": {
"lastModified": 1726560853,
"narHash": "sha256-X6rJYSESBVr3hBoH0WbKE5KvhPU5bloyZ2L4K60/fPQ=",
"owner": "numtide",
"repo": "flake-utils",
"rev": "c1dfcf08411b08f6b8615f7d8971a2bfa81d5e8a",
"type": "github"
},
"original": {
"owner": "numtide",
"repo": "flake-utils",
"type": "github"
}
},
"flake-utils_3": {
"locked": {
"lastModified": 1678901627,
"narHash": "sha256-U02riOqrKKzwjsxc/400XnElV+UtPUQWpANPlyazjH0=",
"owner": "numtide",
"repo": "flake-utils",
"rev": "93a2b84fc4b70d9e089d029deacc3583435c2ed6",
"type": "github"
},
"original": {
"owner": "numtide",
"repo": "flake-utils",
"type": "github"
}
},
"macos-sdk": {
"flake": false,
"locked": {
"lastModified": 1694769349,
"narHash": "sha256-TEvVJy+NMPyzgWSk/6S29ZMQR+ICFxSdS3tw247uhFc=",
"type": "tarball",
"url": "https://github.com/roblabla/MacOSX-SDKs/releases/download/macosx14.0/MacOSX14.0.sdk.tar.xz"
},
"original": {
"type": "tarball",
"url": "https://github.com/roblabla/MacOSX-SDKs/releases/download/macosx14.0/MacOSX14.0.sdk.tar.xz"
}
},
"nix-filter": {
"locked": {
"lastModified": 1730207686,
"narHash": "sha256-SCHiL+1f7q9TAnxpasriP6fMarWE5H43t25F5/9e28I=",
"owner": "numtide",
"repo": "nix-filter",
"rev": "776e68c1d014c3adde193a18db9d738458cd2ba4",
"type": "github"
},
"original": {
"owner": "numtide",
"repo": "nix-filter",
"type": "github"
}
},
"nix-filter_2": {
"locked": {
"lastModified": 1678109515,
"narHash": "sha256-C2X+qC80K2C1TOYZT8nabgo05Dw2HST/pSn6s+n6BO8=",
"owner": "numtide",
"repo": "nix-filter",
"rev": "aa9ff6ce4a7f19af6415fb3721eaa513ea6c763c",
"type": "github"
},
"original": {
"owner": "numtide",
"repo": "nix-filter",
"type": "github"
}
},
"nix-flake-tests": {
"locked": {
"lastModified": 1677844186,
"narHash": "sha256-ErJZ/Gs1rxh561CJeWP5bohA2IcTq1rDneu1WT6CVII=",
"owner": "antifuchs",
"repo": "nix-flake-tests",
"rev": "bbd9216bd0f6495bb961a8eb8392b7ef55c67afb",
"type": "github"
},
"original": {
"owner": "antifuchs",
"repo": "nix-flake-tests",
"type": "github"
}
},
"nix-flake-tests_2": {
"locked": {
"lastModified": 1677844186,
"narHash": "sha256-ErJZ/Gs1rxh561CJeWP5bohA2IcTq1rDneu1WT6CVII=",
"owner": "antifuchs",
"repo": "nix-flake-tests",
"rev": "bbd9216bd0f6495bb961a8eb8392b7ef55c67afb",
"type": "github"
},
"original": {
"owner": "antifuchs",
"repo": "nix-flake-tests",
"type": "github"
}
},
"nix-log": {
"inputs": {
"nix-flake-tests": "nix-flake-tests",
"nixify": "nixify_2",
"nixlib": "nixlib_2"
},
"locked": {
"lastModified": 1681933283,
"narHash": "sha256-phDsQdaoUEI4DUTErR6Tz7lS0y3kXvDwwbqtxpzd0eo=",
"owner": "rvolosatovs",
"repo": "nix-log",
"rev": "833d31e3c1a677eac81ba87e777afa5076071d66",
"type": "github"
},
"original": {
"owner": "rvolosatovs",
"repo": "nix-log",
"type": "github"
}
},
"nix-log_2": {
"inputs": {
"nix-flake-tests": "nix-flake-tests_2",
"nixify": [
"wasmcloud",
"wit-deps",
"nixify"
],
"nixlib": [
"wasmcloud",
"wit-deps",
"nixlib"
]
},
"locked": {
"lastModified": 1681933283,
"narHash": "sha256-phDsQdaoUEI4DUTErR6Tz7lS0y3kXvDwwbqtxpzd0eo=",
"owner": "rvolosatovs",
"repo": "nix-log",
"rev": "833d31e3c1a677eac81ba87e777afa5076071d66",
"type": "github"
},
"original": {
"owner": "rvolosatovs",
"repo": "nix-log",
"type": "github"
}
},
"nixify": {
"inputs": {
"advisory-db": "advisory-db_2",
"crane": "crane_2",
"fenix": "fenix_2",
"flake-utils": "flake-utils_2",
"macos-sdk": "macos-sdk",
"nix-filter": "nix-filter",
"nix-log": "nix-log",
"nixlib": [
"wasmcloud",
"nixlib"
],
"nixpkgs-darwin": "nixpkgs-darwin",
"nixpkgs-nixos": "nixpkgs-nixos",
"rust-overlay": "rust-overlay_2"
},
"locked": {
"lastModified": 1731068753,
"narHash": "sha256-6H+vYAYl/koFsiBEM4WHZhOoOQ2Hfzd+MtcxFfAOOtw=",
"owner": "rvolosatovs",
"repo": "nixify",
"rev": "7b83953ebfb22ba1f623ac06312aebee81f2182e",
"type": "github"
},
"original": {
"owner": "rvolosatovs",
"repo": "nixify",
"type": "github"
}
},
"nixify_2": {
"inputs": {
"crane": "crane_3",
"fenix": "fenix_3",
"flake-utils": "flake-utils_3",
"nix-filter": "nix-filter_2",
"nixlib": "nixlib",
"nixpkgs": "nixpkgs_2",
"rust-overlay": "rust-overlay"
},
"locked": {
"lastModified": 1679748566,
"narHash": "sha256-yA4yIJjNCOLoUh0py9S3SywwbPnd/6NPYbXad+JeOl0=",
"owner": "rvolosatovs",
"repo": "nixify",
"rev": "80e823959511a42dfec4409fef406a14ae8240f3",
"type": "github"
},
"original": {
"owner": "rvolosatovs",
"repo": "nixify",
"type": "github"
}
},
"nixlib": {
"locked": {
"lastModified": 1679187309,
"narHash": "sha256-H8udmkg5wppL11d/05MMzOMryiYvc403axjDNZy1/TQ=",
"owner": "nix-community",
"repo": "nixpkgs.lib",
"rev": "44214417fe4595438b31bdb9469be92536a61455",
"type": "github"
},
"original": {
"owner": "nix-community",
"repo": "nixpkgs.lib",
"type": "github"
}
},
"nixlib_2": {
"locked": {
"lastModified": 1679791877,
"narHash": "sha256-tTV1Mf0hPWIMtqyU16Kd2JUBDWvfHlDC9pF57vcbgpQ=",
"owner": "nix-community",
"repo": "nixpkgs.lib",
"rev": "cc060ddbf652a532b54057081d5abd6144d01971",
"type": "github"
},
"original": {
"owner": "nix-community",
"repo": "nixpkgs.lib",
"type": "github"
}
},
"nixlib_3": {
"locked": {
"lastModified": 1731200463,
"narHash": "sha256-qDaAweJjdFbVExqs8aG27urUgcgKufkIngHW3Rzustg=",
"owner": "nix-community",
"repo": "nixpkgs.lib",
"rev": "e04234d263750db01c78a412690363dc2226e68a",
"type": "github"
},
"original": {
"owner": "nix-community",
"repo": "nixpkgs.lib",
"type": "github"
}
},
"nixpkgs": {
"locked": {
"lastModified": 1738163270,
"narHash": "sha256-B/7Y1v4y+msFFBW1JAdFjNvVthvNdJKiN6EGRPnqfno=",
"owner": "NixOS",
"repo": "nixpkgs",
"rev": "59e618d90c065f55ae48446f307e8c09565d5ab0",
"type": "github"
},
"original": {
"owner": "NixOS",
"ref": "release-24.11",
"repo": "nixpkgs",
"type": "github"
}
},
"nixpkgs-darwin": {
"locked": {
"lastModified": 1730891215,
"narHash": "sha256-i85DPrhDuvzgvIWCpJlbfM2UFtNYbapo20MtQXsvay4=",
"owner": "nixos",
"repo": "nixpkgs",
"rev": "c128e44a249d6180740d0a979b6480d5b795c013",
"type": "github"
},
"original": {
"owner": "nixos",
"ref": "nixpkgs-24.05-darwin",
"repo": "nixpkgs",
"type": "github"
}
},
"nixpkgs-nixos": {
"locked": {
"lastModified": 1730883749,
"narHash": "sha256-mwrFF0vElHJP8X3pFCByJR365Q2463ATp2qGIrDUdlE=",
"owner": "nixos",
"repo": "nixpkgs",
"rev": "dba414932936fde69f0606b4f1d87c5bc0003ede",
"type": "github"
},
"original": {
"owner": "nixos",
"ref": "nixos-24.05",
"repo": "nixpkgs",
"type": "github"
}
},
"nixpkgs_2": {
"locked": {
"lastModified": 1679577639,
"narHash": "sha256-7u7bsNP0ApBnLgsHVROQ5ytoMqustmMVMgtaFS/P7EU=",
"owner": "nixos",
"repo": "nixpkgs",
"rev": "8f1bcd72727c5d4cd775545595d068be410f2a7e",
"type": "github"
},
"original": {
"owner": "nixos",
"ref": "nixpkgs-22.11-darwin",
"repo": "nixpkgs",
"type": "github"
}
},
"root": {
"inputs": {
"advisory-db": "advisory-db",
"crane": "crane",
"fenix": "fenix",
"flake-utils": "flake-utils",
"nixpkgs": "nixpkgs",
"wasmcloud": "wasmcloud"
}
},
"rust-analyzer-src": {
"flake": false,
"locked": {
"lastModified": 1730989300,
"narHash": "sha256-ZWSta9893f/uF5PoRFn/BSUAxF4dKW+TIbdA6rZoGBg=",
"owner": "rust-lang",
"repo": "rust-analyzer",
"rev": "1042a8c22c348491a4bade4f664430b03d6f5b5c",
"type": "github"
},
"original": {
"owner": "rust-lang",
"ref": "nightly",
"repo": "rust-analyzer",
"type": "github"
}
},
"rust-analyzer-src_2": {
"flake": false,
"locked": {
"lastModified": 1679520343,
"narHash": "sha256-AJGSGWRfoKWD5IVTu1wEsR990wHbX0kIaolPqNMEh0c=",
"owner": "rust-lang",
"repo": "rust-analyzer",
"rev": "eb791f31e688ae00908eb75d4c704ef60c430a92",
"type": "github"
},
"original": {
"owner": "rust-lang",
"ref": "nightly",
"repo": "rust-analyzer",
"type": "github"
}
},
"rust-overlay": {
"inputs": {
"flake-utils": [
"wasmcloud",
"nixify",
"nix-log",
"nixify",
"flake-utils"
],
"nixpkgs": [
"wasmcloud",
"nixify",
"nix-log",
"nixify",
"nixpkgs"
]
},
"locked": {
"lastModified": 1679537973,
"narHash": "sha256-R6borgcKeyMIjjPeeYsfo+mT8UdS+OwwbhhStdCfEjg=",
"owner": "oxalica",
"repo": "rust-overlay",
"rev": "fbc7ae3f14d32e78c0e8d7865f865cc28a46b232",
"type": "github"
},
"original": {
"owner": "oxalica",
"repo": "rust-overlay",
"type": "github"
}
},
"rust-overlay_2": {
"inputs": {
"nixpkgs": [
"wasmcloud",
"nixify",
"nixpkgs-nixos"
]
},
"locked": {
"lastModified": 1731032894,
"narHash": "sha256-dQSyYPmrQiPr+PGEd+K8038rubFGz7G/dNXVeaGWE0w=",
"owner": "oxalica",
"repo": "rust-overlay",
"rev": "d52f2a4c103a0acf09ded857b9e2519ae2360e59",
"type": "github"
},
"original": {
"owner": "oxalica",
"repo": "rust-overlay",
"type": "github"
}
},
"systems": {
"locked": {
"lastModified": 1681028828,
"narHash": "sha256-Vy1rq5AaRuLzOxct8nz4T6wlgyUR7zLU309k9mBC768=",
"owner": "nix-systems",
"repo": "default",
"rev": "da67096a3b9bf56a91d16901293e51ba5b49a27e",
"type": "github"
},
"original": {
"owner": "nix-systems",
"repo": "default",
"type": "github"
}
},
"systems_2": {
"locked": {
"lastModified": 1681028828,
"narHash": "sha256-Vy1rq5AaRuLzOxct8nz4T6wlgyUR7zLU309k9mBC768=",
"owner": "nix-systems",
"repo": "default",
"rev": "da67096a3b9bf56a91d16901293e51ba5b49a27e",
"type": "github"
},
"original": {
"owner": "nix-systems",
"repo": "default",
"type": "github"
}
},
"wasmcloud": {
"inputs": {
"nixify": "nixify",
"nixlib": "nixlib_3",
"wit-deps": "wit-deps"
},
"locked": {
"lastModified": 1731409523,
"narHash": "sha256-Q/BnuJaMyJfY+p9VpdyBWtRjEo4TdRvFMMhfdDFj6cU=",
"owner": "wasmCloud",
"repo": "wasmCloud",
"rev": "579455058513b907c7df4a4ec13728f83c6b782b",
"type": "github"
},
"original": {
"owner": "wasmCloud",
"ref": "wash-cli-v0.37.0",
"repo": "wasmCloud",
"type": "github"
}
},
"wit-deps": {
"inputs": {
"nix-log": "nix-log_2",
"nixify": [
"wasmcloud",
"nixify"
],
"nixlib": [
"wasmcloud",
"nixlib"
]
},
"locked": {
"lastModified": 1727963723,
"narHash": "sha256-urAGMGMH5ousEeVTZ5AaLPfowXaYQoISNXiutV00iQo=",
"owner": "bytecodealliance",
"repo": "wit-deps",
"rev": "eb7c84564acfe13a4197bb15052fd2e2b3d29775",
"type": "github"
},
"original": {
"owner": "bytecodealliance",
"ref": "v0.4.0",
"repo": "wit-deps",
"type": "github"
}
}
},
"root": "root",
"version": 7
}

264
flake.nix
View File

@ -1,264 +0,0 @@
{
nixConfig.extra-substituters =
[ "https://wasmcloud.cachix.org" "https://crane.cachix.org" ];
nixConfig.extra-trusted-public-keys = [
"wasmcloud.cachix.org-1:9gRBzsKh+x2HbVVspreFg/6iFRiD4aOcUQfXVDl3hiM="
"crane.cachix.org-1:8Scfpmn9w+hGdXH/Q9tTLiYAE/2dnJYRJP7kl80GuRk="
];
description = "A flake for building and running wadm";
inputs = {
nixpkgs.url = "github:NixOS/nixpkgs/release-24.11";
crane.url = "github:ipetkov/crane";
fenix = {
url = "github:nix-community/fenix";
inputs.nixpkgs.follows = "nixpkgs";
inputs.rust-analyzer-src.follows = "";
};
flake-utils.url = "github:numtide/flake-utils";
advisory-db = {
url = "github:rustsec/advisory-db";
flake = false;
};
# The wash CLI tag is always cut after the latest host release tag we want
wasmcloud.url = "github:wasmCloud/wasmCloud/wash-cli-v0.37.0";
};
outputs =
{ self, nixpkgs, crane, fenix, flake-utils, advisory-db, wasmcloud, ... }:
flake-utils.lib.eachDefaultSystem (system:
let
pkgs = nixpkgs.legacyPackages.${system};
inherit (pkgs) lib;
craneLib = crane.mkLib pkgs;
src = craneLib.cleanCargoSource ./.;
# Common arguments can be set here to avoid repeating them later
commonArgs = {
inherit src;
strictDeps = true;
buildInputs = [
# Add additional build inputs here
] ++ lib.optionals pkgs.stdenv.isDarwin [
# Additional darwin specific inputs can be set here if needed
];
# Additional environment variables can be set directly here if needed
# MY_CUSTOM_VAR = "some value";
};
craneLibLLvmTools = craneLib.overrideToolchain
(fenix.packages.${system}.complete.withComponents [
"cargo"
"llvm-tools"
"rustc"
]);
# Get the lock file for filtering
rawLockFile = builtins.fromTOML (builtins.readFile ./Cargo.lock);
# Filter out the workspace members
filteredLockFile = rawLockFile // {
package = builtins.filter (x: !lib.strings.hasPrefix "wadm" x.name)
rawLockFile.package;
};
cargoVendorDir =
craneLib.vendorCargoDeps { cargoLockParsed = filteredLockFile; };
cargoLock = craneLib.writeTOML "Cargo.lock" filteredLockFile;
# Build *just* the cargo dependencies (of the entire workspace), without building
# anything else in the crate, so that changes to workspace crates don't force the
# dependencies to be rebuilt
cargoArtifacts = let
commonArgs' = removeAttrs commonArgs [ "src" ];
# Get the manifest file for filtering
rawManifestFile = builtins.fromTOML (builtins.readFile ./Cargo.toml);
# Filter out the workspace members from manifest
filteredManifestFile = with lib;
let
filterWadmAttrs =
filterAttrs (name: _: !strings.hasPrefix "wadm" name);
workspace = removeAttrs rawManifestFile.workspace [ "members" ];
in rawManifestFile // {
workspace = workspace // {
dependencies = filterWadmAttrs workspace.dependencies;
package = workspace.package // {
# pin version to avoid rebuilds on bumps
version = "0.0.0";
};
};
dependencies = filterWadmAttrs rawManifestFile.dependencies;
dev-dependencies =
filterWadmAttrs rawManifestFile.dev-dependencies;
build-dependencies =
filterWadmAttrs rawManifestFile.build-dependencies;
};
cargoToml = craneLib.writeTOML "Cargo.toml" filteredManifestFile;
dummySrc = craneLib.mkDummySrc {
src = pkgs.runCommand "wadm-dummy-src" { } ''
mkdir -p $out
cp --recursive --no-preserve=mode,ownership ${src}/. -t $out
cp ${cargoToml} $out/Cargo.toml
'';
};
args = commonArgs' // {
inherit cargoLock cargoToml cargoVendorDir dummySrc;
cargoExtraArgs = ""; # disable `--locked` passed by default by crane
};
in craneLib.buildDepsOnly args;
individualCrateArgs = commonArgs // {
inherit (craneLib.crateNameFromCargoToml { inherit src; }) version;
# TODO(thomastaylor312) We run unit tests here and e2e tests externally. The nextest step
# wasn't letting me pass in the fileset
doCheck = true;
};
fileSetForCrate = lib.fileset.toSource {
root = ./.;
fileset = lib.fileset.unions [
./Cargo.toml
./Cargo.lock
./tests
./oam
(craneLib.fileset.commonCargoSources ./crates/wadm)
(craneLib.fileset.commonCargoSources ./crates/wadm-client)
(craneLib.fileset.commonCargoSources ./crates/wadm-types)
];
};
# Build the top-level crates of the workspace as individual derivations.
# This allows consumers to depend on (and build) only what they need.
# It is also possible to build the entire workspace as a single derivation,
# so how to organize things is left up to you
#
# Note that the cargo workspace must define `workspace.members` using wildcards;
# otherwise, omitting a crate (like we do below) will result in errors since
# cargo won't be able to find the sources for all members.
# TODO(thomastaylor312) I tried using `doInstallCargoArtifacts` and passing in things to the
# next derivations as the `cargoArtifacts`, but that ended up always building things twice
# rather than caching. We should look into it more and see if there's a way to make it work.
wadm-lib = craneLib.cargoBuild (individualCrateArgs // {
inherit cargoArtifacts;
pname = "wadm";
cargoExtraArgs = "-p wadm";
src = fileSetForCrate;
});
wadm = craneLib.buildPackage (individualCrateArgs // {
inherit cargoArtifacts;
pname = "wadm-cli";
cargoExtraArgs = "--bin wadm";
src = fileSetForCrate;
});
wadm-client = craneLib.cargoBuild (individualCrateArgs // {
inherit cargoArtifacts;
pname = "wadm-client";
cargoExtraArgs = "-p wadm-client";
src = fileSetForCrate;
});
wadm-types = craneLib.cargoBuild (individualCrateArgs // {
inherit cargoArtifacts;
pname = "wadm-types";
cargoExtraArgs = "-p wadm-types";
src = fileSetForCrate;
});
in {
checks = {
# Build the crates as part of `nix flake check` for convenience
inherit wadm wadm-client wadm-types;
# Run clippy (and deny all warnings) on the workspace source,
# again, reusing the dependency artifacts from above.
#
# Note that this is done as a separate derivation so that
# we can block the CI if there are issues here, but not
# prevent downstream consumers from building our crate by itself.
workspace-clippy = craneLib.cargoClippy (commonArgs // {
inherit cargoArtifacts;
cargoClippyExtraArgs = "--all-targets -- --deny warnings";
});
workspace-doc =
craneLib.cargoDoc (commonArgs // { inherit cargoArtifacts; });
# Check formatting
workspace-fmt = craneLib.cargoFmt { inherit src; };
# Audit dependencies
workspace-audit = craneLib.cargoAudit { inherit src advisory-db; };
# Audit licenses
# my-workspace-deny = craneLib.cargoDeny {
# inherit src;
# };
# TODO: the wadm e2e tests use docker compose and things like `wash up` to test things
# (which accesses network currently). We would need to fix those tests to do something
# else to work properly. The low hanging fruit here would be to use the built artifact
# in the e2e tests so we can output those binaries from the nix build and then just
# run the tests from a separate repo. We could also output the prebuilt
# artifacts into the current directory to save on build time. But that is
# for future us to figure out
runE2ETests = pkgs.runCommand "e2e-tests" {
nativeBuildInputs = with pkgs;
[
nats-server
# wasmcloud.wasmcloud
];
} ''
touch $out
'';
};
packages = {
inherit wadm wadm-client wadm-types wadm-lib;
default = wadm;
} // lib.optionalAttrs (!pkgs.stdenv.isDarwin) {
workspace-llvm-coverage = craneLibLLvmTools.cargoLlvmCov
(commonArgs // { inherit cargoArtifacts; });
};
apps = {
wadm = flake-utils.lib.mkApp { drv = wadm; };
default = flake-utils.lib.mkApp { drv = wadm; };
};
devShells.default = craneLib.devShell {
# Inherit inputs from checks.
checks = self.checks.${system};
RUST_SRC_PATH =
"${pkgs.rust.packages.stable.rustPlatform.rustLibSrc}";
# Extra inputs can be added here; cargo and rustc are provided by default.
packages = [
pkgs.nats-server
pkgs.natscli
pkgs.docker
pkgs.git
wasmcloud.outputs.packages.${system}.default
];
};
});
}

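A note on the lock-file filtering above, since it is the crux of the dependency caching: every [[package]] entry whose name starts with "wadm" is dropped before vendoring, so edits to the workspace crates never invalidate the cached dependency-only build. A rough Rust analogue of what that `builtins.filter` expression computes (the `Package` struct here is hypothetical, purely for illustration):

struct Package {
    name: String,
}

// Keep only third-party packages; workspace members (anything named `wadm*`)
// are dropped, mirroring `builtins.filter (x: !hasPrefix "wadm" x.name)` above.
fn filter_lock_packages(packages: Vec<Package>) -> Vec<Package> {
    packages
        .into_iter()
        .filter(|p| !p.name.starts_with("wadm"))
        .collect()
}
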
View File

@ -2,10 +2,11 @@ use std::{collections::HashMap, time::Duration};
use async_nats::{jetstream, Subscriber};
use futures::StreamExt;
use helpers::setup_env;
use serde::de::DeserializeOwned;
use wadm::server::*;
use wadm_types::{api::*, *};
use wadm::{
model::{Manifest, VERSION_ANNOTATION_KEY},
server::*,
};
mod helpers;
@ -77,8 +78,11 @@ impl TestServer {
}
}
async fn setup_server(id: &str, client: async_nats::Client) -> TestServer {
let store = helpers::create_test_store_with_client(id, client.clone()).await;
async fn setup_server(id: String) -> TestServer {
let client = async_nats::connect("127.0.0.1:4222")
.await
.expect("Should be able to connect to NATS");
let store = helpers::create_test_store_with_client(client.clone(), id.clone()).await;
let context = jetstream::new(client.clone());
let status_stream = context
@ -109,7 +113,7 @@ async fn setup_server(id: &str, client: async_nats::Client) -> TestServer {
let server = Server::new(
store,
client.clone(),
Some(id),
Some(&id),
false,
status_stream,
ManifestNotifier::new(&prefix, client.clone()),
@ -118,12 +122,12 @@ async fn setup_server(id: &str, client: async_nats::Client) -> TestServer {
.expect("Should be able to setup server");
let notify = client
.subscribe(format!("{prefix}.default.>"))
.subscribe(format!("{prefix}.default"))
.await
.expect("Unable to set up subscription");
TestServer {
prefix: id.to_owned(),
prefix: id,
handle: tokio::spawn(server.serve()),
client,
notify,
@ -132,25 +136,16 @@ async fn setup_server(id: &str, client: async_nats::Client) -> TestServer {
#[tokio::test]
async fn test_crud_operations() {
let env = setup_env()
.await
.expect("should have set up the test environment");
let nats_client = env
.nats_client()
.await
.expect("should have created a nats client");
let test_server = setup_server("crud_operations", nats_client).await;
let test_server = setup_server("crud_operations".to_owned()).await;
// First test with a raw file (a common operation)
let raw = tokio::fs::read("./oam/sqldbpostgres.yaml")
let raw = tokio::fs::read("./oam/petclinic.yaml")
.await
.expect("Unable to load file");
let resp: PutModelResponse = test_server
.get_response("default.model.put", raw, None)
.await;
println!("Response: {resp:?}");
assert_put_response(resp, PutResult::Created, "v0.0.1", 1);
let raw = tokio::fs::read("./oam/simple1.yaml")
@ -163,18 +158,12 @@ async fn test_crud_operations() {
let resp: PutModelResponse = test_server
.get_response("default.model.put", raw, None)
.await;
// This manifest has no version, so it's assigned one as a ULID
let example_version = resp.current_version.clone();
assert_put_response(resp, PutResult::Created, &example_version, 1);
assert_put_response(resp, PutResult::Created, "v0.0.1", 1);
// Check that we can get back the manifest
let resp: GetModelResponse = test_server
.get_response("default.model.get.my-example-app", Vec::new(), None)
.await;
manifest
.metadata
.annotations
.insert(VERSION_ANNOTATION_KEY.to_owned(), example_version.clone());
assert_manifest(
&manifest,
resp.manifest
@ -183,8 +172,8 @@ async fn test_crud_operations() {
);
// Now check that the data returned is correct
let ListModelsResponse { models: resp, .. } = test_server
.get_response("default.model.get", Vec::new(), None)
let resp: Vec<ModelSummary> = test_server
.get_response("default.model.list", Vec::new(), None)
.await;
assert_eq!(resp.len(), 2, "Should have two models in storage");
@ -192,13 +181,10 @@ async fn test_crud_operations() {
.iter()
.find(|m| m.name == "my-example-app")
.expect("Should be able to find the correct model");
assert_eq!(
summary.version, example_version,
"Should have the correct data"
);
assert_eq!(summary.version, "v0.0.1", "Should have the correct data");
let summary = resp
.iter()
.find(|m| m.name == "rust-sqldb-postgres-query")
.find(|m| m.name == "petclinic")
.expect("Should be able to find the correct model");
assert_eq!(summary.version, "v0.0.1", "Should have the correct data");
@ -230,8 +216,8 @@ async fn test_crud_operations() {
assert_put_response(resp, PutResult::NewVersion, "v0.0.3", 3);
// Make sure we still only have 2 manifests
let ListModelsResponse { models: resp, .. } = test_server
.get_response("default.model.get", Vec::new(), None)
let resp: Vec<ModelSummary> = test_server
.get_response("default.model.list", Vec::new(), None)
.await;
assert_eq!(resp.len(), 2, "Should still have two models in storage");
@ -247,7 +233,7 @@ async fn test_crud_operations() {
let mut iter = resp.versions.into_iter();
assert_eq!(
iter.next().unwrap().version,
example_version,
"v0.0.1",
"Should find the correct version"
);
assert_eq!(
@ -288,7 +274,8 @@ async fn test_crud_operations() {
.get_response(
"default.model.del.my-example-app",
serde_json::to_vec(&DeleteModelRequest {
version: Some("v0.0.2".to_owned()),
version: "v0.0.2".to_owned(),
delete_all: false,
})
.unwrap(),
None,
@ -311,7 +298,7 @@ async fn test_crud_operations() {
let mut iter = resp.versions.into_iter();
assert_eq!(
iter.next().unwrap().version,
example_version,
"v0.0.1",
"Should find the correct version"
);
assert_eq!(
@ -325,7 +312,11 @@ async fn test_crud_operations() {
let resp: DeleteModelResponse = test_server
.get_response(
"default.model.del.my-example-app",
serde_json::to_vec(&DeleteModelRequest { version: None }).unwrap(),
serde_json::to_vec(&DeleteModelRequest {
version: String::new(),
delete_all: true,
})
.unwrap(),
None,
)
.await;
@ -346,9 +337,10 @@ async fn test_crud_operations() {
// Delete last remaining
let resp: DeleteModelResponse = test_server
.get_response(
"default.model.del.rust-sqldb-postgres-query",
"default.model.del.petclinic",
serde_json::to_vec(&DeleteModelRequest {
version: Some("v0.0.1".to_owned()),
version: "v0.0.1".to_owned(),
delete_all: false,
})
.unwrap(),
None,
@ -360,11 +352,7 @@ async fn test_crud_operations() {
);
let resp: GetModelResponse = test_server
.get_response(
"default.model.get.rust-sqldb-postgres-query",
Vec::new(),
None,
)
.get_response("default.model.get.petclinic", Vec::new(), None)
.await;
assert!(
matches!(resp.result, GetResult::NotFound),
@ -374,17 +362,9 @@ async fn test_crud_operations() {
#[tokio::test]
async fn test_bad_requests() {
let env = setup_env()
.await
.expect("should have set up the test environment");
let nats_client = env
.nats_client()
.await
.expect("should have created a nats client");
let test_server = setup_server("bad_requests", nats_client).await;
let test_server = setup_server("bad_requests".to_owned()).await;
// Duplicate version
let raw = tokio::fs::read("./oam/sqldbpostgres.yaml")
let raw = tokio::fs::read("./oam/petclinic.yaml")
.await
.expect("Unable to load file");
let resp: PutModelResponse = test_server
@ -394,7 +374,7 @@ async fn test_bad_requests() {
assert_put_response(resp, PutResult::Created, "v0.0.1", 1);
// https://imgflip.com/memegenerator/195657242/Do-it-again
let raw = tokio::fs::read("./oam/sqldbpostgres.yaml")
let raw = tokio::fs::read("./oam/petclinic.yaml")
.await
.expect("Unable to load file");
let resp: PutModelResponse = test_server
@ -408,7 +388,7 @@ async fn test_bad_requests() {
assert!(!resp.message.is_empty(), "Should not have an empty message");
// Setting manifest to latest
let raw = tokio::fs::read("./oam/sqldbpostgres.yaml")
let raw = tokio::fs::read("./oam/petclinic.yaml")
.await
.expect("Unable to load file");
let mut manifest: Manifest = serde_yaml::from_slice(&raw).unwrap();
@ -427,7 +407,7 @@ async fn test_bad_requests() {
assert!(!resp.message.is_empty(), "Should not have an empty message");
// Mismatched name on put
let raw = tokio::fs::read("./oam/sqldbpostgres.yaml")
let raw = tokio::fs::read("./oam/petclinic.yaml")
.await
.expect("Unable to load file");
let resp: PutModelResponse = test_server
@ -443,21 +423,15 @@ async fn test_bad_requests() {
#[tokio::test]
async fn test_delete_noop() {
let env = setup_env()
.await
.expect("should have set up the test environment");
let nats_client = env
.nats_client()
.await
.expect("should have created a nats client");
let test_server = setup_server("delete_noop", nats_client).await;
let test_server = setup_server("delete_noop".to_owned()).await;
// Delete a model that doesn't exist
// Delete something that doesn't exist
let resp: DeleteModelResponse = test_server
.get_response(
"default.model.del.my-example-app",
serde_json::to_vec(&DeleteModelRequest {
version: Some("v0.0.2".to_owned()),
version: "v0.0.2".to_owned(),
delete_all: false,
})
.unwrap(),
None,
@ -469,20 +443,8 @@ async fn test_delete_noop() {
);
assert!(!resp.message.is_empty(), "Should have a message set");
let resp: DeleteModelResponse = test_server
.get_response(
"default.model.del.my-example-app",
serde_json::to_vec(&DeleteModelRequest { version: None }).unwrap(),
None,
)
.await;
assert!(
matches!(resp.result, DeleteResult::Noop),
"Should have gotten noop response for already deleted model"
);
// Delete a non-existent version for an existing model
let raw = tokio::fs::read("./oam/sqldbpostgres.yaml")
// Delete a non-existent version
let raw = tokio::fs::read("./oam/petclinic.yaml")
.await
.expect("Unable to load file");
let resp: PutModelResponse = test_server
@ -493,9 +455,10 @@ async fn test_delete_noop() {
let resp: DeleteModelResponse = test_server
.get_response(
"default.model.del.rust-sqldb-postgres-query",
"default.model.del.petclinic",
serde_json::to_vec(&DeleteModelRequest {
version: Some("v0.0.2".to_owned()),
version: "v0.0.2".to_owned(),
delete_all: false,
})
.unwrap(),
None,
@ -510,18 +473,11 @@ async fn test_delete_noop() {
#[tokio::test]
async fn test_invalid_topics() {
let env = setup_env()
.await
.expect("should have set up the test environment");
let nats_client = env
.nats_client()
.await
.expect("should have created a nats client");
let test_server = setup_server("invalid_topics", nats_client).await;
let test_server = setup_server("invalid_topics".to_owned()).await;
// Put in a manifest to make sure we have something that could be fetched if we aren't handling
// invalid topics correctly
let raw = tokio::fs::read("./oam/sqldbpostgres.yaml")
let raw = tokio::fs::read("./oam/petclinic.yaml")
.await
.expect("Unable to load file");
let resp: PutModelResponse = test_server
@ -548,11 +504,7 @@ async fn test_invalid_topics() {
// Extra things on end
let resp: HashMap<String, String> = test_server
.get_response(
"default.model.get.rust-sqldb-postgres-query.foo.bar",
Vec::new(),
None,
)
.get_response("default.model.get.petclinic.foo.bar", Vec::new(), None)
.await;
assert_eq!(
@ -568,11 +520,7 @@ async fn test_invalid_topics() {
// Random topic
let resp: HashMap<String, String> = test_server
.get_response(
"default.blah.get.rust-sqldb-postgres-query",
Vec::new(),
None,
)
.get_response("default.blah.get.petclinic", Vec::new(), None)
.await;
assert_eq!(
@ -589,14 +537,7 @@ async fn test_invalid_topics() {
#[tokio::test]
async fn test_manifest_parsing() {
let env = setup_env()
.await
.expect("should have set up the test environment");
let nats_client = env
.nats_client()
.await
.expect("should have created a nats client");
let test_server = setup_server("manifest_parsing", nats_client).await;
let test_server = setup_server("manifest_parsing".to_owned()).await;
// Test json manifest with no hint
let raw = tokio::fs::read("./oam/simple1.json")
@ -607,13 +548,10 @@ async fn test_manifest_parsing() {
.get_response("default.model.put", raw, None)
.await;
// This manifest has no version, so it's assigned one as a ULID
let version = resp.current_version.clone();
assert_put_response(resp, PutResult::Created, &version, 1);
assert_put_response(resp, PutResult::Created, "v0.0.1", 1);
// Test yaml manifest with hint
let raw = tokio::fs::read("./oam/sqldbpostgres.yaml")
let raw = tokio::fs::read("./oam/petclinic.yaml")
.await
.expect("Unable to load file");
let resp: PutModelResponse = test_server
@ -649,25 +587,16 @@ async fn test_manifest_parsing() {
let resp: PutModelResponse = test_server
.get_response("default.model.put", raw, None)
.await;
// This manifest has no version, so it's assigned one as a ULID
let version = resp.current_version.clone();
assert_put_response(resp, PutResult::Created, &version, 1);
assert_put_response(resp, PutResult::Created, "v0.0.1", 1);
}
#[tokio::test]
async fn test_deploy() {
let env = setup_env()
.await
.expect("should have set up the test environment");
let nats_client = env
.nats_client()
.await
.expect("should have created a nats client");
let mut test_server = setup_server("deploy_ops", nats_client).await;
let mut test_server = setup_server("deploy_ops".to_owned()).await;
// Create a manifest with 2 versions
let raw = tokio::fs::read("./oam/sqldbpostgres.yaml")
let raw = tokio::fs::read("./oam/petclinic.yaml")
.await
.expect("Unable to load file");
let mut manifest: Manifest = serde_yaml::from_slice(&raw).unwrap();
@ -708,11 +637,7 @@ async fn test_deploy() {
// Deploy using no body
let resp: DeployModelResponse = test_server
.get_response(
"default.model.deploy.rust-sqldb-postgres-query",
Vec::new(),
None,
)
.get_response("default.model.deploy.petclinic", Vec::new(), None)
.await;
assert!(
matches!(resp.result, DeployResult::Acknowledged),
@ -724,11 +649,7 @@ async fn test_deploy() {
.await;
let resp: VersionResponse = test_server
.get_response(
"default.model.versions.rust-sqldb-postgres-query",
Vec::new(),
None,
)
.get_response("default.model.versions.petclinic", Vec::new(), None)
.await;
for info in resp.versions.into_iter() {
match info.version.as_str() {
@ -741,7 +662,7 @@ async fn test_deploy() {
// Now deploy with a specific version
let resp: DeployModelResponse = test_server
.get_response(
"default.model.deploy.rust-sqldb-postgres-query",
"default.model.deploy.petclinic",
serde_json::to_vec(&DeployModelRequest {
version: Some("v0.0.1".to_string()),
})
@ -755,11 +676,7 @@ async fn test_deploy() {
);
let resp: VersionResponse = test_server
.get_response(
"default.model.versions.rust-sqldb-postgres-query",
Vec::new(),
None,
)
.get_response("default.model.versions.petclinic", Vec::new(), None)
.await;
for info in resp.versions.into_iter() {
match info.version.as_str() {
@ -772,7 +689,7 @@ async fn test_deploy() {
// Try to deploy latest
let resp: DeployModelResponse = test_server
.get_response(
"default.model.deploy.rust-sqldb-postgres-query",
"default.model.deploy.petclinic",
serde_json::to_vec(&DeployModelRequest {
version: Some("latest".to_string()),
})
@ -786,11 +703,7 @@ async fn test_deploy() {
);
let resp: VersionResponse = test_server
.get_response(
"default.model.versions.rust-sqldb-postgres-query",
Vec::new(),
None,
)
.get_response("default.model.versions.petclinic", Vec::new(), None)
.await;
for info in resp.versions.into_iter() {
match info.version.as_str() {
@ -802,11 +715,7 @@ async fn test_deploy() {
// Undeploy stuff
let resp: DeployModelResponse = test_server
.get_response(
"default.model.undeploy.rust-sqldb-postgres-query",
Vec::new(),
None,
)
.get_response("default.model.undeploy.petclinic", Vec::new(), None)
.await;
assert!(
matches!(resp.result, DeployResult::Acknowledged),
@ -818,11 +727,7 @@ async fn test_deploy() {
.await;
let resp: VersionResponse = test_server
.get_response(
"default.model.versions.rust-sqldb-postgres-query",
Vec::new(),
None,
)
.get_response("default.model.versions.petclinic", Vec::new(), None)
.await;
assert!(
resp.versions.into_iter().all(|info| !info.deployed),
@ -832,17 +737,10 @@ async fn test_deploy() {
#[tokio::test]
async fn test_delete_deploy() {
let env = setup_env()
.await
.expect("should have set up the test environment");
let nats_client = env
.nats_client()
.await
.expect("should have created a nats client");
let mut test_server = setup_server("deploy_delete", nats_client).await;
let mut test_server = setup_server("deploy_delete".to_owned()).await;
// Create a manifest with 2 versions
let raw = tokio::fs::read("./oam/sqldbpostgres.yaml")
let raw = tokio::fs::read("./oam/petclinic.yaml")
.await
.expect("Unable to load file");
let mut manifest: Manifest = serde_yaml::from_slice(&raw).unwrap();
@ -865,11 +763,7 @@ async fn test_delete_deploy() {
assert_put_response(resp, PutResult::NewVersion, "v0.0.2", 2);
let resp: DeployModelResponse = test_server
.get_response(
"default.model.deploy.rust-sqldb-postgres-query",
Vec::new(),
None,
)
.get_response("default.model.deploy.petclinic", Vec::new(), None)
.await;
assert!(
matches!(resp.result, DeployResult::Acknowledged),
@ -878,9 +772,10 @@ async fn test_delete_deploy() {
let resp: DeleteModelResponse = test_server
.get_response(
"default.model.del.rust-sqldb-postgres-query",
"default.model.del.petclinic",
serde_json::to_vec(&DeleteModelRequest {
version: Some("v0.0.2".to_owned()),
version: "v0.0.2".to_owned(),
delete_all: false,
})
.unwrap(),
None,
@ -897,11 +792,7 @@ async fn test_delete_deploy() {
// Deploy again and then delete all
let resp: DeployModelResponse = test_server
.get_response(
"default.model.deploy.rust-sqldb-postgres-query",
Vec::new(),
None,
)
.get_response("default.model.deploy.petclinic", Vec::new(), None)
.await;
assert!(
matches!(resp.result, DeployResult::Acknowledged),
@ -910,8 +801,12 @@ async fn test_delete_deploy() {
let resp: DeleteModelResponse = test_server
.get_response(
"default.model.del.rust-sqldb-postgres-query",
serde_json::to_vec(&DeleteModelRequest { version: None }).unwrap(),
"default.model.del.petclinic",
serde_json::to_vec(&DeleteModelRequest {
version: String::new(),
delete_all: true,
})
.unwrap(),
None,
)
.await;
@ -927,16 +822,9 @@ async fn test_delete_deploy() {
#[tokio::test]
async fn test_status() {
let env = setup_env()
.await
.expect("should have set up the test environment");
let nats_client = env
.nats_client()
.await
.expect("should have created a nats client");
let test_server = setup_server("status", nats_client).await;
let test_server = setup_server("status".to_owned()).await;
let raw = tokio::fs::read("./oam/sqldbpostgres.yaml")
let raw = tokio::fs::read("./oam/petclinic.yaml")
.await
.expect("Unable to load file");
let resp: PutModelResponse = test_server
@ -945,11 +833,7 @@ async fn test_status() {
assert_put_response(resp, PutResult::Created, "v0.0.1", 1);
let resp: StatusResponse = test_server
.get_response(
"default.model.status.rust-sqldb-postgres-query",
Vec::new(),
None,
)
.get_response("default.model.status.petclinic", Vec::new(), None)
.await;
// This is just checking it returns a valid default status. e2e tests will have to check actual

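One API change worth calling out from the hunks above: the delete request changes shape between the two branches. Reconstructed from the tests alone (the real definitions live in the wadm server types and likely carry serde derives), the two shapes are roughly:

mod on_main {
    // `version: None` deletes every stored version of the model.
    pub struct DeleteModelRequest {
        pub version: Option<String>,
    }
}

mod on_client_v0_1_1 {
    // An explicit flag replaces the Option; when `delete_all` is true the
    // `version` field is ignored (the tests pass `String::new()` for it).
    pub struct DeleteModelRequest {
        pub version: String,
        pub delete_all: bool,
    }
}
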
View File

@ -1,73 +1,58 @@
use std::collections::BTreeMap;
use futures::TryStreamExt;
use serial_test::serial;
use tokio::time::{timeout, Duration};
use wadm::commands::*;
mod helpers;
use helpers::{setup_env, StreamWrapper};
use helpers::StreamWrapper;
#[tokio::test]
async fn test_consumer_stream() {
let env = setup_env()
.await
.expect("should have set up the test environment");
let nats_client = env
.nats_client()
.await
.expect("should have created a nats client for the test setup");
let mut wrapper = StreamWrapper::new("consumer_stream".into(), nats_client.clone()).await;
let mut wrapper = StreamWrapper::new("consumer_stream".into(), None).await;
// Publish a whole bunch of commands to the stream
wrapper
.publish_command(ScaleComponent {
component_id: "barfood".to_string(),
component_id: None,
reference: "foobar".to_string(),
host_id: "fakehost".to_string(),
count: 3,
model_name: "fake".into(),
annotations: BTreeMap::new(),
config: vec![],
})
.await;
wrapper
.publish_command(StartProvider {
reference: "baz".to_string(),
provider_id: "fakepay".to_string(),
host_id: "fakehost".to_string(),
model_name: "fake".into(),
config: vec![],
annotations: BTreeMap::new(),
..Default::default()
})
.await;
wrapper
.publish_command(PutLink {
source_id: "barfood".to_string(),
target: "fakepay".to_string(),
name: "default".to_string(),
wit_namespace: "dontreallycare".to_string(),
wit_package: "doesitmatter".to_string(),
model_name: "fake".to_string(),
interfaces: vec!["something".to_string()],
.publish_command(PutLinkdef {
component_id: "foobar".to_string(),
provider_id: "fakehost".to_string(),
contract_id: "wasmcloud:httpserver".to_string(),
model_name: "fake".into(),
..Default::default()
})
.await;
// Make sure we get the right data back, in the right order
let mut cmd = wrapper.wait_for_command().await;
if let Command::ScaleComponent(event) = cmd.as_ref() {
if let Command::ScaleComponent(actor) = cmd.as_ref() {
assert_eq!(
event.reference,
actor.reference,
"foobar",
"Expected to get a valid start component command, got command: {:?}",
"Expected to get a valid start actor command, got command: {:?}",
cmd.as_ref()
);
} else {
panic!(
"Event wasn't a start component command. Got {:?}",
cmd.as_ref()
);
panic!("Event wasn't a start actor command. Got {:?}", cmd.as_ref());
}
cmd.ack().await.expect("Should be able to ack message");
@ -88,16 +73,10 @@ async fn test_consumer_stream() {
cmd.ack().await.expect("Should be able to ack message");
let mut cmd = wrapper.wait_for_command().await;
if let Command::PutLink(link) = cmd.as_ref() {
if let Command::PutLinkdef(ld) = cmd.as_ref() {
assert_eq!(
link.wit_namespace,
"dontreallycare",
"Expected to get a valid put linkdef command, got command: {:?}",
cmd.as_ref()
);
assert_eq!(
link.wit_package,
"doesitmatter",
ld.contract_id,
"wasmcloud:httpserver",
"Expected to get a valid put linkdef command, got command: {:?}",
cmd.as_ref()
);
@ -112,94 +91,78 @@ async fn test_consumer_stream() {
.expect_err("No more commands should have been received");
// Send some garbage data, then some normal data and make sure it just skips
nats_client
wrapper
.client
.publish(wrapper.topic.clone(), "{\"fake\": \"json\"}".into())
.await
.expect("Should be able to publish data");
wrapper
.publish_command(ScaleComponent {
component_id: "foobar".to_string(),
component_id: Some("foobar".to_string()),
reference: "foobarref".to_string(),
host_id: "fakehost".to_string(),
count: 0,
model_name: "fake".into(),
annotations: BTreeMap::new(),
..Default::default()
})
.await;
let mut cmd = wrapper.wait_for_command().await;
if let Command::ScaleComponent(event) = cmd.as_ref() {
if let Command::ScaleComponent(actor) = cmd.as_ref() {
assert_eq!(
event.component_id,
"foobar".to_string(),
"Expected to get a valid stop component command, got command: {:?}",
actor.component_id,
Some("foobar".to_string()),
"Expected to get a valid stop actor command, got command: {:?}",
cmd.as_ref()
);
} else {
panic!(
"Event wasn't a stop component command. Got {:?}",
cmd.as_ref()
);
panic!("Event wasn't a stop actor command. Got {:?}", cmd.as_ref());
}
cmd.ack().await.expect("Should be able to ack message");
}
#[tokio::test]
#[serial]
async fn test_nack_and_rereceive() {
let env = setup_env()
.await
.expect("should have set up the test environment");
let nats_client = env
.nats_client()
.await
.expect("should have created a nats client for the test setup");
let mut wrapper = StreamWrapper::new("nack_and_rereceive".into(), nats_client).await;
let mut wrapper = StreamWrapper::new("nack_and_rereceive".into(), None).await;
// Send an event
wrapper
.publish_command(ScaleComponent {
component_id: "barfood".to_string(),
component_id: None,
reference: "foobar".to_string(),
host_id: "fakehost".to_string(),
count: 3,
model_name: "fake".into(),
annotations: BTreeMap::new(),
config: vec![],
})
.await;
// Get the event and then nack it
let mut cmd = wrapper.wait_for_command().await;
// Make sure we got the right event
if let Command::ScaleComponent(event) = cmd.as_ref() {
if let Command::ScaleComponent(actor) = cmd.as_ref() {
assert_eq!(
event.reference,
actor.reference,
"foobar",
"Expected to get a valid start component command, got command: {:?}",
"Expected to get a valid start actor command, got command: {:?}",
cmd.as_ref()
);
} else {
panic!(
"Event wasn't a start component command. Got {:?}",
cmd.as_ref()
);
panic!("Event wasn't a start actor command. Got {:?}", cmd.as_ref());
}
cmd.nack().await;
// Now do it again and make sure we get the same event
if let Command::ScaleComponent(event) = wrapper.wait_for_command().await.as_ref() {
if let Command::ScaleComponent(actor) = wrapper.wait_for_command().await.as_ref() {
assert_eq!(
event.reference,
actor.reference,
"foobar",
"Expected to get a valid start component command, got command: {:?}",
"Expected to get a valid start actor command, got command: {:?}",
cmd.as_ref()
);
} else {
panic!(
"Event wasn't a start component command. Got {:?}",
cmd.as_ref()
);
panic!("Event wasn't a start actor command. Got {:?}", cmd.as_ref());
}
cmd.ack().await.expect("Should be able to ack");
}

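The nack-and-rereceive test above exercises ordinary JetStream at-least-once delivery: an acked command is removed from the stream, while a nacked one is redelivered on the next wait. A minimal sketch of the consumer loop this implies, reusing the helper names from the tests (`StreamWrapper`, `wait_for_command`) with a hypothetical `process` function standing in for real work:

use wadm::commands::Command;

// Hypothetical stand-in for whatever actually handles the command.
async fn process(_cmd: &Command) -> Result<(), ()> {
    Ok(())
}

// Assumes the same `helpers::StreamWrapper` used by the tests above.
async fn handle_one(wrapper: &mut StreamWrapper) {
    let mut cmd = wrapper.wait_for_command().await;
    let result = process(cmd.as_ref()).await;
    match result {
        // Success: ack so the command is not delivered again
        Ok(()) => cmd.ack().await.expect("should be able to ack"),
        // Failure: nack so the same command is re-received on the next wait
        Err(()) => cmd.nack().await,
    }
}
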
View File

@ -1,58 +1,55 @@
use std::collections::{BTreeMap, HashMap};
use std::collections::BTreeMap;
use futures::StreamExt;
use serial_test::serial;
use wadm::{
commands::*, consumers::manager::Worker, workers::CommandWorker, MANAGED_BY_ANNOTATION,
commands::*, consumers::manager::Worker, model::CapabilityConfig, workers::CommandWorker,
};
mod helpers;
use helpers::{
setup_env, StreamWrapper, HELLO_COMPONENT_ID, HELLO_IMAGE_REF, HTTP_SERVER_COMPONENT_ID,
HTTP_SERVER_IMAGE_REF,
setup_test_wash, StreamWrapper, TestWashConfig, ECHO_ACTOR_ID, HTTP_SERVER_PROVIDER_ID,
};
#[tokio::test]
// TODO: Run in parallel once https://github.com/wasmCloud/wash/issues/402 is fixed. Please
// note this test should probably be changed to an e2e test as the order of events is somewhat flaky
#[serial]
async fn test_commands() {
let env = setup_env()
.await
.expect("should have set up the test environment");
let nats_client = env
.nats_client()
.await
.expect("should have created a nats client for the test setup");
let config = TestWashConfig::random().await.unwrap();
let _guard = setup_test_wash(&config).await;
let mut wrapper = StreamWrapper::new("commands_integration".into(), nats_client.clone()).await;
let mut wrapper = StreamWrapper::new("commands_integration".into(), config.nats_port).await;
let ctl_client = wasmcloud_control_interface::ClientBuilder::new(nats_client.clone()).build();
let ctl_client =
wasmcloud_control_interface::ClientBuilder::new(wrapper.client.clone()).build();
let worker = CommandWorker::new(ctl_client.clone());
let host_id = ctl_client
.get_hosts()
.await
.expect("should get hosts back")
.first()
.unwrap()
.get(0)
.expect("Should be able to find hosts")
.data()
.map(|h| h.id())
.expect("should be able to get host")
.id
.to_owned();
let mut sub = nats_client
let mut sub = wrapper
.client
.subscribe("wasmbus.evt.default.>".to_string())
.await
.unwrap();
// Start a component
// Start an actor
wrapper
.publish_command(ScaleComponent {
component_id: HELLO_COMPONENT_ID.to_string(),
reference: HELLO_IMAGE_REF.to_string(),
component_id: Some(ECHO_ACTOR_ID.to_string()),
reference: "wasmcloud.azurecr.io/echo:0.3.4".to_string(),
host_id: host_id.clone(),
count: 2,
model_name: "fake".into(),
annotations: BTreeMap::new(),
config: vec![],
})
.await;
@ -62,38 +59,41 @@ async fn test_commands() {
.await
.expect("Should be able to handle command properly");
wait_for_event(&mut sub, "component_scaled").await;
wait_for_event(&mut sub, "actors_started").await;
// Sorry for the lazy de-racing, but for some reason if we don't wait for a bit the host hasn't
// finished updating its inventory
tokio::time::sleep(std::time::Duration::from_secs(1)).await;
// Get the current components and make sure stuff was started
let resp_data = ctl_client
// Get the current actors and make sure stuff was started
let inventory = ctl_client
.get_host_inventory(&host_id)
.await
.expect("should get host inventory back")
.into_data()
.expect("should have host inventory");
let inventory = resp_data.components();
.unwrap()
.actors;
assert_eq!(
inventory.len(),
1,
"Should only have 1 component: {:?}",
"Should only have 1 actor: {:?}",
inventory
);
assert_eq!(
inventory[0].image_ref(),
HELLO_IMAGE_REF,
"Should have started the correct component"
inventory[0].image_ref.as_deref().unwrap(),
"wasmcloud.azurecr.io/echo:0.3.4",
"Should have started the correct actor"
);
assert_eq!(
inventory[0].max_instances(),
2,
"Should have started the component with correct concurrency"
inventory[0].instances.len(),
1,
"Should have started the correct number of actors"
);
assert_eq!(
inventory[0]
.annotations()
inventory[0].instances[0].max_concurrent, 2,
"Should have started the actor with correct concurrency"
);
assert_eq!(
inventory[0].instances[0]
.annotations
.as_ref()
.unwrap()
.get(wadm::MANAGED_BY_ANNOTATION)
.expect("Should have the managed by annotation"),
@ -101,8 +101,9 @@ async fn test_commands() {
"Should have the proper identifier"
);
assert_eq!(
inventory[0]
.annotations()
inventory[0].instances[0]
.annotations
.as_ref()
.unwrap()
.get(wadm::APP_SPEC_ANNOTATION)
.expect("Should have the managed by annotation"),
@ -110,29 +111,17 @@ async fn test_commands() {
"Should have the proper identifier"
);
// Create configuration for provider
wrapper
.publish_command(PutConfig {
config_name: "fake-http_address".to_string(),
config: HashMap::from_iter([("address".to_string(), "0.0.0.0:8080".to_string())]),
})
.await;
let msg = wrapper.wait_for_command().await;
worker
.do_work(msg)
.await
.expect("Should be able to handle command properly");
// Start a provider
wrapper
.publish_command(StartProvider {
reference: HTTP_SERVER_IMAGE_REF.to_string(),
provider_id: HTTP_SERVER_COMPONENT_ID.to_owned(),
reference: "wasmcloud.azurecr.io/httpserver:0.17.0".to_string(),
host_id: host_id.clone(),
link_name: None,
model_name: "fake".into(),
annotations: BTreeMap::new(),
config: vec!["fake-http_address".to_string()],
config: Some(CapabilityConfig::Opaque(
"{\"address\":\"0.0.0.0:8080\"}".to_string(),
)),
})
.await;
@ -148,22 +137,21 @@ async fn test_commands() {
wait_for_event(&mut sub, "health_check_passed").await;
// Get the current providers and make sure stuff was started
let resp_data = ctl_client
let inventory = ctl_client
.get_host_inventory(&host_id)
.await
.expect("should get host inventory back")
.into_data()
.expect("should have host inventory");
let inventory = resp_data.providers();
.unwrap()
.providers;
assert_eq!(inventory.len(), 1, "Should only have 1 provider");
assert_eq!(
inventory[0].image_ref().unwrap(),
HTTP_SERVER_IMAGE_REF,
inventory[0].image_ref.as_deref().unwrap(),
"wasmcloud.azurecr.io/httpserver:0.17.0",
"Should have started the correct provider"
);
assert_eq!(
inventory[0]
.annotations()
.annotations
.as_ref()
.unwrap()
.get(wadm::MANAGED_BY_ANNOTATION)
.expect("Should have the managed by annotation"),
@ -172,7 +160,8 @@ async fn test_commands() {
);
assert_eq!(
inventory[0]
.annotations()
.annotations
.as_ref()
.unwrap()
.get(wadm::APP_SPEC_ANNOTATION)
.expect("Should have the managed by annotation"),
@ -182,15 +171,13 @@ async fn test_commands() {
// Put a linkdef
wrapper
.publish_command(PutLink {
source_id: HTTP_SERVER_COMPONENT_ID.to_owned(),
target: HELLO_COMPONENT_ID.to_owned(),
name: wadm::DEFAULT_LINK_NAME.to_owned(),
wit_namespace: "wasi".to_string(),
wit_package: "http".to_string(),
interfaces: vec!["incoming-handler".to_string()],
model_name: "fake".to_string(),
..Default::default()
.publish_command(PutLinkdef {
component_id: ECHO_ACTOR_ID.to_owned(),
provider_id: HTTP_SERVER_PROVIDER_ID.to_owned(),
link_name: wadm::DEFAULT_LINK_NAME.to_owned(),
contract_id: "wasmcloud:httpserver".to_string(),
values: [("ADDRESS".to_string(), "0.0.0.0:9999".to_string())].into(),
model_name: "fake".into(),
})
.await;
@ -202,31 +189,25 @@ async fn test_commands() {
wait_for_event(&mut sub, "linkdef_set").await;
// Get the current components and make sure stuff was started
let inventory = ctl_client
.get_links()
.await
.expect("should get links back")
.into_data()
.expect("should have links");
// Get the current actors and make sure stuff was started
let inventory = ctl_client.query_links().await.unwrap();
// We could have more than one link due to local testing, so search for the proper link
inventory
.into_iter()
.find(|ld| {
ld.source_id() == HTTP_SERVER_COMPONENT_ID
&& ld.target() == HELLO_COMPONENT_ID
&& ld.wit_namespace() == "wasi"
&& ld.wit_package() == "http"
ld.component_id == ECHO_ACTOR_ID
&& ld.provider_id == HTTP_SERVER_PROVIDER_ID
&& ld.contract_id == "wasmcloud:httpserver"
})
.expect("Linkdef should exist");
// Delete the linkdef
wrapper
.publish_command(DeleteLink {
source_id: HTTP_SERVER_COMPONENT_ID.to_owned(),
.publish_command(DeleteLinkdef {
component_id: ECHO_ACTOR_ID.to_owned(),
provider_id: HTTP_SERVER_PROVIDER_ID.to_owned(),
link_name: wadm::DEFAULT_LINK_NAME.to_owned(),
wit_namespace: "wasi".to_string(),
wit_package: "http".to_string(),
contract_id: "wasmcloud:httpserver".to_string(),
model_name: "fake".into(),
})
.await;
@ -239,20 +220,14 @@ async fn test_commands() {
wait_for_event(&mut sub, "linkdef_deleted").await;
// Get the current components and make sure stuff was started
let inventory = ctl_client
.get_links()
.await
.expect("should get links back")
.into_data()
.expect("should have links");
// Get the current actors and make sure stuff was started
let inventory = ctl_client.query_links().await.unwrap();
// We could have more than one link due to local testing, so search for the proper link
assert!(
!inventory.into_iter().any(|ld| {
ld.target() == HELLO_COMPONENT_ID
&& ld.source_id() == HTTP_SERVER_COMPONENT_ID
&& ld.wit_namespace() == "wasi"
&& ld.wit_package() == "http"
ld.component_id == ECHO_ACTOR_ID
&& ld.provider_id == HTTP_SERVER_PROVIDER_ID
&& ld.contract_id == "wasmcloud:httpserver"
}),
"Linkdef should be deleted"
);
@ -260,7 +235,9 @@ async fn test_commands() {
// Stop the provider
wrapper
.publish_command(StopProvider {
provider_id: HTTP_SERVER_COMPONENT_ID.to_owned(),
provider_id: HTTP_SERVER_PROVIDER_ID.to_owned(),
contract_id: "wasmcloud:httpserver".to_owned(),
link_name: None,
host_id: host_id.clone(),
model_name: "fake".into(),
annotations: BTreeMap::new(),
@ -276,25 +253,22 @@ async fn test_commands() {
wait_for_event(&mut sub, "provider_stopped").await;
// Get the current providers and make sure stuff was started
let resp_data = ctl_client
let inventory = ctl_client
.get_host_inventory(&host_id)
.await
.expect("should get host inventory back")
.into_data()
.expect("should have host inventory");
let inventory = resp_data.providers();
.unwrap()
.providers;
assert!(inventory.is_empty(), "Should have no providers");
// Stop the component
// Stop the actor
wrapper
.publish_command(ScaleComponent {
component_id: HELLO_COMPONENT_ID.to_owned(),
reference: HELLO_IMAGE_REF.to_string(),
component_id: Some(ECHO_ACTOR_ID.to_owned()),
reference: "wasmcloud.azurecr.io/echo:0.3.4".to_string(),
count: 0,
host_id: host_id.clone(),
model_name: "fake".into(),
annotations: BTreeMap::new(),
config: vec![],
})
.await;
@ -304,74 +278,70 @@ async fn test_commands() {
.await
.expect("Should be able to handle command properly");
wait_for_event(&mut sub, "component_scaled").await;
wait_for_event(&mut sub, "actors_stopped").await;
// Get the current providers and make sure stuff was started
let resp_data = ctl_client
let inventory = ctl_client
.get_host_inventory(&host_id)
.await
.expect("should get host inventory back")
.into_data()
.expect("should have host inventory");
let inventory = resp_data.components();
assert!(inventory.is_empty(), "Should have no components");
.unwrap()
.actors;
assert!(inventory.is_empty(), "Should have no actors");
}
#[tokio::test]
// TODO: Run in parallel once https://github.com/wasmCloud/wash/issues/402 is fixed. Please
// note this test should probably be changed to an e2e test as the order of events is somewhat flaky
#[serial]
async fn test_annotation_stop() {
// This test is a sanity check that we only stop annotated components
let env = setup_env()
.await
.expect("should have set up the test environment");
let nats_client = env
.nats_client()
.await
.expect("should have created a nats client for the test setup");
// This test is a sanity check that we only stop annotated actors
let config = TestWashConfig::random().await.unwrap();
let _guard = setup_test_wash(&config).await;
let mut wrapper = StreamWrapper::new("annotation_stop".into(), nats_client.clone()).await;
let mut wrapper = StreamWrapper::new("annotation_stop".into(), config.nats_port).await;
let ctl_client = wasmcloud_control_interface::ClientBuilder::new(nats_client.clone()).build();
let ctl_client =
wasmcloud_control_interface::ClientBuilder::new(wrapper.client.clone()).build();
let worker = CommandWorker::new(ctl_client.clone());
let mut sub = nats_client
let mut sub = wrapper
.client
.subscribe("wasmbus.evt.default.>".to_string())
.await
.unwrap();
let responses = ctl_client.get_hosts().await.unwrap();
let host_id = responses
.first()
let host_id = ctl_client
.get_hosts()
.await
.unwrap()
.get(0)
.expect("Should be able to find hosts")
.data()
.map(|v| v.id())
.unwrap();
.id
.to_owned();
// Start an unmanaged component
// Start an unmanaged actor
// NOTE(thomastaylor312): This is a workaround for the current behavior where empty annotations
// act on _everything_. We could technically move this back down after the initial scale up of
// the managed components after https://github.com/wasmCloud/wasmCloud/issues/746 is resolved
// the managed actors after https://github.com/wasmCloud/wasmCloud/issues/746 is resolved
ctl_client
.scale_component(host_id, HELLO_IMAGE_REF, "unmanaged-hello", 1, None, vec![])
.scale_component(&host_id, "wasmcloud.azurecr.io/echo:0.3.4", Some(1), None)
.await
.unwrap();
wait_for_event(&mut sub, "component_scaled").await;
wait_for_event(&mut sub, "actors_started").await;
// Sorry for the lazy de-racing, but for some reason if we don't wait for a bit the host hasn't
// finished updating its inventory
tokio::time::sleep(std::time::Duration::from_secs(1)).await;
// Start a component
// Start an actor
wrapper
.publish_command(ScaleComponent {
component_id: HELLO_COMPONENT_ID.to_string(),
reference: HELLO_IMAGE_REF.to_string(),
host_id: host_id.into(),
component_id: Some(ECHO_ACTOR_ID.to_string()),
reference: "wasmcloud.azurecr.io/echo:0.3.4".to_string(),
host_id: host_id.clone(),
count: 2,
model_name: "fake".into(),
annotations: BTreeMap::from_iter([("fake".to_string(), "wake".to_string())]),
..Default::default()
})
.await;
@ -381,58 +351,49 @@ async fn test_annotation_stop() {
.await
.expect("Should be able to handle command properly");
// Wait for the component_scaled event
wait_for_event(&mut sub, "component_scaled").await;
// Wait for the actors_started event
wait_for_event(&mut sub, "actors_started").await;
// Sorry for the lazy de-racing, but for some reason if we don't wait for a bit the host hasn't
// finished updating its inventory
tokio::time::sleep(std::time::Duration::from_secs(1)).await;
// Get the current components and make sure stuff was started
let resp_data = ctl_client
.get_host_inventory(host_id)
// Get the current actors and make sure stuff was started
let inventory = ctl_client
.get_host_inventory(&host_id)
.await
.expect("should get host inventory back")
.into_data()
.expect("should have host inventory");
let inventory = resp_data.components();
let managed_inventory = inventory
.iter()
.filter(|c| {
c.annotations()
.is_some_and(|a| a.contains_key(MANAGED_BY_ANNOTATION))
})
.collect::<Vec<_>>();
assert_eq!(inventory.len(), 2, "Should only have 2 components");
.unwrap()
.actors;
assert_eq!(inventory.len(), 1, "Should only have 1 actor");
assert_eq!(
managed_inventory.len(),
1,
"Should only have 1 managed component"
inventory[0].image_ref.as_deref().unwrap(),
"wasmcloud.azurecr.io/echo:0.3.4",
"Should have started the correct actor"
);
assert_eq!(
managed_inventory[0].image_ref(),
HELLO_IMAGE_REF,
"Should have started the correct component"
);
assert!(managed_inventory[0]
.annotations()
.map(|annotations| annotations.contains_key(MANAGED_BY_ANNOTATION))
.unwrap_or(false));
assert_eq!(
managed_inventory[0].max_instances(),
inventory[0]
.instances
.iter()
// Only select the managed actors
.filter(|inst| inst
.annotations
.as_ref()
.map(|annotations| !annotations.is_empty())
.unwrap_or(false))
.map(|i| { i.max_concurrent })
.sum::<u16>(),
2,
"Should have started the correct concurrency of components"
"Should have started the correct number of actors"
);
// Stop the managed components
// Stop the managed actors
wrapper
.publish_command(ScaleComponent {
component_id: HELLO_COMPONENT_ID.to_owned(),
reference: HELLO_IMAGE_REF.to_string(),
component_id: Some(ECHO_ACTOR_ID.to_owned()),
reference: "wasmcloud.azurecr.io/echo:0.3.4".to_string(),
count: 0,
host_id: host_id.into(),
host_id: host_id.clone(),
model_name: "fake".into(),
annotations: BTreeMap::from_iter([("fake".to_string(), "wake".to_string())]),
config: vec![],
})
.await;
@ -442,32 +403,30 @@ async fn test_annotation_stop() {
.await
.expect("Should be able to handle command properly");
wait_for_event(&mut sub, "component_scaled").await;
wait_for_event(&mut sub, "actors_stopped").await;
// Get the current providers and make sure stuff was started
let resp_data = ctl_client
.get_host_inventory(host_id)
let inventory = ctl_client
.get_host_inventory(&host_id)
.await
.unwrap()
.into_data()
.unwrap();
let inventory = resp_data.components();
assert_eq!(inventory.len(), 1, "Should only have 1 component");
.actors;
assert_eq!(inventory.len(), 1, "Should only have 1 actor");
assert_eq!(
inventory[0].image_ref(),
HELLO_IMAGE_REF,
"Should have started the correct component"
inventory[0].image_ref.as_deref().unwrap(),
"wasmcloud.azurecr.io/echo:0.3.4",
"Should have started the correct actor"
);
assert_eq!(
inventory[0].max_instances(),
inventory[0].instances.len(),
1,
"Should have 1 unmanaged component still running"
"Should have 1 unmanaged actor still running"
);
}
async fn wait_for_event(sub: &mut async_nats::Subscriber, match_text: &str) {
// Providers can take a bit to start if they are downloading
let mut interval = tokio::time::interval(std::time::Duration::from_secs(30));
let mut interval = tokio::time::interval(std::time::Duration::from_secs(15));
// Consume the initial tick
interval.tick().await;
loop {

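Stepping back from the individual assertions, every exchange in the command tests above follows the same three-step rhythm. Distilled with the names the tests themselves use (a sketch only; the setup and the concrete command literal are omitted):

// 1. publish a command onto the wadm command stream
wrapper.publish_command(command).await;
// 2. consume it back off the stream and hand it to the worker
let msg = wrapper.wait_for_command().await;
worker
    .do_work(msg)
    .await
    .expect("Should be able to handle command properly");
// 3. block until the matching lattice event shows the host reacted
wait_for_event(&mut sub, "component_scaled").await;
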
398
nottests/e2e_multitenant.rs Normal file
View File

@ -0,0 +1,398 @@
#![cfg(feature = "_e2e_tests")]
use std::{path::PathBuf, time::Duration};
use futures::StreamExt;
use wadm::server::{DeployResult, PutResult, StatusType};
mod e2e;
mod helpers;
use e2e::{assert_status, check_actors, check_providers, ClientInfo, ExpectedCount};
use helpers::{ECHO_ACTOR_ID, HTTP_SERVER_PROVIDER_ID};
use crate::e2e::check_status;
const MANIFESTS_PATH: &str = "test/data";
const DOCKER_COMPOSE_FILE: &str = "test/docker-compose-e2e-multitenant.yaml";
const MESSAGE_PUB_ACTOR_ID: &str = "MC3QONHYH3FY4KYFCOSVJWIDJG4WA2PVD6FHKR7FFT457GVUTZJYR2TJ";
const NATS_PROVIDER_ID: &str = "VADNMSIML2XGO2X4TPIONTIC55R2UUQGPPDZPAVSC2QD7E76CR77SPW7";
const ACCOUNT_EAST: &str = "Axxx";
const ACCOUNT_WEST: &str = "Ayyy";
const LATTICE_EAST: &str = "wasmcloud-east";
const LATTICE_WEST: &str = "wasmcloud-west";
#[cfg(feature = "_e2e_tests")]
#[tokio::test(flavor = "multi_thread")]
async fn run_multitenant_tests() {
let root_dir =
PathBuf::from(std::env::var("CARGO_MANIFEST_DIR").expect("Unable to find repo root"));
let manifest_dir = root_dir.join(MANIFESTS_PATH);
let compose_file = root_dir.join(DOCKER_COMPOSE_FILE);
// Enable multitenancy
std::env::set_var("WADM_MULTITENANT", "true");
let mut client_info = ClientInfo::new(manifest_dir, compose_file).await;
client_info
.add_ctl_client(LATTICE_EAST, Some("Axxx.wasmbus.ctl"))
.await;
client_info
.add_ctl_client(LATTICE_WEST, Some("Ayyy.wasmbus.ctl"))
.await;
client_info.launch_wadm().await;
// Wait for the first event on both lattice prefixes before we start deploying and checking
// statuses. Wadm can absolutely handle hosts starting before you start the wadm process, but the first
// event on the lattice initializes the lattice monitor, and the test below starts asserting right away.
let mut east_sub = client_info
.client
.subscribe(format!("wadm.evt.{LATTICE_EAST}"))
.await
.expect("Should be able to subscribe to east events");
let mut west_sub = client_info
.client
.subscribe(format!("wadm.evt.{LATTICE_WEST}"))
.await
.expect("Should be able to subscribe to west events");
let _ = east_sub.next().await;
let _ = west_sub.next().await;
// NOTE(thomastaylor312): A nice-to-have here, though one I didn't want to figure out now, would
// be to catch the panics from tests and label the backtrace with information about which test
// failed. Another issue is that only the first panic is returned, so capturing the backtraces
// and then printing them all nicely would probably be good
// We run this test first, by itself, because it is a basic check that wadm spins up only the exact
// number of resources requested. If we ran it in parallel, some of the shared resources (namely
// providers) would be created by other tests and this test would fail
test_basic_separation(&client_info)
.await
.expect("basic multitenant separation to work");
}
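In multitenant mode, each tenant account gets its own control-interface subject prefix (`Axxx.wasmbus.ctl` and `Ayyy.wasmbus.ctl` above). A trivial sketch of that convention; the helper is hypothetical, since the tests pass the prefix explicitly:

```rust
/// Build the control-interface subject prefix for an optional tenant
/// account (hypothetical helper; not part of the test harness).
fn ctl_topic_prefix(account: Option<&str>) -> String {
    match account {
        Some(acct) => format!("{acct}.wasmbus.ctl"),
        None => "wasmbus.ctl".to_string(),
    }
}

fn main() {
    assert_eq!(ctl_topic_prefix(Some("Axxx")), "Axxx.wasmbus.ctl");
    assert_eq!(ctl_topic_prefix(None), "wasmbus.ctl");
}
```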
async fn test_basic_separation(client_info: &ClientInfo) -> anyhow::Result<()> {
let stream = client_info.get_status_stream().await;
stream
.purge()
.await
.expect("shouldn't have errored purging stream");
let resp = client_info
.put_manifest_from_file("simple.yaml", Some(ACCOUNT_EAST), Some(LATTICE_EAST))
.await;
assert_ne!(
resp.result,
PutResult::Error,
"Shouldn't have errored when creating manifest: {resp:?}"
);
let resp = client_info
.put_manifest_from_file("simple2.yaml", Some(ACCOUNT_WEST), Some(LATTICE_WEST))
.await;
assert_ne!(
resp.result,
PutResult::Error,
"Shouldn't have errored when creating manifest: {resp:?}"
);
eprintln!("Deploying manifests to east and west");
let resp = client_info
.deploy_manifest("echo-simple", Some(ACCOUNT_EAST), Some(LATTICE_EAST), None)
.await;
assert_ne!(
resp.result,
DeployResult::Error,
"Shouldn't have errored when deploying manifest: {resp:?}"
);
let resp = client_info
.deploy_manifest(
"messaging-simple",
Some(ACCOUNT_WEST),
Some(LATTICE_WEST),
None,
)
.await;
assert_ne!(
resp.result,
DeployResult::Error,
"Shouldn't have errored when deploying manifest: {resp:?}"
);
// Once the manifest is deployed, the first status should be reconciling
check_status(
&stream,
LATTICE_EAST,
"echo-simple",
StatusType::Reconciling,
)
.await
.unwrap();
check_status(
&stream,
LATTICE_WEST,
"messaging-simple",
StatusType::Reconciling,
)
.await
.unwrap();
// NOTE: This runs for a while, but it's because we're waiting for the provider to download,
// which can take a bit
// Ensure echo deployed in east and messaging deployed in west
assert_status(None, Some(7), || async {
let east_inventory = client_info.get_all_inventory(LATTICE_EAST).await?;
let west_inventory = client_info.get_all_inventory(LATTICE_WEST).await?;
// Check for echo actor and httpserver in east, as well as the link between them
eprintln!("Ensuring east has echo, httpserver and link");
check_actors(
&east_inventory,
"wasmcloud.azurecr.io/echo:0.3.7",
"echo-simple",
4,
)?;
check_providers(
&east_inventory,
"wasmcloud.azurecr.io/httpserver:0.17.0",
ExpectedCount::Exactly(1),
)?;
// Oh no a sleep! How horrible!
// Actually, this is a good thing! If we reach this point because the httpserver
// provider upgraded really quickly, that means we still have to wait 5 seconds
// for the provider health check to trigger linkdef creation. So, after everything
// gets created, give the linkdef scaler time to react to the provider health check.
tokio::time::sleep(Duration::from_secs(5)).await;
let links = client_info
.ctl_client(LATTICE_EAST)
.query_links()
.await
.map_err(|e| anyhow::anyhow!("{e:?}"))?;
if !links.iter().any(|ld| {
ld.component_id == ECHO_ACTOR_ID
&& ld.provider_id == HTTP_SERVER_PROVIDER_ID
&& ld.contract_id == "wasmcloud:httpserver"
}) {
anyhow::bail!(
"Link between echo actor and http provider should exist: {:#?}",
links
)
}
// Check for messaging actor, httpserver and messaging in west, as well as the links between them
eprintln!("Ensuring west has message-pub, httpserver, messaging and link");
check_actors(
&west_inventory,
"wasmcloud.azurecr.io/message-pub:0.1.3",
"messaging-simple",
1,
)?;
check_providers(
&west_inventory,
"wasmcloud.azurecr.io/httpserver:0.18.2",
ExpectedCount::Exactly(1),
)?;
check_providers(
&west_inventory,
"wasmcloud.azurecr.io/nats_messaging:0.17.2",
ExpectedCount::Exactly(1),
)?;
let links = client_info
.ctl_client(LATTICE_WEST)
.query_links()
.await
.map_err(|e| anyhow::anyhow!("{e:?}"))?;
if !links.iter().any(|ld| {
ld.component_id == MESSAGE_PUB_ACTOR_ID
&& ld.provider_id == HTTP_SERVER_PROVIDER_ID
&& ld.contract_id == "wasmcloud:httpserver"
}) {
anyhow::bail!(
"Link between messaging actor and http provider should exist: {:#?}",
links
)
}
if !links.iter().any(|ld| {
ld.component_id == MESSAGE_PUB_ACTOR_ID
&& ld.provider_id == NATS_PROVIDER_ID
&& ld.contract_id == "wasmcloud:messaging"
}) {
anyhow::bail!(
"Link between messaging actor and nats provider should exist: {:#?}",
links
)
}
// Check to ensure that no resources from west are running in east and vice versa
eprintln!("Ensuring east has no west resources and vice versa");
check_actors(
&west_inventory,
"wasmcloud.azurecr.io/echo:0.3.7",
"echo-simple",
0,
)?;
check_providers(
&west_inventory,
"wasmcloud.azurecr.io/httpserver:0.17.0",
ExpectedCount::Exactly(0),
)?;
let links = client_info
.ctl_client(LATTICE_WEST)
.query_links()
.await
.map_err(|e| anyhow::anyhow!("{e:?}"))?;
if links.iter().any(|ld| {
ld.component_id == ECHO_ACTOR_ID
&& ld.provider_id == HTTP_SERVER_PROVIDER_ID
&& ld.contract_id == "wasmcloud:httpserver"
}) {
anyhow::bail!(
"Link between echo actor and http provider should not exist: {:#?}",
links
)
}
check_actors(
&east_inventory,
"wasmcloud.azurecr.io/message-pub:0.1.3",
"messaging-simple",
0,
)?;
check_providers(
&east_inventory,
"wasmcloud.azurecr.io/httpserver:0.18.2",
ExpectedCount::Exactly(0),
)?;
check_providers(
&east_inventory,
"wasmcloud.azurecr.io/nats_messaging:0.17.2",
ExpectedCount::Exactly(0),
)?;
let links = client_info
.ctl_client(LATTICE_EAST)
.query_links()
.await
.map_err(|e| anyhow::anyhow!("{e:?}"))?;
if links.iter().any(|ld| {
ld.component_id == MESSAGE_PUB_ACTOR_ID
&& ld.provider_id == HTTP_SERVER_PROVIDER_ID
&& ld.contract_id == "wasmcloud:httpserver"
}) {
anyhow::bail!(
"Link between messagepub actor and http provider should not exist: {:#?}",
links
)
}
if links.iter().any(|ld| {
ld.component_id == MESSAGE_PUB_ACTOR_ID
&& ld.provider_id == NATS_PROVIDER_ID
&& ld.contract_id == "wasmcloud:messaging"
}) {
anyhow::bail!(
"Link between messagepub actor and http provider should not exist: {:#?}",
links
)
}
check_status(&stream, LATTICE_EAST, "echo-simple", StatusType::Deployed)
.await
.unwrap();
check_status(
&stream,
LATTICE_WEST,
"messaging-simple",
StatusType::Deployed,
)
.await
.unwrap();
Ok(())
})
.await;
eprintln!("Everything good, undeploying manifests");
// sleep 10 seconds
tokio::time::sleep(std::time::Duration::from_secs(10)).await;
// Undeploy manifests
eprintln!("Undeploying manifest from east and west");
let resp = client_info
.undeploy_manifest("echo-simple", Some(ACCOUNT_EAST), Some(LATTICE_EAST))
.await;
assert_ne!(
resp.result,
DeployResult::Error,
"Shouldn't have errored when undeploying manifest: {resp:?}"
);
let resp = client_info
.undeploy_manifest("messaging-simple", Some(ACCOUNT_WEST), Some(LATTICE_WEST))
.await;
assert_ne!(
resp.result,
DeployResult::Error,
"Shouldn't have errored when undeploying manifest: {resp:?}"
);
check_status(&stream, LATTICE_EAST, "echo-simple", StatusType::Undeployed)
.await
.unwrap();
check_status(
&stream,
LATTICE_WEST,
"messaging-simple",
StatusType::Undeployed,
)
.await
.unwrap();
// assert that no actors or providers with annotations exist
assert_status(None, None, || async {
let east_inventory = client_info.get_all_inventory(LATTICE_EAST).await?;
println!("east inventory: {:?}", east_inventory);
let west_inventory = client_info.get_all_inventory(LATTICE_WEST).await?;
println!("west inventory: {:?}", west_inventory);
eprintln!("Ensuring resources stopped in east");
check_actors(
&east_inventory,
"wasmcloud.azurecr.io/echo:0.3.7",
"echo-simple",
0,
)?;
check_providers(
&east_inventory,
"wasmcloud.azurecr.io/httpserver:0.17.0",
ExpectedCount::Exactly(0),
)?;
eprintln!("Ensuring resources stopped in west");
check_actors(
&west_inventory,
"wasmcloud.azurecr.io/message-pub:0.1.3",
"messaging-simple",
0,
)?;
check_providers(
&west_inventory,
"wasmcloud.azurecr.io/httpserver:0.18.2",
ExpectedCount::Exactly(0),
)?;
check_providers(
&west_inventory,
"wasmcloud.azurecr.io/nats_messaging:0.17.2",
ExpectedCount::Exactly(0),
)?;
Ok(())
})
.await;
Ok(())
}
450 tests/e2e_upgrades.rs Normal file
View File
@ -0,0 +1,450 @@
#![cfg(feature = "_e2e_tests")]
use std::path::PathBuf;
use std::time::Duration;
use futures::{FutureExt, StreamExt};
use wadm::server::{DeployResult, PutResult, StatusType};
mod e2e;
mod helpers;
use e2e::{assert_status, check_actors, check_providers, ClientInfo, ExpectedCount};
use helpers::{ECHO_ACTOR_ID, HTTP_SERVER_PROVIDER_ID};
use crate::e2e::check_status;
const MANIFESTS_PATH: &str = "test/data";
const DOCKER_COMPOSE_FILE: &str = "test/docker-compose-e2e-upgrade.yaml";
const KV_COUNTER_ACTOR_ID: &str = "MCFMFDWFHGKELOXPCNCDXKK5OFLHBVEWRAOXR5JSQUD2TOFRE3DFPM7E";
const KV_REDIS_PROVIDER_ID: &str = "VAZVC4RX54J2NVCMCW7BPCAHGGG5XZXDBXFUMDUXGESTMQEJLC3YVZWB";
#[cfg(feature = "_e2e_tests")]
#[tokio::test(flavor = "multi_thread")]
async fn run_upgrade_tests() {
let root_dir =
PathBuf::from(std::env::var("CARGO_MANIFEST_DIR").expect("Unable to find repo root"));
let manifest_dir = root_dir.join(MANIFESTS_PATH);
// NOTE(brooksmtownsend) reusing the e2e docker compose file for now but I'll only
// really be concerned with the application on a single host.
let compose_file = root_dir.join(DOCKER_COMPOSE_FILE);
let mut client_info = ClientInfo::new(manifest_dir, compose_file).await;
client_info.add_ctl_client("default", None).await;
client_info.launch_wadm().await;
// Wait for the first event on the lattice prefix before we start deploying and checking
// statuses. Wadm can absolutely handle hosts starting before you start the wadm process, but the first
// event on the lattice initializes the lattice monitor, and the test below starts asserting right away.
let mut sub = client_info
.client
.subscribe("wadm.evt.default".to_string())
.await
.expect("Should be able to subscribe to default events");
let _ = sub.next().await;
// Wait for hosts to start
let mut did_start = false;
for _ in 0..10 {
match client_info.ctl_client("default").get_hosts().await {
Ok(hosts) if hosts.len() == 1 => {
eprintln!("Host {}/1 currently available", hosts.len());
did_start = true;
break;
}
Ok(hosts) => {
eprintln!(
"Waiting for host to be available {}/1 currently available",
hosts.len()
);
}
Err(e) => {
eprintln!("Error when fetching hosts: {e}",)
}
}
tokio::time::sleep(Duration::from_secs(1)).await;
}
if !did_start {
panic!("Hosts didn't start")
}
test_upgrade(&client_info).boxed().await;
}
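The host-readiness loop above is a bounded-retry poll; the same pattern can be factored into a generic helper. A minimal sketch assuming tokio is available (the helper is hypothetical, not part of this suite):

```rust
use std::future::Future;
use std::time::Duration;

/// Poll `check` up to `attempts` times, sleeping `delay` between tries,
/// returning true as soon as it succeeds (hypothetical helper).
async fn wait_until<F, Fut>(attempts: u32, delay: Duration, mut check: F) -> bool
where
    F: FnMut() -> Fut,
    Fut: Future<Output = bool>,
{
    for _ in 0..attempts {
        if check().await {
            return true;
        }
        tokio::time::sleep(delay).await;
    }
    false
}
```

With such a helper, the loop above reduces to a single `wait_until(10, Duration::from_secs(1), || async { ... }).await` call.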
async fn test_upgrade(client_info: &ClientInfo) {
let stream = client_info.get_status_stream().await;
stream
.purge()
.await
.expect("shouldn't have errored purging stream");
let resp = client_info
.put_manifest_from_file("outdatedapp.yaml", None, None)
.await;
assert_ne!(
resp.result,
PutResult::Error,
"Shouldn't have errored when creating manifest: {resp:?}"
);
let resp = client_info
.deploy_manifest("updateapp", None, None, None)
.await;
assert_ne!(
resp.result,
DeployResult::Error,
"Shouldn't have errored when deploying manifest: {resp:?}"
);
// Once the manifest is deployed, the first status should be reconciling
check_status(&stream, "default", "updateapp", StatusType::Reconciling)
.await
.unwrap();
assert_status(None, Some(7), || async {
let inventory = client_info.get_all_inventory("default").await?;
check_actors(
&inventory,
"wasmcloud.azurecr.io/xkcd:0.1.1",
"updateapp",
5,
)?;
check_actors(
&inventory,
"wasmcloud.azurecr.io/echo:0.3.4",
"updateapp",
3,
)?;
check_actors(
&inventory,
"wasmcloud.azurecr.io/kvcounter:0.4.0",
"updateapp",
3,
)?;
check_providers(
&inventory,
"wasmcloud.azurecr.io/httpserver:0.17.0",
ExpectedCount::Exactly(1),
)?;
check_providers(
&inventory,
"wasmcloud.azurecr.io/kvredis:0.22.0",
ExpectedCount::Exactly(1),
)?;
// Oh no a sleep! How horrible!
// Actually, this is a good thing! If we reach this point because the httpserver
// provider upgraded really quickly, that means we still have to wait 5 seconds
// for the provider health check to trigger linkdef creation. So, after everything
// gets created, give the linkdef scaler time to react to the provider health check.
tokio::time::sleep(Duration::from_secs(5)).await;
let links = client_info
.ctl_client("default")
.query_links()
.await
.map_err(|e| anyhow::anyhow!("{e:?}"))?;
println!("Links: {:?}", links);
if !links.iter().any(|ld| {
ld.component_id == ECHO_ACTOR_ID
&& ld.provider_id == HTTP_SERVER_PROVIDER_ID
&& ld.contract_id == "wasmcloud:httpserver"
&& ld
.values
.get("address")
.map(|v| v == "0.0.0.0:8080")
.expect("Linkdef values should have an address")
}) {
anyhow::bail!(
"Link between echo actor and http provider should exist on port 8080: {:#?}",
links
)
}
if !links.iter().any(|ld| {
ld.component_id == KV_COUNTER_ACTOR_ID
&& ld.provider_id == KV_REDIS_PROVIDER_ID
&& ld.contract_id == "wasmcloud:keyvalue"
&& ld
.values
.get("URL")
.map(|v| v == "redis://127.0.0.1:6379")
.expect("Linkdef values should have a redis URL")
}) {
anyhow::bail!(
"Link between kvcounter actor and redis provider should exist: {:#?}",
links
)
}
check_status(&stream, "default", "updateapp", StatusType::Deployed)
.await
.unwrap();
Ok(())
})
.await;
// Deploy updated manifest
let resp = client_info
.put_manifest_from_file("upgradedapp.yaml", None, None)
.await;
assert_ne!(
resp.result,
PutResult::Error,
"Shouldn't have errored when creating manifest: {resp:?}"
);
let resp = client_info
.deploy_manifest("updateapp", None, None, Some("v0.0.2"))
.await;
assert_ne!(
resp.result,
DeployResult::Error,
"Shouldn't have errored when deploying manifest: {resp:?}"
);
// Once the manifest is updated, the status should be reconciling
check_status(&stream, "default", "updateapp", StatusType::Reconciling)
.await
.unwrap();
assert_status(None, None, || async {
let inventory = client_info.get_all_inventory("default").await?;
println!("Inventory: {:?}", inventory);
check_actors(
&inventory,
"wasmcloud.azurecr.io/xkcd:0.1.1",
"updateapp",
5,
)?;
check_actors(
&inventory,
"wasmcloud.azurecr.io/message-pub:0.1.3",
"updateapp",
1,
)?;
check_actors(
&inventory,
"wasmcloud.azurecr.io/echo:0.3.8",
"updateapp",
3,
)?;
check_actors(
&inventory,
"wasmcloud.azurecr.io/kvcounter:0.4.0",
"updateapp",
0,
)?;
check_providers(
&inventory,
"wasmcloud.azurecr.io/httpserver:0.19.0",
ExpectedCount::Exactly(1),
)?;
check_providers(
&inventory,
"wasmcloud.azurecr.io/kvredis:0.22.0",
ExpectedCount::Exactly(0),
)?;
// Oh no a sleep! How horrible!
// Actually, this is a good thing! If we reach this point because the httpserver
// provider upgraded really quickly, that means we still have to wait 5 seconds
// for the provider health check to trigger linkdef creation. So, after everything
// gets created, give the linkdef scaler time to react to the provider health check.
tokio::time::sleep(Duration::from_secs(5)).await;
let links = client_info
.ctl_client("default")
.query_links()
.await
.map_err(|e| anyhow::anyhow!("{e:?}"))?;
if !links.iter().any(|ld| {
ld.component_id == ECHO_ACTOR_ID
&& ld.provider_id == HTTP_SERVER_PROVIDER_ID
&& ld.contract_id == "wasmcloud:httpserver"
&& ld
.values
.get("address")
.map(|v| v == "0.0.0.0:8082")
.expect("Linkdef values should have an address")
}) {
anyhow::bail!(
"Link between echo actor and http provider should exist on port 8082: {:#?}",
links
)
}
if links.iter().any(|ld| {
ld.component_id == KV_COUNTER_ACTOR_ID
&& ld.provider_id == KV_REDIS_PROVIDER_ID
&& ld.contract_id == "wasmcloud:keyvalue"
}) {
anyhow::bail!(
"Link between kvcounter actor and redis provider should not exist: {:#?}",
links
)
}
check_status(&stream, "default", "updateapp", StatusType::Deployed)
.await
.unwrap();
Ok(())
})
.await;
// Deploy another updated manifest -- this time just w/ link values and provider config modifications
let resp = client_info
.put_manifest_from_file("upgradedapp2.yaml", None, None)
.await;
assert_ne!(
resp.result,
PutResult::Error,
"Shouldn't have errored when creating manifest: {resp:?}"
);
let resp = client_info
.deploy_manifest("updateapp", None, None, Some("v0.0.3"))
.await;
assert_ne!(
resp.result,
DeployResult::Error,
"Shouldn't have errored when deploying manifest: {resp:?}"
);
// Once the manifest is updated, the status should be reconciling
check_status(&stream, "default", "updateapp", StatusType::Reconciling)
.await
.unwrap();
assert_status(None, None, || async {
let inventory = client_info.get_all_inventory("default").await?;
println!("Inventory: {:?}", inventory);
check_actors(
&inventory,
"wasmcloud.azurecr.io/xkcd:0.1.1",
"updateapp",
5,
)?;
check_actors(
&inventory,
"wasmcloud.azurecr.io/message-pub:0.1.3",
"updateapp",
1,
)?;
check_actors(
&inventory,
"wasmcloud.azurecr.io/echo:0.3.8",
"updateapp",
3,
)?;
check_actors(
&inventory,
"wasmcloud.azurecr.io/kvcounter:0.4.0",
"updateapp",
0,
)?;
check_providers(
&inventory,
"wasmcloud.azurecr.io/httpserver:0.19.0",
ExpectedCount::Exactly(1),
)?;
check_providers(
&inventory,
"wasmcloud.azurecr.io/kvredis:0.22.0",
ExpectedCount::Exactly(0),
)?;
// Oh no a sleep! How horrible!
// Actually, this is a good thing! If we reach this point because the httpserver
// provider upgraded really quickly, that means we still have to wait 5 seconds
// for the provider health check to trigger linkdef creation. So, after everything
// gets created, give the linkdef scaler time to react to the provider health check.
tokio::time::sleep(Duration::from_secs(5)).await;
let links = client_info
.ctl_client("default")
.query_links()
.await
.map_err(|e| anyhow::anyhow!("{e:?}"))?;
if !links.iter().any(|ld| {
ld.component_id == ECHO_ACTOR_ID
&& ld.provider_id == HTTP_SERVER_PROVIDER_ID
&& ld.contract_id == "wasmcloud:httpserver"
&& ld
.values
.get("address")
.map(|v| v == "0.0.0.0:8088")
.expect("Linkdef values should have an address")
}) {
anyhow::bail!(
"Link between echo actor and http provider should exist on port 8088: {:#?}",
links
)
}
if links.iter().any(|ld| {
ld.component_id == KV_COUNTER_ACTOR_ID
&& ld.provider_id == KV_REDIS_PROVIDER_ID
&& ld.contract_id == "wasmcloud:keyvalue"
}) {
anyhow::bail!(
"Link between kvcounter actor and redis provider should not exist: {:#?}",
links
)
}
check_status(&stream, "default", "updateapp", StatusType::Deployed)
.await
.unwrap();
Ok(())
})
.await;
let resp = client_info
.put_manifest_from_file("upgradedapp3.yaml", None, None)
.await;
assert_ne!(
resp.result,
PutResult::Error,
"Shouldn't have errored when creating manifest: {resp:?}"
);
let resp = client_info
.deploy_manifest("dontupdateapp", None, None, Some("v0.0.1"))
.await;
assert_eq!(
resp.result,
DeployResult::Error,
"Should have errored when deploying manifest: {resp:?}"
);
assert_status(None, None, || async {
let inventory = client_info.get_all_inventory("default").await?;
println!("Inventory: {:?}", inventory);
check_providers(
&inventory,
"wasmcloud.azurecr.io/httpserver:0.19.0",
ExpectedCount::Exactly(1),
)?;
check_providers(
&inventory,
"wasmcloud.azurecr.io/httpserver:0.17.0",
ExpectedCount::Exactly(0),
)?;
Ok(())
})
.await;
}
View File
@ -1,5 +1,5 @@
use anyhow::Result;
use futures::{Stream, TryStreamExt};
use serial_test::serial;
use tokio::time::{timeout, Duration};
use wadm::{
@ -8,13 +8,13 @@ use wadm::{
};
mod helpers;
use helpers::{
setup_env, HELLO_COMPONENT_ID, HELLO_IMAGE_REF, HTTP_SERVER_COMPONENT_ID, HTTP_SERVER_IMAGE_REF,
};
use helpers::{setup_test_wash, TestWashConfig, ECHO_ACTOR_ID, HTTP_SERVER_PROVIDER_ID};
const WASI: &str = "wasi";
const HTTP: &str = "http";
const HTTP_INTERFACE: &str = "incoming-handler";
use anyhow::Result;
const HTTP_SERVER_REFERENCE: &str = "wasmcloud.azurecr.io/httpserver:0.17.0";
const ECHO_REFERENCE: &str = "wasmcloud.azurecr.io/echo:0.3.4";
const CONTRACT_ID: &str = "wasmcloud:httpserver";
const WASMBUS_EVENT_TOPIC: &str = "wasmbus.evt.default.>";
const STREAM_NAME: &str = "test_wadm_events";
@ -23,7 +23,10 @@ const DEFAULT_TIMEOUT_DURATION: Duration = Duration::from_secs(10);
// Link operations take a slightly longer time to work through
const LINK_OPERATION_TIMEOUT_DURATION: Duration = Duration::from_secs(30);
async fn get_event_consumer(client: async_nats::Client) -> EventConsumer {
async fn get_event_consumer(nats_url: String) -> EventConsumer {
let client = async_nats::connect(&nats_url)
.await
.expect("Unable to setup nats event consumer client");
let context = async_nats::jetstream::new(client);
// HACK: Other tests may create the mirror stream, which overlaps with the consumers here for
// our test, so delete it
@ -75,45 +78,33 @@ struct HostResponse {
}
#[tokio::test]
// TODO: Run in parallel once https://github.com/wasmCloud/wash/issues/402 is fixed. Please
// note this test should probably be changed to an e2e test as the order of events is somewhat flaky
#[serial]
async fn test_event_stream() -> Result<()> {
let env = setup_env()
.await
.expect("should have set up the test environment");
let nats_client = env
.nats_client()
.await
.expect("should have created a nats client for the test setup");
let config = TestWashConfig::random().await?;
let _guard = setup_test_wash(&config).await;
let mut stream = get_event_consumer(nats_client).await;
let mut stream = get_event_consumer(config.nats_url()).await;
// NOTE: the first heartbeat doesn't come for 30s so we are ignoring it for now
let ctl_port = env
.nats_port()
.await
.expect("should have received the port the nats-server is listening on")
let ctl_port = config
.nats_port
.unwrap_or(crate::helpers::DEFAULT_NATS_PORT)
.to_string();
// Start a component
helpers::run_wash_command([
"start",
"component",
HELLO_IMAGE_REF,
HELLO_COMPONENT_ID,
"--ctl-port",
&ctl_port,
])
.await
.expect("should have started the component");
// Start an actor
helpers::run_wash_command(["start", "actor", ECHO_REFERENCE, "--ctl-port", &ctl_port]).await;
let mut evt = wait_for_event(&mut stream, DEFAULT_TIMEOUT_DURATION).await;
if let Event::ComponentScaled(event) = evt.as_ref() {
if let Event::ActorsStarted(actor) = evt.as_ref() {
assert_eq!(
event.component_id, HELLO_COMPONENT_ID,
"Expected to get a scaledevent for the right component, got ID: {}",
event.component_id
actor.public_key, ECHO_ACTOR_ID,
"Expected to get a started event for the right actor, got ID: {}",
actor.public_key
);
} else {
panic!("Event wasn't a component scaled event");
panic!("Event wasn't an actor started event");
}
evt.ack().await.expect("Should be able to ack event");
@ -121,20 +112,18 @@ async fn test_event_stream() -> Result<()> {
helpers::run_wash_command([
"start",
"provider",
HTTP_SERVER_IMAGE_REF,
HTTP_SERVER_COMPONENT_ID,
HTTP_SERVER_REFERENCE,
"--ctl-port",
&ctl_port,
])
.await
.expect("should have started the provider");
.await;
let mut evt = wait_for_event(&mut stream, DEFAULT_TIMEOUT_DURATION).await;
if let Event::ProviderStarted(provider) = evt.as_ref() {
assert_eq!(
provider.provider_id, HTTP_SERVER_COMPONENT_ID,
provider.public_key, HTTP_SERVER_PROVIDER_ID,
"Expected to get a started event for the right provider, got ID: {}",
provider.provider_id
provider.public_key
);
} else {
println!("EVT: {:?}", evt);
@ -146,29 +135,25 @@ async fn test_event_stream() -> Result<()> {
helpers::run_wash_command([
"link",
"put",
HELLO_COMPONENT_ID,
HTTP_SERVER_COMPONENT_ID,
WASI,
HTTP,
"--interface",
HTTP_INTERFACE,
ECHO_ACTOR_ID,
HTTP_SERVER_PROVIDER_ID,
CONTRACT_ID,
"--ctl-port",
&ctl_port,
])
.await
.expect("should have created the link");
.await;
let mut evt = wait_for_event(&mut stream, LINK_OPERATION_TIMEOUT_DURATION).await;
if let Event::LinkdefSet(event) = evt.as_ref() {
if let Event::LinkdefSet(link) = evt.as_ref() {
assert_eq!(
event.linkdef.source_id(), HELLO_COMPONENT_ID,
"Expected to get a linkdef event for the right component and provider, got component ID: {}",
event.linkdef.source_id(),
link.linkdef.component_id, ECHO_ACTOR_ID,
"Expected to get a linkdef event for the right actor and provider, got actor ID: {}",
link.linkdef.component_id,
);
assert_eq!(
event.linkdef.target(), HTTP_SERVER_COMPONENT_ID,
"Expected to get a linkdef event for the right component and provider, got provider ID: {}",
event.linkdef.target(),
link.linkdef.provider_id, HTTP_SERVER_PROVIDER_ID,
"Expected to get a linkdef event for the right actor and provider, got provider ID: {}",
link.linkdef.provider_id,
);
} else {
panic!("Event wasn't an link set event");
@ -183,23 +168,24 @@ async fn test_event_stream() -> Result<()> {
helpers::run_wash_command([
"link",
"del",
HELLO_COMPONENT_ID,
"wasi",
"http",
"--link-name",
"default",
ECHO_ACTOR_ID,
CONTRACT_ID,
"--ctl-port",
&ctl_port,
])
.await
.expect("should have deleted the link");
.await;
let mut evt = wait_for_event(&mut stream, LINK_OPERATION_TIMEOUT_DURATION).await;
if let Event::LinkdefDeleted(event) = evt.as_ref() {
if let Event::LinkdefDeleted(link) = evt.as_ref() {
assert_eq!(
event.source_id, HELLO_COMPONENT_ID,
"Expected to get a linkdef event for the right component and provider, got component ID: {}",
event.source_id,
link.linkdef.component_id, ECHO_ACTOR_ID,
"Expected to get a linkdef event for the right actor and provider, got actor ID: {}",
link.linkdef.component_id,
);
assert_eq!(
link.linkdef.provider_id, HTTP_SERVER_PROVIDER_ID,
"Expected to get a linkdef event for the right actor and provider, got provider ID: {}",
link.linkdef.provider_id,
);
} else {
panic!("Event wasn't an link del event");
@ -208,9 +194,7 @@ async fn test_event_stream() -> Result<()> {
// Stop provider
let host_id = serde_json::from_slice::<HostResponse>(
&helpers::run_wash_command(["get", "hosts", "-o", "json", "--ctl-port", &ctl_port])
.await
.expect("should have received the host id"),
&helpers::run_wash_command(["get", "hosts", "-o", "json", "--ctl-port", &ctl_port]).await,
)
.unwrap()
.hosts[0]
@ -222,56 +206,53 @@ async fn test_event_stream() -> Result<()> {
helpers::run_wash_command([
"stop",
"provider",
HTTP_SERVER_COMPONENT_ID,
HTTP_SERVER_PROVIDER_ID,
CONTRACT_ID,
"--host-id",
&host_id,
"--ctl-port",
&ctl_port,
])
.await
.expect("should have stopped the provider");
.await;
let mut evt = wait_for_event(&mut stream, DEFAULT_TIMEOUT_DURATION).await;
if let Event::ProviderStopped(event) = evt.as_ref() {
if let Event::ProviderStopped(provider) = evt.as_ref() {
assert_eq!(
event.provider_id, HTTP_SERVER_COMPONENT_ID,
provider.public_key, HTTP_SERVER_PROVIDER_ID,
"Expected to get a stopped event for the right provider, got ID: {}",
event.provider_id
provider.public_key
);
} else {
panic!("Event wasn't an provider stopped event");
}
evt.ack().await.expect("Should be able to ack event");
// Stop a component
// Stop an actor
helpers::run_wash_command([
"stop",
"component",
HELLO_COMPONENT_ID,
"actor",
ECHO_ACTOR_ID,
"--host-id",
&host_id,
"--ctl-port",
&ctl_port,
])
.await
.expect("should have stopped component");
.await;
let mut evt = wait_for_event(&mut stream, DEFAULT_TIMEOUT_DURATION).await;
if let Event::ComponentScaled(event) = evt.as_ref() {
if let Event::ActorsStopped(actor) = evt.as_ref() {
assert_eq!(
event.component_id, HELLO_COMPONENT_ID,
"Expected to get a stopped event for the right component, got ID: {}",
event.component_id
actor.public_key, ECHO_ACTOR_ID,
"Expected to get a stopped event for the right actor, got ID: {}",
actor.public_key
);
} else {
panic!("Event wasn't a component scaled event");
panic!("Event wasn't an actor stopped event");
}
evt.ack().await.expect("Should be able to ack event");
// Stop the host
helpers::run_wash_command(["stop", "host", &host_id, "--ctl-port", &ctl_port])
.await
.expect("should have stopped the host");
helpers::run_wash_command(["stop", "host", &host_id, "--ctl-port", &ctl_port]).await;
let mut evt = wait_for_event(&mut stream, DEFAULT_TIMEOUT_DURATION).await;
if let Event::HostStopped(host) = evt.as_ref() {
@ -281,7 +262,7 @@ async fn test_event_stream() -> Result<()> {
host.id
);
} else {
panic!("Event wasn't a component scaled event");
panic!("Event wasn't an actor stopped event");
}
evt.ack().await.expect("Should be able to ack event");
@ -289,62 +270,49 @@ async fn test_event_stream() -> Result<()> {
}
#[tokio::test]
// Please note that there are problems when running this against 0.60+ hosts, as
// the KV bucket for linkdefs makes it so that all those linkdefs are emitted
// TODO: Run in parallel once https://github.com/wasmCloud/wash/issues/402 is fixed. This
// does work when you run it individually. Please note that there are problems when running this
// against 0.60+ hosts as the KV bucket for linkdefs makes it so that all those linkdefs are emitted
// as published events when the host starts
#[serial]
async fn test_nack_and_rereceive() -> Result<()> {
let env = setup_env()
.await
.expect("should have set up the test environment");
let nats_client = env
.nats_client()
.await
.expect("should have created a nats client for the test setup");
let config = TestWashConfig::random().await?;
let _guard = setup_test_wash(&config).await;
let mut stream = get_event_consumer(nats_client).await;
let mut stream = get_event_consumer(config.nats_url()).await;
let ctl_port = env
.nats_port()
.await
.expect("should have received the port the nats-server is listening on")
let ctl_port = config
.nats_port
.unwrap_or(crate::helpers::DEFAULT_NATS_PORT)
.to_string();
// Start a component
helpers::run_wash_command([
"start",
"component",
HELLO_IMAGE_REF,
HELLO_COMPONENT_ID,
"--ctl-port",
&ctl_port,
])
.await
.expect("should have started the component");
// Start an actor
helpers::run_wash_command(["start", "actor", ECHO_REFERENCE, "--ctl-port", &ctl_port]).await;
// Get the event and then nack it
let mut evt = wait_for_event(&mut stream, DEFAULT_TIMEOUT_DURATION).await;
// Make sure we got the right event
if let Event::ComponentScaled(component) = evt.as_ref() {
if let Event::ActorsStarted(actor) = evt.as_ref() {
assert_eq!(
component.component_id, HELLO_COMPONENT_ID,
"Expected to get a started event for the right component, got ID: {}",
component.component_id
actor.public_key, ECHO_ACTOR_ID,
"Expected to get a started event for the right actor, got ID: {}",
actor.public_key
);
} else {
panic!("Event wasn't a component scaled event");
panic!("Event wasn't an actor started event");
}
evt.nack().await;
// Now do it again and make sure we get the same event
let mut evt = wait_for_event(&mut stream, DEFAULT_TIMEOUT_DURATION).await;
if let Event::ComponentScaled(component) = evt.as_ref() {
if let Event::ActorsStarted(actor) = evt.as_ref() {
assert_eq!(
component.component_id, HELLO_COMPONENT_ID,
"Expected to get a started event for the right component, got ID: {}",
component.component_id
actor.public_key, ECHO_ACTOR_ID,
"Expected to get a started event for the right actor, got ID: {}",
actor.public_key
);
} else {
panic!("Event wasn't a component scaled event");
panic!("Event wasn't an actor started event");
}
evt.ack().await.expect("Should be able to ack event");
View File
@ -1,567 +0,0 @@
{
"$schema": "http://json-schema.org/draft-07/schema#",
"title": "Manifest",
"description": "Manifest file based on the Open Application Model (OAM) specification for declaratively managing wasmCloud applications",
"type": "object",
"required": [
"apiVersion",
"kind",
"metadata",
"spec"
],
"properties": {
"apiVersion": {
"description": "The OAM version of the manifest",
"type": "string"
},
"kind": {
"description": "The kind or type of manifest described by the spec",
"type": "string"
},
"metadata": {
"description": "Metadata describing the manifest",
"allOf": [
{
"$ref": "#/definitions/Metadata"
}
]
},
"spec": {
"description": "The specification for this manifest",
"allOf": [
{
"$ref": "#/definitions/Specification"
}
]
}
},
"additionalProperties": false,
"definitions": {
"CapabilityProperties": {
"type": "object",
"properties": {
"application": {
"description": "Information to locate a component within a shared application. Cannot be specified if the image is specified.",
"anyOf": [
{
"$ref": "#/definitions/SharedApplicationComponentProperties"
},
{
"type": "null"
}
]
},
"config": {
"description": "Named configuration to pass to the provider. The merged set of configuration will be passed to the provider at runtime using the provider SDK's `init()` function.",
"type": "array",
"items": {
"$ref": "#/definitions/ConfigProperty"
}
},
"id": {
"description": "The component ID to use for this provider. If not supplied, it will be generated as a combination of the [Metadata::name] and the image reference.",
"type": [
"string",
"null"
]
},
"image": {
"description": "The image reference to use. Required unless the component is a shared component that is defined in another shared application.",
"type": [
"string",
"null"
]
},
"secrets": {
"description": "Named secret references to pass to the t. The provider will be able to retrieve these values at runtime using `wasmcloud:secrets/store`.",
"type": "array",
"items": {
"$ref": "#/definitions/SecretProperty"
}
}
},
"additionalProperties": false
},
"Component": {
"description": "A component definition",
"type": "object",
"oneOf": [
{
"type": "object",
"required": [
"properties",
"type"
],
"properties": {
"properties": {
"$ref": "#/definitions/ComponentProperties"
},
"type": {
"type": "string",
"enum": [
"component"
]
}
}
},
{
"type": "object",
"required": [
"properties",
"type"
],
"properties": {
"properties": {
"$ref": "#/definitions/CapabilityProperties"
},
"type": {
"type": "string",
"enum": [
"capability"
]
}
}
}
],
"required": [
"name"
],
"properties": {
"name": {
"description": "The name of this component",
"type": "string"
},
"traits": {
"description": "A list of various traits assigned to this component",
"type": [
"array",
"null"
],
"items": {
"$ref": "#/definitions/Trait"
}
}
}
},
"ComponentProperties": {
"type": "object",
"properties": {
"application": {
"description": "Information to locate a component within a shared application. Cannot be specified if the image is specified.",
"anyOf": [
{
"$ref": "#/definitions/SharedApplicationComponentProperties"
},
{
"type": "null"
}
]
},
"config": {
"description": "Named configuration to pass to the component. The component will be able to retrieve these values at runtime using `wasi:runtime/config.`",
"type": "array",
"items": {
"$ref": "#/definitions/ConfigProperty"
}
},
"id": {
"description": "The component ID to use for this component. If not supplied, it will be generated as a combination of the [Metadata::name] and the image reference.",
"type": [
"string",
"null"
]
},
"image": {
"description": "The image reference to use. Required unless the component is a shared component that is defined in another shared application.",
"type": [
"string",
"null"
]
},
"secrets": {
"description": "Named secret references to pass to the component. The component will be able to retrieve these values at runtime using `wasmcloud:secrets/store`.",
"type": "array",
"items": {
"$ref": "#/definitions/SecretProperty"
}
}
},
"additionalProperties": false
},
"ConfigDefinition": {
"type": "object",
"properties": {
"config": {
"type": "array",
"items": {
"$ref": "#/definitions/ConfigProperty"
}
},
"secrets": {
"type": "array",
"items": {
"$ref": "#/definitions/SecretProperty"
}
}
}
},
"ConfigProperty": {
"description": "Properties for the config list associated with components, providers, and links\n\n## Usage Defining a config block, like so: ```yaml source_config: - name: \"external-secret-kv\" - name: \"default-port\" properties: port: \"8080\" ```\n\nWill result in two config scalers being created, one with the name `basic-kv` and one with the name `default-port`. Wadm will not resolve collisions with configuration names between manifests.",
"type": "object",
"required": [
"name"
],
"properties": {
"name": {
"description": "Name of the config to ensure exists",
"type": "string"
},
"properties": {
"description": "Optional properties to put with the configuration. If the properties are omitted in the manifest, wadm will assume that the configuration is externally managed and will not attempt to create it, only reporting the status as failed if not found.",
"type": [
"object",
"null"
],
"additionalProperties": {
"type": "string"
}
}
},
"additionalProperties": false
},
"LinkProperty": {
"description": "Properties for links",
"type": "object",
"required": [
"interfaces",
"namespace",
"package",
"target"
],
"properties": {
"interfaces": {
"description": "WIT interfaces for the link",
"type": "array",
"items": {
"type": "string"
}
},
"name": {
"description": "The name of this link",
"type": [
"string",
"null"
]
},
"namespace": {
"description": "WIT namespace for the link",
"type": "string"
},
"package": {
"description": "WIT package for the link",
"type": "string"
},
"source": {
"description": "Configuration to apply to the source of the link",
"anyOf": [
{
"$ref": "#/definitions/ConfigDefinition"
},
{
"type": "null"
}
]
},
"source_config": {
"deprecated": true,
"writeOnly": true,
"type": [
"array",
"null"
],
"items": {
"$ref": "#/definitions/ConfigProperty"
}
},
"target": {
"description": "Configuration to apply to the target of the link",
"allOf": [
{
"$ref": "#/definitions/TargetConfig"
}
]
},
"target_config": {
"deprecated": true,
"writeOnly": true,
"type": [
"array",
"null"
],
"items": {
"$ref": "#/definitions/ConfigProperty"
}
}
},
"additionalProperties": false
},
"Metadata": {
"description": "The metadata describing the manifest",
"type": "object",
"required": [
"annotations",
"name"
],
"properties": {
"annotations": {
"description": "Optional data for annotating this manifest see <https://github.com/oam-dev/spec/blob/master/metadata.md#annotations-format>",
"type": "object",
"additionalProperties": {
"type": "string"
}
},
"labels": {
"description": "Optional data for labeling this manifest, see <https://github.com/oam-dev/spec/blob/master/metadata.md#label-format>",
"type": "object",
"additionalProperties": {
"type": "string"
}
},
"name": {
"description": "The name of the manifest. This must be unique per lattice",
"type": "string"
}
}
},
"Policy": {
"description": "A policy definition",
"type": "object",
"required": [
"name",
"properties",
"type"
],
"properties": {
"name": {
"description": "The name of this policy",
"type": "string"
},
"properties": {
"description": "The properties for this policy",
"type": "object",
"additionalProperties": {
"type": "string"
}
},
"type": {
"description": "The type of the policy",
"type": "string"
}
}
},
"SecretProperty": {
"type": "object",
"required": [
"name",
"properties"
],
"properties": {
"name": {
"description": "The name of the secret. This is used by a reference by the component or capability to get the secret value as a resource.",
"type": "string"
},
"properties": {
"description": "The properties of the secret that indicate how to retrieve the secret value from a secrets backend and which backend to actually query.",
"allOf": [
{
"$ref": "#/definitions/SecretSourceProperty"
}
]
}
}
},
"SecretSourceProperty": {
"type": "object",
"required": [
"key",
"policy"
],
"properties": {
"field": {
"description": "The field to use for retrieving the secret from the backend. This is optional and can be used to retrieve a specific field from a secret.",
"type": [
"string",
"null"
]
},
"key": {
"description": "The key to use for retrieving the secret from the backend.",
"type": "string"
},
"policy": {
"description": "The policy to use for retrieving the secret.",
"type": "string"
},
"version": {
"description": "The version of the secret to retrieve. If not supplied, the latest version will be used.",
"type": [
"string",
"null"
]
}
}
},
"SharedApplicationComponentProperties": {
"type": "object",
"required": [
"component",
"name"
],
"properties": {
"component": {
"description": "The name of the component in the shared application",
"type": "string"
},
"name": {
"description": "The name of the shared application",
"type": "string"
}
}
},
"Specification": {
"description": "A representation of an OAM specification",
"type": "object",
"required": [
"components"
],
"properties": {
"components": {
"description": "The list of components for describing an application",
"type": "array",
"items": {
"$ref": "#/definitions/Component"
}
},
"policies": {
"description": "The list of policies describing an application. This is for providing application-wide setting such as configuration for a secrets backend, how to render Kubernetes services, etc. It can be omitted if no policies are needed for an application.",
"type": "array",
"items": {
"$ref": "#/definitions/Policy"
}
}
}
},
"Spread": {
"description": "Configuration for various spreading requirements",
"type": "object",
"required": [
"name",
"requirements"
],
"properties": {
"name": {
"description": "The name of this spread requirement",
"type": "string"
},
"requirements": {
"description": "An arbitrary map of labels to match on for scaling requirements",
"type": "object",
"additionalProperties": {
"type": "string"
}
},
"weight": {
"description": "An optional weight for this spread. Higher weights are given more precedence",
"type": [
"integer",
"null"
],
"format": "uint",
"minimum": 0.0
}
},
"additionalProperties": false
},
"SpreadScalerProperty": {
"description": "Properties for spread scalers",
"type": "object",
"required": [
"instances"
],
"properties": {
"instances": {
"description": "Number of instances to spread across matching requirements",
"type": "integer",
"format": "uint",
"minimum": 0.0
},
"spread": {
"description": "Requirements for spreading those instances",
"type": "array",
"items": {
"$ref": "#/definitions/Spread"
}
}
},
"additionalProperties": false
},
"TargetConfig": {
"type": "object",
"required": [
"name"
],
"properties": {
"config": {
"type": "array",
"items": {
"$ref": "#/definitions/ConfigProperty"
}
},
"name": {
"description": "The target this link applies to. This should be the name of a component in the manifest",
"type": "string"
},
"secrets": {
"type": "array",
"items": {
"$ref": "#/definitions/SecretProperty"
}
}
}
},
"Trait": {
"type": "object",
"required": [
"properties",
"type"
],
"properties": {
"properties": {
"description": "The properties of this trait",
"allOf": [
{
"$ref": "#/definitions/TraitProperty"
}
]
},
"type": {
"description": "The type of trait specified. This should be a unique string for the type of scaler. As we plan on supporting custom scalers, these traits are not enumerated",
"type": "string"
}
},
"additionalProperties": false
},
"TraitProperty": {
"description": "Properties for defining traits",
"anyOf": [
{
"$ref": "#/definitions/LinkProperty"
},
{
"$ref": "#/definitions/SpreadScalerProperty"
},
true
]
}
}
}
View File
@ -6,7 +6,7 @@ The wasmCloud Application Deployment Manager uses the [Open Application Model](h
The following is a list of the `component`s wasmCloud has added to the model.
- `component` - A WebAssembly component
- `actor` - An actor
- `provider` - A capability provider
## wasmCloud OAM Traits
@ -14,15 +14,11 @@ The following is a list of the `component`s wasmCloud has added to the model.
The following is a list of the `traits` wasmCloud has added via customization to its application model.
- `spreadscaler` - Defines the spread of instances of a particular entity across multiple hosts with affinity requirements
- `link` - A link definition that describes a link between a component and a capability provider or a component and another component
## JSON Schema
A JSON schema is automatically generated from our Rust structures and is at the root of the repository: [oam.schema.json](../oam.schema.json). You can regenerate the `oam.schema.json` file by running `cargo run --bin wadm-schema`.
- `linkdef` - A link definition that describes a link between an actor and a capability provider
## Example Application YAML
The following is an example YAML file describing an application
The following is an example YAML file describing an ALC application
```yaml
apiVersion: core.oam.dev/v1beta1
@ -30,7 +26,8 @@ kind: Application
metadata:
name: my-example-app
annotations:
description: 'This is my app revision 2'
version: v0.0.1
description: "This is my app"
spec:
components:
- name: userinfo
@ -50,28 +47,25 @@ spec:
requirements:
zone: us-west-1
weight: 20
- type: linkdef
properties:
target: webcap
values:
port: "8080"
- name: webcap
type: capability
properties:
contract: wasmcloud:httpserver
image: wasmcloud.azurecr.io/httpserver:0.13.1
traits:
- type: link
properties:
target:
name: userinfo
config: []
namespace: wasi
package: http
interfaces:
- incoming-handler
source:
config: []
link_name: default
- name: ledblinky
type: capability
properties:
image: wasmcloud.azurecr.io/ledblinky:0.0.1
contract: wasmcloud:blinkenlights
# default link name is "default"
traits:
- type: spreadscaler
properties:
@ -79,6 +73,6 @@ spec:
spread:
- name: haslights
requirements:
ledenabled: 'true'
# default weight is 100
ledenabled: "true"
# default weight is 100
```
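A manifest like the example above can be validated against the generated `oam.schema.json` programmatically. A minimal sketch using the `jsonschema` and `serde_yaml` crates; the crate choice and file names are illustrative assumptions, not something wadm ships:

```rust
use jsonschema::JSONSchema;

fn main() -> anyhow::Result<()> {
    // Parse the generated schema and the manifest under test.
    let schema: serde_json::Value =
        serde_json::from_str(&std::fs::read_to_string("oam.schema.json")?)?;
    let manifest: serde_json::Value =
        serde_yaml::from_str(&std::fs::read_to_string("my-example-app.yaml")?)?;

    let compiled = JSONSchema::compile(&schema)
        .map_err(|e| anyhow::anyhow!("invalid schema: {e}"))?;
    if let Err(errors) = compiled.validate(&manifest) {
        for error in errors {
            eprintln!("validation error: {error}");
        }
        anyhow::bail!("manifest failed schema validation");
    }
    println!("manifest is valid");
    Ok(())
}
```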
View File
@ -3,13 +3,13 @@ kind: Application
metadata:
name: config-example
annotations:
description: 'This is my app'
description: "This is my app"
spec:
components:
- name: http
type: component
properties:
image: ghcr.io/wasmcloud/components/http-hello-world-rust:0.1.0
image: wasmcloud.azurecr.io/http-hello-world:0.1.0
# You can pass any config data you'd like sent to your component as a string->string map
config:
- name: component_config
@ -19,10 +19,10 @@ spec:
- name: webcap
type: capability
properties:
image: ghcr.io/wasmcloud/http-server:0.23.0
image: ghcr.io/wasmcloud/http-server:0.20.0
# You can pass any config data you'd like sent to your provider as a string->string map
config:
- name: provider_config
properties:
default-port: '8080'
cache_file: '/tmp/mycache.json'
default-port: "8080"
cache_file: "/tmp/mycache.json"
View File
@ -27,14 +27,12 @@ spec:
traits:
- type: link
properties:
target:
name: userinfo
target: userinfo
namespace: wasi
package: http
interfaces:
- incoming-handler
source:
config:
- name: default-port
properties:
port: "8080"
source_config:
- name: default-port
properties:
port: "8080"
View File
@ -3,7 +3,7 @@ kind: Application
metadata:
name: echo
annotations:
description: 'This is my app'
description: "This is my app"
spec:
components:
- name: echo
@ -18,6 +18,7 @@ spec:
- name: httpserver
type: capability
properties:
contract: wasmcloud:httpserver
image: wasmcloud.azurecr.io/httpserver:0.17.0
traits:
- type: spreadscaler
@ -25,14 +26,12 @@ spec:
instances: 1
- type: link
properties:
target:
name: echo
target: echo
namespace: wasi
package: http
interfaces:
- incoming-handler
source:
config:
- name: default-port
properties:
address: 0.0.0.0:8080
source_config:
- name: default-port
properties:
address: 0.0.0.0:8080
View File
@ -11,28 +11,27 @@ spec:
type: component
properties:
# Run components from OCI registries as below or from a local .wasm component binary.
image: ghcr.io/wasmcloud/components/http-hello-world-rust:0.1.0
image: wasmcloud.azurecr.io/http-hello-world:0.1.0
traits:
# One replica of this component will run
- type: spreadscaler
properties:
instances: 1
replicas: 1
# The httpserver capability provider, started from the official wasmCloud OCI artifact
- name: httpserver
type: capability
properties:
image: ghcr.io/wasmcloud/http-server:0.23.0
image: ghcr.io/wasmcloud/http-server:0.20.0
traits:
# Link the HTTP server and set it to listen on the local machine's port 8080
- type: link
properties:
target:
name: http-component
target: http-component
namespace: wasi
package: http
interfaces: [incoming-handler]
source:
config:
- name: default-http
properties:
ADDRESS: 127.0.0.1:8080
source_config:
- name: default-http
properties:
ADDRESS: 127.0.0.1:8080
View File
@ -3,7 +3,7 @@ kind: Application
metadata:
name: kvcounter-rust
annotations:
description: 'Kvcounter demo in Rust, using the WebAssembly Component Model and WebAssembly Interfaces Types (WIT)'
description: "Kvcounter demo in Rust, using the WebAssembly Component Model and WebAssembly Interfaces Types (WIT)"
labels:
app.oam.io/name: kvcounter-rust
spec:
@ -13,46 +13,42 @@ spec:
properties:
image: file:///Users/brooks/github.com/wasmcloud/wadm/kvc/build/http_hello_world_s.wasm
traits:
# Govern the spread/scheduling of the component
# Govern the spread/scheduling of the actor
- type: spreadscaler
properties:
instances: 1
replicas: 1
# Compose with KVRedis for wasi:keyvalue calls
- type: link
properties:
target:
name: kvredis
config:
- name: redis-connect-local
properties:
url: redis://127.0.0.1:6379
target: kvredis
namespace: wasi
package: keyvalue
interfaces:
- atomic
- eventual
target_config:
- name: redis-connect-local
properties:
url: redis://127.0.0.1:6379
# Add a capability provider that mediates HTTP access
- name: httpserver
type: capability
properties:
image: ghcr.io/wasmcloud/http-server:0.23.0
image: ghcr.io/wasmcloud/http-server:0.20.0
traits:
# Compose with component to handle wasi:http calls
- type: link
properties:
target:
name: kvcounter
target: kvcounter
namespace: wasi
package: http
interfaces:
- incoming-handler
source:
config:
- name: listen-config
properties:
address: 127.0.0.1:8080
source_config:
- name: listen-config
properties:
address: 127.0.0.1:8080
# Add a capability provider that interfaces with the Redis key-value store
- name: kvredis
type: capability
46 oam/kvcounter_old.yaml Normal file
View File
@ -0,0 +1,46 @@
apiVersion: core.oam.dev/v1beta1
kind: Application
metadata:
name: rust-kvcounter
annotations:
version: v0.0.1
description: "HTTP hello world demo in Rust, using the WebAssembly Component Model and WebAssembly Interfaces Types (WIT)"
experimental: true
spec:
components:
- name: kvcounter
type: component
properties:
image: file://./build/http_hello_world_s.wasm
traits:
# Govern the spread/scheduling of the actor
- type: spreadscaler
properties:
replicas: 1
# Link the HTTP server, and inform it to listen on port 8080
# on the local machine
- type: linkdef
properties:
target: httpserver
values:
ADDRESS: 127.0.0.1:8080
# Link to Redis
- type: linkdef
properties:
target: keyvalue
values:
URL: redis://127.0.0.1:6379
# Add a capability provider that mediates HTTP access
- name: httpserver
type: capability
properties:
image: wasmcloud.azurecr.io/httpserver:0.19.1
contract: wasmcloud:httpserver
link_name: default
# Add a capability provider that interfaces with the Redis key-value store
- name: kvredis
type: capability
properties:
image: ghcr.io/wasmcloud/keyvalue-redis:0.23.0
contract: wasmcloud:keyvalue
411 oam/oam.schema.json Normal file
View File
@ -0,0 +1,411 @@
{
"$schema": "http://json-schema.org/draft-07/schema#",
"$id": "http://oam.dev/v1/oam.application_configuration.schema.json",
"title": "Manifest",
"description": "A JSON Schema to validate wasmCloud Application Deployment Manager (WADM) manifests",
"type": "object",
"properties": {
"apiVersion": {
"type": "string",
"description": "The specific version of the Open Application Model specification in use"
},
"kind": {
"type": "string",
"description": "The entity type being described in the manifest"
},
"metadata": {
"type": "object",
"description": "Application configuration metadata.",
"properties": {
"name": {
"type": "string"
},
"annotations": {
"type": "object",
"description": "A set of string key/value pairs used as arbitrary annotations on this application configuration.",
"properties": {
"description": {
"type": "string"
}
},
"additionalProperties": {
"type": "string"
}
}
}
},
"spec": {
"type": "object",
"description": "Configuration attributes for various items in the lattice",
"$ref": "#/definitions/manifestSpec"
}
},
"required": [
"apiVersion",
"kind",
"metadata",
"spec"
],
"additionalProperties": false,
"definitions": {
"manifestSpec": {
"type": "object",
"properties": {
"components": {
"type": "array",
"description": "Component instance definitions.",
"items": {
"type": "object",
"anyOf": [
{
"$ref": "#/definitions/wasmComponent"
},
{
"$ref": "#/definitions/providerComponent"
}
]
}
}
},
"required": [
"components"
],
"additionalProperties": false
},
"opconfigVariable": {
"type": "object",
"description": "The Variables section defines variables that may be used elsewhere in the application configuration. The variable section provides a way for an application operator to specify common values that can be substituted into multiple other locations in this configuration (using the [fromVariable(VARNAME)] syntax).",
"properties": {
"name": {
"type": "string",
"description": "The parameter's name. Must be unique per configuration.",
"$comment": "Some systems have upper bounds for name length. Do we limit here?",
"maxLength": 128
},
"value": {
"type": "string",
"description": "The scalar value."
}
},
"required": [
"name",
"value"
],
"additionalProperties": false
},
"applicationScope": {
"type": "object",
"description": "The scope section defines application scopes that will be created with this application configuration.",
"properties": {
"name": {
"type": "string",
"description": "The name of the application scope. Must be unique to the deployment environment."
},
"type": {
"type": "string",
"description": "The fully-qualified GROUP/VERSION.KIND name of the application scope."
},
"properties": {
"type": "object",
"description": "The properties attached to this scope.",
"$ref": "#/definitions/propertiesObject"
}
},
"required": [
"name",
"type"
],
"additionalProperties": false
},
"wasmComponent": {
"type": "object",
"description": "This section defines the instances of components to create with this application configuration.",
"properties": {
"name": {
"type": "string",
"description": "The name of the component to create an instance of."
},
"type": {
"description": "The type of instance : component.",
"anyOf": [
{
"const": "component"
},
{
"const": "actor",
"$comment": "Deprecated: use 'component' instead"
}
]
},
"properties": {
"type": "object",
"description": "Overrides of parameters that are exposed by the application scope type defined in 'type'.",
"$ref": "#/definitions/componentProperties"
},
"traits": {
"type": "array",
"description": "Specifies the traits to attach to this component instance.",
"items": {
"$ref": "#/definitions/trait"
}
}
},
"required": [
"name",
"type",
"properties"
],
"additionalProperties": true
},
"providerComponent": {
"type": "object",
"description": "This section defines the instances of providers to create with this application configuration.",
"properties": {
"name": {
"type": "string",
"description": "The name of the provider to create an instance of."
},
"type": {
"description": "The type of instance: capability.",
"const": "capability"
},
"properties": {
"type": "object",
"description": "Overrides of parameters that are exposed by the application scope type defined in 'type'.",
"$ref": "#/definitions/providerProperties"
},
"traits": {
"type": "array",
"description": "Specifies the traits to attach to this component instance.",
"items": {
"$ref": "#/definitions/trait"
}
}
},
"required": [
"name",
"type",
"properties"
],
"additionalProperties": true
},
"componentProperties": {
"type": "object",
"description": "Values supplied to parameters that are used to override the parameters exposed by other types.",
"properties": {
"image": {
"type": "string",
"description": "The image reference to use for the component.",
"$comment": "Some systems have upper bounds for name length. Do we limit here?",
"maxLength": 512
},
"id": {
"type": "string",
"description": "The component identifier to use for the component. Will be autogenerated if not supplied.",
"maxLength": 64
},
"config": {
"type": "array",
"items": {
"$ref": "#/definitions/configProperty"
},
"default": [],
"description": "Configuration properties for the provider"
}
},
"required": [
"image"
],
"additionalProperties": false
},
"providerProperties": {
"type": "object",
"description": "Values supplied to parameters that are used to override the parameters exposed by other types.",
"properties": {
"image": {
"type": "string",
"description": "The image reference to use for the provider.",
"$comment": "Some systems have upper bounds for name length. Do we limit here?",
"maxLength": 512
},
"id": {
"type": "string",
"description": "The component identifier to use for the provider.",
"maxLength": 64
},
"config": {
"type": "array",
"items": {
"$ref": "#/definitions/configProperty"
},
"default": [],
"description": "Configuration properties for the provider"
}
},
"required": [
"image"
],
"additionalProperties": false
},
"trait": {
"type": "object",
"description": "The trait section defines traits that will be used in a component instance.",
"properties": {
"type": {
"type": "string",
"description": "The trait type for the instance, whether spreadscaler or link"
},
"properties": {
"type": "object",
"description": "Overrides of parameters that are exposed by the trait type defined in 'type'.",
"anyOf": [
{
"$ref": "#/definitions/linkProperties"
},
{
"$ref": "#/definitions/spreadscalerProperties"
}
]
}
},
"required": [
"type",
"properties"
],
"additionalProperties": false
},
"configProperty": {
"type": "object",
"properties": {
"name": {
"type": "string"
},
"properties": {
"type": "object",
"additionalProperties": {
"type": "string"
}
}
},
"required": [
"name"
],
"additionalProperties": false
},
"linkProperties": {
"target": {
"type": "string",
"description": "The target this link applies to. This should be the name of a component in the manifest"
},
"namespace": {
"type": "string",
"description": "WIT namespace for the link"
},
"package": {
"type": "string",
"description": "WIT package for the link"
},
"interfaces": {
"type": "array",
"items": {
"type": "string"
},
"description": "WIT interfaces for the link"
},
"source_config": {
"type": "array",
"items": {
"$ref": "#/definitions/configProperty"
},
"default": [],
"description": "Configuration properties for the source of the link"
},
"target_config": {
"type": "array",
"items": {
"$ref": "#/definitions/configProperty"
},
"default": [],
"description": "Configuration properties for the target of the link"
},
"name": {
"type": "string",
"description": "The name of this link",
"default": null
},
"required": [
"target",
"namespace",
"package",
"interfaces"
]
},
"spreadscalerProperties": {
"type": "object",
"description": "A properties object (for spreadscaler configuration) is an object whose structure is determined by the spreadscaler property schema. It may be a simple value, or it may be a complex object.",
"properties": {
"instances": {
"anyOf": [
{
"type": "integer",
"title": "instances"
},
{
"type": "integer",
"title": "replicas"
}
]
},
"spread": {
"type": "array",
"items": {
"type": "object",
"description": "A spread object for spreading replicas.",
"properties": {
"name": {
"type": "string"
},
"requirements": {
"additionalProperties": {
"type": "string"
}
},
"weight": {
"type": "integer"
}
},
"required": [
"name",
"requirements"
]
}
}
},
"oneOf": [
{
"required": [
"instances"
]
},
{
"required": [
"replicas"
]
}
]
},
"propertiesObject": {
"anyOf": [
{
"type": "object",
"description": "A properties object (for trait and scope configuration) is an object whose structure is determined by the trait or scope property schema. It may be a simple value, or it may be a complex object.",
"additionalProperties": true
},
{
"type": "string",
"description": "A properties object (for trait and scope configuration) is an object whose structure is determined by the trait or scope property schema. It may be a simple value, or it may be a complex object."
}
]
}
}
}
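
Taken together, the definitions above describe a complete wadm/OAM manifest. The sketch below exercises wasmComponent, providerComponent, the spreadscaler and link traits, and configProperty; every name, image reference, and WIT interface value is an illustrative placeholder, not taken from this repository.

apiVersion: core.oam.dev/v1beta1
kind: Application
metadata:
  name: example-app                  # illustrative name
spec:
  components:
    - name: counter                  # wasmComponent: type 'component' requires 'image'
      type: component
      properties:
        image: ghcr.io/example/counter:0.1.0    # placeholder image reference
      traits:
        - type: spreadscaler         # spreadscalerProperties: 'instances' (or deprecated 'replicas')
          properties:
            instances: 2
        - type: link                 # linkProperties: target, namespace, package, interfaces required
          properties:
            target: kvstore
            namespace: wasi
            package: keyvalue
            interfaces: [store]      # placeholder interface name
            target_config:           # optional list of configProperty objects
              - name: default-kv
                properties:
                  url: redis://127.0.0.1:6379
    - name: kvstore                  # providerComponent: type must be 'capability'
      type: capability
      properties:
        image: ghcr.io/example/keyvalue-redis:0.1.0   # placeholder image reference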

View File

@ -53,7 +53,9 @@
"target": "webcap",
"namespace": "wasi",
"package": "http",
"interfaces": ["incoming-handler"],
"interfaces": [
"incoming-handler"
],
"name": "default"
}
}
@ -84,4 +86,4 @@
}
]
}
}
}

View File

@ -31,8 +31,7 @@ spec:
traits:
- type: link
properties:
target:
name: webcap
target: webcap
namespace: wasi
package: http
interfaces: ["incoming-handler"]

View File

@ -31,15 +31,13 @@ spec:
traits:
- type: link
properties:
target:
name: userinfo
config: []
target: userinfo
namespace: wasi
package: http
interfaces:
- incoming-handler
source:
config: []
source_config: []
target_config: []
- name: ledblinky
type: capability
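
The two hunks above capture the link-trait migration in this changeset: the nested target/source objects are flattened into a scalar target plus source_config and target_config lists. A side-by-side sketch of the two shapes (component and interface names are placeholders):

# Old shape (as removed above):
- type: link
  properties:
    target:
      name: userinfo
      config: []
    source:
      config: []
    namespace: wasi
    package: http
    interfaces:
      - incoming-handler

# New shape (as added above):
- type: link
  properties:
    target: userinfo
    namespace: wasi
    package: http
    interfaces:
      - incoming-handler
    source_config: []
    target_config: []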

View File

@ -1,49 +0,0 @@
# Copied from https://github.com/wasmCloud/wasmCloud/blob/main/examples/rust/components/sqldb-postgres-query/wadm.yaml
---
apiVersion: core.oam.dev/v1beta1
kind: Application
metadata:
name: rust-sqldb-postgres-query
annotations:
version: v0.0.1
description: |
Demo WebAssembly component using the wasmCloud SQLDB Postgres provider via the wasmcloud:postgres WIT interface
wasmcloud.dev/authors: wasmCloud team
wasmcloud.dev/source-url: https://github.com/wasmCloud/wasmCloud/blob/main/examples/rust/components/sqldb-postgres-quer/wadm.yaml
wasmcloud.dev/readme-md-url: https://github.com/wasmCloud/wasmCloud/blob/main/examples/rust/components/sqldb-postgres-quer/README.md
wasmcloud.dev/homepage: https://github.com/wasmCloud/wasmCloud/tree/main/examples/rust/components/sqldb-postgres-quer
wasmcloud.dev/categories: |
database,sqldb,postgres,rust,example
spec:
components:
- name: querier
type: component
properties:
# To use the locally compiled code in this folder, use the line below instead after running `wash build`:
# image: file://./build/sqldb_postgres_query_s.wasm
image: ghcr.io/wasmcloud/components/sqldb-postgres-query-rust:0.1.0
traits:
# Govern the spread/scheduling of the component
- type: spreadscaler
properties:
instances: 1
# Establish a unidirectional link to the `sqldb-postgres` provider (the sqldb provider),
# so the `querier` component can make use of sqldb functionality provided by Postgres
# (i.e. reading/writing to a database)
- type: link
properties:
target:
name: sqldb-postgres
config:
- name: default-postgres
namespace: wasmcloud
package: postgres
interfaces: [query]
# Add a capability provider that interacts with a Postgres database
- name: sqldb-postgres
type: capability
properties:
image: ghcr.io/wasmcloud/sqldb-postgres:0.2.0
config:
- name: 'default-postgres'
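
Note that this removed example still used the pre-migration nested link form (a target with a name and config block). Under the flattened syntax adopted elsewhere in this changeset, the same link would look roughly like the sketch below (not part of the diff):

- type: link
  properties:
    target: sqldb-postgres
    namespace: wasmcloud
    package: postgres
    interfaces:
      - query
    target_config:
      - name: default-postgres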

View File

@ -1,3 +0,0 @@
[toolchain]
channel = "stable"
components = ["clippy", "rust-src", "rustfmt"]

View File

@ -1,10 +1,9 @@
use std::io::IsTerminal;
use opentelemetry::sdk::{
trace::{IdGenerator, Sampler},
Resource,
};
use opentelemetry_otlp::{Protocol, WithExportConfig};
use std::io::IsTerminal;
use tracing::{Event as TracingEvent, Subscriber};
use tracing_subscriber::fmt::{
format::{Format, Full, Json, JsonFields, Writer},
@ -103,9 +102,10 @@ pub fn configure_tracing(
}
}
fn get_log_layer<S>(structured_logging: bool) -> Box<dyn Layer<S> + Send + Sync + 'static>
fn get_log_layer<S: for<'a> tracing_subscriber::registry::LookupSpan<'a>>(
structured_logging: bool,
) -> Box<dyn Layer<S> + Send + Sync + 'static>
where
S: for<'a> tracing_subscriber::registry::LookupSpan<'a>,
S: tracing::Subscriber,
{
let log_layer = tracing_subscriber::fmt::layer()

View File

@ -1,40 +1,428 @@
use anyhow::Context as _;
use clap::Parser;
use wadm::{config::WadmConfig, start_wadm};
use std::path::PathBuf;
use std::sync::Arc;
use std::time::Duration;
use async_nats::jetstream::{stream::Stream, Context};
use clap::Parser;
use tokio::sync::Semaphore;
use tracing::log::debug;
use wadm_types::api::DEFAULT_WADM_TOPIC_PREFIX;
use wadm::{
consumers::{
manager::{ConsumerManager, WorkerCreator},
*,
},
mirror::Mirror,
nats_utils::LatticeIdParser,
scaler::manager::{ScalerManager, WADM_NOTIFY_PREFIX},
server::{ManifestNotifier, Server},
storage::{nats_kv::NatsKvStore, reaper::Reaper},
workers::{CommandPublisher, CommandWorker, EventWorker, StatusPublisher},
DEFAULT_COMMANDS_TOPIC, DEFAULT_EVENTS_TOPIC, DEFAULT_MULTITENANT_EVENTS_TOPIC,
DEFAULT_STATUS_TOPIC, DEFAULT_WADM_EVENTS_TOPIC,
};
mod connections;
mod logging;
mod nats;
mod observer;
use connections::ControlClientConstructor;
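// JetStream streams that back wadm's internals: the event and command streams
// feed the consumer managers created in main(), the status stream stores
// per-application status, the mirror stream aggregates lattice events into one
// stream, and the notify stream carries scaler notifications (WADM_NOTIFY_PREFIX).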
const EVENT_STREAM_NAME: &str = "wadm_events";
const COMMAND_STREAM_NAME: &str = "wadm_commands";
const STATUS_STREAM_NAME: &str = "wadm_status";
const MIRROR_STREAM_NAME: &str = "wadm_mirror";
const MULTITENANT_MIRROR_STREAM_NAME: &str = "wadm_multitenant_mirror";
const NOTIFY_STREAM_NAME: &str = "wadm_notify";
#[derive(Parser, Debug)]
#[command(name = clap::crate_name!(), version = clap::crate_version!(), about = "wasmCloud Application Deployment Manager", long_about = None)]
struct Args {
/// The ID for this wadm process. Defaults to a random UUIDv4 if none is provided. This is used
/// to help with debugging when identifying which process is doing the work
#[arg(short = 'i', long = "host-id", env = "WADM_HOST_ID")]
host_id: Option<String>,
/// Whether or not to use structured log output (as JSON)
#[arg(
short = 'l',
long = "structured-logging",
default_value = "false",
env = "WADM_STRUCTURED_LOGGING"
)]
structured_logging: bool,
/// Whether or not to enable opentelemetry tracing
#[arg(
short = 't',
long = "tracing",
default_value = "false",
env = "WADM_TRACING_ENABLED"
)]
tracing_enabled: bool,
/// The endpoint to use for tracing. Setting this flag enables tracing, even if --tracing is set
/// to false. Defaults to http://localhost:4318/v1/traces if not set and tracing is enabled
#[arg(short = 'e', long = "tracing-endpoint", env = "WADM_TRACING_ENDPOINT")]
tracing_endpoint: Option<String>,
/// The NATS JetStream domain to connect to
#[arg(short = 'd', env = "WADM_JETSTREAM_DOMAIN")]
domain: Option<String>,
/// (Advanced) Tweak the maximum number of jobs to run for handling events and commands. Be
/// careful how you use this as it can affect performance
#[arg(short = 'j', long = "max-jobs", env = "WADM_MAX_JOBS")]
max_jobs: Option<usize>,
/// The URL of the nats server you want to connect to
#[arg(
short = 's',
long = "nats-server",
env = "WADM_NATS_SERVER",
default_value = "127.0.0.1:4222"
)]
nats_server: String,
/// Use the specified nkey file or seed literal for authentication. Must be used in conjunction with --nats-jwt
#[arg(
long = "nats-seed",
env = "WADM_NATS_NKEY",
conflicts_with = "nats_creds",
requires = "nats_jwt"
)]
nats_seed: Option<String>,
/// Use the specified jwt file or literal for authentication. Must be used in conjunction with --nats-seed
#[arg(
long = "nats-jwt",
env = "WADM_NATS_JWT",
conflicts_with = "nats_creds",
requires = "nats_seed"
)]
nats_jwt: Option<String>,
/// (Optional) NATS credential file to use when authenticating
#[arg(
long = "nats-creds-file",
env = "WADM_NATS_CREDS_FILE",
conflicts_with_all = ["nats_seed", "nats_jwt"],
)]
nats_creds: Option<PathBuf>,
/// (Optional) NATS TLS certificate file to use when authenticating
#[arg(long = "nats-tls-ca-file", env = "WADM_NATS_TLS_CA_FILE")]
nats_tls_ca_file: Option<PathBuf>,
/// Name of the bucket used for storage of lattice state
#[arg(
long = "state-bucket-name",
env = "WADM_STATE_BUCKET_NAME",
default_value = "wadm_state"
)]
state_bucket: String,
/// The amount of time (in seconds) a host can fail to heartbeat before it is removed from the
/// store. By default, this is 120s because it is 4x the host heartbeat interval
#[arg(
long = "cleanup-interval",
env = "WADM_CLEANUP_INTERVAL",
default_value = "120"
)]
cleanup_interval: u64,
/// The API topic prefix to use. This is an advanced setting that should only be used if you
/// know what you are doing
#[arg(
long = "api-prefix",
env = "WADM_API_PREFIX",
default_value = DEFAULT_WADM_TOPIC_PREFIX
)]
api_prefix: String,
/// The prefix to use for the internal streams. When running in a multitenant environment,
/// clients share the same JS domain (since messages need to come from lattices).
/// Setting a stream prefix makes it possible to have separate streams for different wadm
/// instances running in the same multitenant environment.
/// This is an advanced setting that should only be used if you know what you are doing.
#[arg(long = "stream-prefix", env = "WADM_STREAM_PREFIX")]
stream_prefix: Option<String>,
/// Name of the bucket used for storage of manifests
#[arg(
long = "manifest-bucket-name",
env = "WADM_MANIFEST_BUCKET_NAME",
default_value = "wadm_manifests"
)]
manifest_bucket: String,
/// Run wadm in multitenant mode. This is for advanced multitenant use cases with segmented NATS
/// account traffic and not simple cases where all lattices use credentials from the same
/// account. See the deployment guide for more information
#[arg(long = "multitenant", env = "WADM_MULTITENANT", hide = true)]
multitenant: bool,
}
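// Example invocation (hypothetical values, built from the flags defined above):
//   wadm --nats-server 127.0.0.1:4222 --state-bucket-name wadm_state \
//        --manifest-bucket-name wadm_manifests --cleanup-interval 120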
#[tokio::main]
async fn main() -> anyhow::Result<()> {
let args = WadmConfig::parse();
let args = Args::parse();
logging::configure_tracing(
args.structured_logging,
args.tracing_enabled,
args.tracing_endpoint.clone(),
args.tracing_endpoint,
);
let mut wadm = start_wadm(args).await.context("failed to run wadm")?;
tokio::select! {
res = wadm.join_next() => {
match res {
Some(Ok(_)) => {
tracing::info!("WADM has exited successfully");
std::process::exit(0);
}
Some(Err(e)) => {
tracing::error!("WADM has exited with an error: {:?}", e);
std::process::exit(1);
}
None => {
tracing::info!("WADM server did not start");
std::process::exit(0);
}
// Build storage adapter for lattice state (on by default)
let (client, context) = nats::get_client_and_context(
args.nats_server.clone(),
args.domain.clone(),
args.nats_seed.clone(),
args.nats_jwt.clone(),
args.nats_creds.clone(),
args.nats_tls_ca_file.clone(),
)
.await?;
// TODO: We will probably need to set up all the flags (like lattice prefix and topic prefix) down the line
let connection_pool = ControlClientConstructor::new(client.clone(), None);
let trimmer: &[_] = &['.', '>', '*'];
let store = nats::ensure_kv_bucket(&context, args.state_bucket, 1).await?;
let state_storage = NatsKvStore::new(store);
let manifest_storage = nats::ensure_kv_bucket(&context, args.manifest_bucket, 1).await?;
debug!("Ensuring event stream");
let internal_stream_name = |stream_name: &str| -> String {
match args.stream_prefix.clone() {
Some(stream_prefix) => {
format!(
"{}.{}",
stream_prefix.trim_end_matches(trimmer),
stream_name
)
}
None => stream_name.to_string(),
}
_ = tokio::signal::ctrl_c() => {
tracing::info!("Received Ctrl+C, shutting down");
std::process::exit(0);
};
let event_stream = nats::ensure_stream(
&context,
internal_stream_name(EVENT_STREAM_NAME),
vec![DEFAULT_WADM_EVENTS_TOPIC.to_owned()],
Some(
"A stream that stores all events coming in on the wasmbus.evt topics in a cluster"
.to_string(),
),
)
.await?;
debug!("Ensuring command stream");
let command_stream = nats::ensure_stream(
&context,
internal_stream_name(COMMAND_STREAM_NAME),
vec![DEFAULT_COMMANDS_TOPIC.to_owned()],
Some("A stream that stores all commands for wadm".to_string()),
)
.await?;
let status_stream = nats::ensure_status_stream(
&context,
internal_stream_name(STATUS_STREAM_NAME),
vec![DEFAULT_STATUS_TOPIC.to_owned()],
)
.await?;
let (event_stream_topics, mirror_stream) = if args.multitenant {
debug!("Running in multitenant mode");
(
vec![DEFAULT_MULTITENANT_EVENTS_TOPIC.to_owned()],
MULTITENANT_MIRROR_STREAM_NAME,
)
} else {
(vec![DEFAULT_EVENTS_TOPIC.to_owned()], MIRROR_STREAM_NAME)
};
debug!("Ensuring mirror stream");
let mirror_stream = nats::ensure_stream(
&context,
internal_stream_name(mirror_stream),
event_stream_topics.clone(),
Some("A stream that publishes all events to the same stream".to_string()),
)
.await?;
debug!("Ensuring notify stream");
let notify_stream = nats::ensure_notify_stream(
&context,
NOTIFY_STREAM_NAME.to_owned(),
vec![format!("{WADM_NOTIFY_PREFIX}.*")],
)
.await?;
debug!("Creating event consumer manager");
let permit_pool = Arc::new(Semaphore::new(
args.max_jobs.unwrap_or(Semaphore::MAX_PERMITS),
));
let event_worker_creator = EventWorkerCreator {
state_store: state_storage.clone(),
manifest_store: manifest_storage.clone(),
pool: connection_pool.clone(),
command_topic_prefix: DEFAULT_COMMANDS_TOPIC.trim_matches(trimmer).to_owned(),
publisher: context.clone(),
notify_stream,
status_stream: status_stream.clone(),
};
let events_manager: ConsumerManager<EventConsumer> = ConsumerManager::new(
permit_pool.clone(),
event_stream,
event_worker_creator.clone(),
args.multitenant,
)
.await;
debug!("Creating command consumer manager");
let command_worker_creator = CommandWorkerCreator {
pool: connection_pool,
};
let commands_manager: ConsumerManager<CommandConsumer> = ConsumerManager::new(
permit_pool.clone(),
command_stream,
command_worker_creator.clone(),
args.multitenant,
)
.await;
// TODO(thomastaylor312): We might want to figure out how not to run this globally. Doing a
// synthetic event sent to the stream could be nice, but all the wadm processes would still fire
// off that tick, resulting in multiple people handling. We could maybe get it to work with the
// right duplicate window, but we have no idea when each process could fire a tick. Worst case
// scenario right now is that multiple fire simultaneously and a few of them just delete nothing
let reaper = Reaper::new(
state_storage.clone(),
Duration::from_secs(args.cleanup_interval / 2),
[],
);
let wadm_event_prefix = DEFAULT_WADM_EVENTS_TOPIC.trim_matches(trimmer);
debug!("Creating lattice observer");
let observer = observer::Observer {
parser: LatticeIdParser::new("wasmbus", args.multitenant),
command_manager: commands_manager,
event_manager: events_manager,
mirror: Mirror::new(mirror_stream, wadm_event_prefix),
reaper,
client: client.clone(),
command_worker_creator,
event_worker_creator,
};
debug!("Subscribing to API topic");
let server = Server::new(
manifest_storage,
client,
Some(&args.api_prefix),
args.multitenant,
status_stream,
ManifestNotifier::new(wadm_event_prefix, context),
)
.await?;
tokio::select! {
res = server.serve() => {
res?
}
res = observer.observe(event_stream_topics) => {
res?
}
_ = tokio::signal::ctrl_c() => {}
}
Ok(())
}
#[derive(Clone)]
struct CommandWorkerCreator {
pool: ControlClientConstructor,
}
#[async_trait::async_trait]
impl WorkerCreator for CommandWorkerCreator {
type Output = CommandWorker;
async fn create(
&self,
lattice_id: &str,
multitenant_prefix: Option<&str>,
) -> anyhow::Result<Self::Output> {
let client = self.pool.get_connection(lattice_id, multitenant_prefix);
Ok(CommandWorker::new(client))
}
}
#[derive(Clone)]
struct EventWorkerCreator<StateStore> {
state_store: StateStore,
manifest_store: async_nats::jetstream::kv::Store,
pool: ControlClientConstructor,
command_topic_prefix: String,
publisher: Context,
notify_stream: Stream,
status_stream: Stream,
}
#[async_trait::async_trait]
impl<StateStore> WorkerCreator for EventWorkerCreator<StateStore>
where
StateStore: wadm::storage::Store + Send + Sync + Clone + 'static,
{
type Output = EventWorker<StateStore, wasmcloud_control_interface::Client, Context>;
async fn create(
&self,
lattice_id: &str,
multitenant_prefix: Option<&str>,
) -> anyhow::Result<Self::Output> {
let client = self.pool.get_connection(lattice_id, multitenant_prefix);
let command_publisher = CommandPublisher::new(
self.publisher.clone(),
&format!("{}.{lattice_id}", self.command_topic_prefix),
);
let status_publisher = StatusPublisher::new(
self.publisher.clone(),
Some(self.status_stream.clone()),
&format!("wadm.status.{lattice_id}"),
);
let manager = ScalerManager::new(
self.publisher.clone(),
self.notify_stream.clone(),
lattice_id,
multitenant_prefix,
self.state_store.clone(),
self.manifest_store.clone(),
command_publisher.clone(),
status_publisher.clone(),
client.clone(),
)
.await?;
Ok(EventWorker::new(
self.state_store.clone(),
client,
command_publisher,
status_publisher,
manager,
))
}
}

Some files were not shown because too many files have changed in this diff.