Compare commits


260 Commits

Author SHA1 Message Date
dependabot[bot] abf0702404 chore(deps): bump nkeys from 0.4.4 to 0.4.5
Bumps [nkeys](https://github.com/wasmcloud/nkeys) from 0.4.4 to 0.4.5.
- [Release notes](https://github.com/wasmcloud/nkeys/releases)
- [Commits](https://github.com/wasmcloud/nkeys/compare/v0.4.4...v0.4.5)

---
updated-dependencies:
- dependency-name: nkeys
  dependency-version: 0.4.5
  dependency-type: direct:production
  update-type: version-update:semver-patch
...

Signed-off-by: dependabot[bot] <support@github.com>
2025-07-08 17:53:54 -06:00
Brooks Townsend 67d8b25f27 chore(chart): bump to latest app version
Signed-off-by: Brooks Townsend <brooks@cosmonic.com>
2025-06-18 11:03:44 -04:00
dependabot[bot] b376c3ae2b chore(deps): bump softprops/action-gh-release from 2.2.2 to 2.3.2
Bumps [softprops/action-gh-release](https://github.com/softprops/action-gh-release) from 2.2.2 to 2.3.2.
- [Release notes](https://github.com/softprops/action-gh-release/releases)
- [Changelog](https://github.com/softprops/action-gh-release/blob/master/CHANGELOG.md)
- [Commits](da05d55257...72f2c25fcb)

---
updated-dependencies:
- dependency-name: softprops/action-gh-release
  dependency-version: 2.3.2
  dependency-type: direct:production
  update-type: version-update:semver-minor
...

Signed-off-by: dependabot[bot] <support@github.com>
2025-06-16 14:04:52 -06:00
dependabot[bot] eec6ca1c03 chore(deps): bump clap from 4.5.39 to 4.5.40
Bumps [clap](https://github.com/clap-rs/clap) from 4.5.39 to 4.5.40.
- [Release notes](https://github.com/clap-rs/clap/releases)
- [Changelog](https://github.com/clap-rs/clap/blob/master/CHANGELOG.md)
- [Commits](https://github.com/clap-rs/clap/compare/clap_complete-v4.5.39...clap_complete-v4.5.40)

---
updated-dependencies:
- dependency-name: clap
  dependency-version: 4.5.40
  dependency-type: direct:production
  update-type: version-update:semver-patch
...

Signed-off-by: dependabot[bot] <support@github.com>
2025-06-16 13:59:42 -06:00
dependabot[bot] cf9ef590b3 chore(deps): bump taiki-e/install-action from 2.52.7 to 2.53.0
Bumps [taiki-e/install-action](https://github.com/taiki-e/install-action) from 2.52.7 to 2.53.0.
- [Release notes](https://github.com/taiki-e/install-action/releases)
- [Changelog](https://github.com/taiki-e/install-action/blob/main/CHANGELOG.md)
- [Commits](92f69c1952...cfe1303741)

---
updated-dependencies:
- dependency-name: taiki-e/install-action
  dependency-version: 2.53.0
  dependency-type: direct:production
  update-type: version-update:semver-minor
...

Signed-off-by: dependabot[bot] <support@github.com>
2025-06-16 13:58:36 -06:00
dependabot[bot] 2009753535 chore(deps): bump taiki-e/install-action from 2.52.4 to 2.52.7
Bumps [taiki-e/install-action](https://github.com/taiki-e/install-action) from 2.52.4 to 2.52.7.
- [Release notes](https://github.com/taiki-e/install-action/releases)
- [Changelog](https://github.com/taiki-e/install-action/blob/main/CHANGELOG.md)
- [Commits](735e593394...92f69c1952)

---
updated-dependencies:
- dependency-name: taiki-e/install-action
  dependency-version: 2.52.7
  dependency-type: direct:production
  update-type: version-update:semver-patch
...

Signed-off-by: dependabot[bot] <support@github.com>
2025-06-10 20:14:19 -06:00
dependabot[bot] 6ffc096379 chore(deps): bump hyper-util from 0.1.13 to 0.1.14
Bumps [hyper-util](https://github.com/hyperium/hyper-util) from 0.1.13 to 0.1.14.
- [Release notes](https://github.com/hyperium/hyper-util/releases)
- [Changelog](https://github.com/hyperium/hyper-util/blob/master/CHANGELOG.md)
- [Commits](https://github.com/hyperium/hyper-util/compare/v0.1.13...v0.1.14)

---
updated-dependencies:
- dependency-name: hyper-util
  dependency-version: 0.1.14
  dependency-type: direct:production
  update-type: version-update:semver-patch
...

Signed-off-by: dependabot[bot] <support@github.com>
2025-06-10 20:13:29 -06:00
dependabot[bot] 62b573183b chore(deps): bump github/codeql-action from 3.28.18 to 3.28.19
Bumps [github/codeql-action](https://github.com/github/codeql-action) from 3.28.18 to 3.28.19.
- [Release notes](https://github.com/github/codeql-action/releases)
- [Changelog](https://github.com/github/codeql-action/blob/main/CHANGELOG.md)
- [Commits](ff0a06e83c...fca7ace96b)

---
updated-dependencies:
- dependency-name: github/codeql-action
  dependency-version: 3.28.19
  dependency-type: direct:production
  update-type: version-update:semver-patch
...

Signed-off-by: dependabot[bot] <support@github.com>
2025-06-10 20:11:47 -06:00
dependabot[bot] 254765a5db chore(deps): bump ossf/scorecard-action from 2.4.1 to 2.4.2
Bumps [ossf/scorecard-action](https://github.com/ossf/scorecard-action) from 2.4.1 to 2.4.2.
- [Release notes](https://github.com/ossf/scorecard-action/releases)
- [Changelog](https://github.com/ossf/scorecard-action/blob/main/RELEASE.md)
- [Commits](f49aabe0b5...05b42c6244)

---
updated-dependencies:
- dependency-name: ossf/scorecard-action
  dependency-version: 2.4.2
  dependency-type: direct:production
  update-type: version-update:semver-patch
...

Signed-off-by: dependabot[bot] <support@github.com>
2025-06-06 14:21:01 -06:00
dependabot[bot] 9ad8b52ffe chore(deps): bump clap from 4.5.38 to 4.5.39
Bumps [clap](https://github.com/clap-rs/clap) from 4.5.38 to 4.5.39.
- [Release notes](https://github.com/clap-rs/clap/releases)
- [Changelog](https://github.com/clap-rs/clap/blob/master/CHANGELOG.md)
- [Commits](https://github.com/clap-rs/clap/compare/clap_complete-v4.5.38...clap_complete-v4.5.39)

---
updated-dependencies:
- dependency-name: clap
  dependency-version: 4.5.39
  dependency-type: direct:production
  update-type: version-update:semver-patch
...

Signed-off-by: dependabot[bot] <support@github.com>
2025-06-06 14:20:46 -06:00
dependabot[bot] cc394fb963 chore(deps): bump hyper-util from 0.1.12 to 0.1.13
Bumps [hyper-util](https://github.com/hyperium/hyper-util) from 0.1.12 to 0.1.13.
- [Release notes](https://github.com/hyperium/hyper-util/releases)
- [Changelog](https://github.com/hyperium/hyper-util/blob/master/CHANGELOG.md)
- [Commits](https://github.com/hyperium/hyper-util/compare/v0.1.12...v0.1.13)

---
updated-dependencies:
- dependency-name: hyper-util
  dependency-version: 0.1.13
  dependency-type: direct:production
  update-type: version-update:semver-patch
...

Signed-off-by: dependabot[bot] <support@github.com>
2025-06-06 14:20:37 -06:00
dependabot[bot] 4f0be1c2ec chore(deps): bump docker/build-push-action from 6.17.0 to 6.18.0
Bumps [docker/build-push-action](https://github.com/docker/build-push-action) from 6.17.0 to 6.18.0.
- [Release notes](https://github.com/docker/build-push-action/releases)
- [Commits](1dc7386353...263435318d)

---
updated-dependencies:
- dependency-name: docker/build-push-action
  dependency-version: 6.18.0
  dependency-type: direct:production
  update-type: version-update:semver-minor
...

Signed-off-by: dependabot[bot] <support@github.com>
2025-06-06 14:20:21 -06:00
dependabot[bot] c6177f1ec0 chore(deps): bump taiki-e/install-action from 2.52.1 to 2.52.4
Bumps [taiki-e/install-action](https://github.com/taiki-e/install-action) from 2.52.1 to 2.52.4.
- [Release notes](https://github.com/taiki-e/install-action/releases)
- [Changelog](https://github.com/taiki-e/install-action/blob/main/CHANGELOG.md)
- [Commits](6c6479b498...735e593394)

---
updated-dependencies:
- dependency-name: taiki-e/install-action
  dependency-version: 2.52.4
  dependency-type: direct:production
  update-type: version-update:semver-patch
...

Signed-off-by: dependabot[bot] <support@github.com>
2025-06-06 14:19:38 -06:00
dependabot[bot] 9ab6ef3f3a chore(deps): bump hyper-util from 0.1.11 to 0.1.12
Bumps [hyper-util](https://github.com/hyperium/hyper-util) from 0.1.11 to 0.1.12.
- [Release notes](https://github.com/hyperium/hyper-util/releases)
- [Changelog](https://github.com/hyperium/hyper-util/blob/master/CHANGELOG.md)
- [Commits](https://github.com/hyperium/hyper-util/compare/v0.1.11...v0.1.12)

---
updated-dependencies:
- dependency-name: hyper-util
  dependency-version: 0.1.12
  dependency-type: direct:production
  update-type: version-update:semver-patch
...

Signed-off-by: dependabot[bot] <support@github.com>
2025-05-28 16:10:04 -04:00
dependabot[bot] aab70fa276 chore(deps): bump mlugg/setup-zig from 2.0.0 to 2.0.1
Bumps [mlugg/setup-zig](https://github.com/mlugg/setup-zig) from 2.0.0 to 2.0.1.
- [Release notes](https://github.com/mlugg/setup-zig/releases)
- [Commits](aa9ad5c14e...7dccf5e6d0)

---
updated-dependencies:
- dependency-name: mlugg/setup-zig
  dependency-version: 2.0.1
  dependency-type: direct:production
  update-type: version-update:semver-patch
...

Signed-off-by: dependabot[bot] <support@github.com>
2025-05-28 16:08:09 -04:00
dependabot[bot] 04862520cb chore(deps): bump taiki-e/install-action from 2.51.2 to 2.52.1
Bumps [taiki-e/install-action](https://github.com/taiki-e/install-action) from 2.51.2 to 2.52.1.
- [Release notes](https://github.com/taiki-e/install-action/releases)
- [Changelog](https://github.com/taiki-e/install-action/blob/main/CHANGELOG.md)
- [Commits](941e8a4d9d...6c6479b498)

---
updated-dependencies:
- dependency-name: taiki-e/install-action
  dependency-version: 2.52.1
  dependency-type: direct:production
  update-type: version-update:semver-minor
...

Signed-off-by: dependabot[bot] <support@github.com>
2025-05-28 16:07:52 -04:00
dependabot[bot] d24a275f69 chore(deps): bump uuid from 1.16.0 to 1.17.0
Bumps [uuid](https://github.com/uuid-rs/uuid) from 1.16.0 to 1.17.0.
- [Release notes](https://github.com/uuid-rs/uuid/releases)
- [Commits](https://github.com/uuid-rs/uuid/compare/v1.16.0...v1.17.0)

---
updated-dependencies:
- dependency-name: uuid
  dependency-version: 1.17.0
  dependency-type: direct:production
  update-type: version-update:semver-minor
...

Signed-off-by: dependabot[bot] <support@github.com>
2025-05-28 16:07:37 -04:00
dependabot[bot] dc85b32bed chore(deps): bump tokio from 1.45.0 to 1.45.1
Bumps [tokio](https://github.com/tokio-rs/tokio) from 1.45.0 to 1.45.1.
- [Release notes](https://github.com/tokio-rs/tokio/releases)
- [Commits](https://github.com/tokio-rs/tokio/compare/tokio-1.45.0...tokio-1.45.1)

---
updated-dependencies:
- dependency-name: tokio
  dependency-version: 1.45.1
  dependency-type: direct:production
  update-type: version-update:semver-patch
...

Signed-off-by: dependabot[bot] <support@github.com>
2025-05-28 16:07:21 -04:00
dependabot[bot] a5a61d2749 chore(deps): bump tokio from 1.44.2 to 1.45.0
Bumps [tokio](https://github.com/tokio-rs/tokio) from 1.44.2 to 1.45.0.
- [Release notes](https://github.com/tokio-rs/tokio/releases)
- [Commits](https://github.com/tokio-rs/tokio/compare/tokio-1.44.2...tokio-1.45.0)

---
updated-dependencies:
- dependency-name: tokio
  dependency-version: 1.45.0
  dependency-type: direct:production
  update-type: version-update:semver-minor
...

Signed-off-by: dependabot[bot] <support@github.com>
2025-05-20 09:42:00 -04:00
dependabot[bot] c065b3e17e chore(deps): bump clap from 4.5.37 to 4.5.38
Bumps [clap](https://github.com/clap-rs/clap) from 4.5.37 to 4.5.38.
- [Release notes](https://github.com/clap-rs/clap/releases)
- [Changelog](https://github.com/clap-rs/clap/blob/master/CHANGELOG.md)
- [Commits](https://github.com/clap-rs/clap/compare/clap_complete-v4.5.37...clap_complete-v4.5.38)

---
updated-dependencies:
- dependency-name: clap
  dependency-version: 4.5.38
  dependency-type: direct:production
  update-type: version-update:semver-patch
...

Signed-off-by: dependabot[bot] <support@github.com>
2025-05-19 21:39:12 -06:00
dependabot[bot] 4239d6d898 chore(deps): bump github/codeql-action from 3.28.17 to 3.28.18 (#669)
Bumps [github/codeql-action](https://github.com/github/codeql-action) from 3.28.17 to 3.28.18.
- [Release notes](https://github.com/github/codeql-action/releases)
- [Changelog](https://github.com/github/codeql-action/blob/main/CHANGELOG.md)
- [Commits](60168efe1c...ff0a06e83c)

---
updated-dependencies:
- dependency-name: github/codeql-action
  dependency-version: 3.28.18
  dependency-type: direct:production
  update-type: version-update:semver-patch
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2025-05-19 08:56:32 -05:00
dependabot[bot] d240b53a5d chore(deps): bump docker/build-push-action from 6.16.0 to 6.17.0 (#668)
Bumps [docker/build-push-action](https://github.com/docker/build-push-action) from 6.16.0 to 6.17.0.
- [Release notes](https://github.com/docker/build-push-action/releases)
- [Commits](14487ce63c...1dc7386353)

---
updated-dependencies:
- dependency-name: docker/build-push-action
  dependency-version: 6.17.0
  dependency-type: direct:production
  update-type: version-update:semver-minor
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2025-05-19 08:44:15 -05:00
dependabot[bot] 4e014223b8 chore(deps): bump taiki-e/install-action from 2.50.10 to 2.51.2 (#667)
Bumps [taiki-e/install-action](https://github.com/taiki-e/install-action) from 2.50.10 to 2.51.2.
- [Release notes](https://github.com/taiki-e/install-action/releases)
- [Changelog](https://github.com/taiki-e/install-action/blob/main/CHANGELOG.md)
- [Commits](83254c5438...941e8a4d9d)

---
updated-dependencies:
- dependency-name: taiki-e/install-action
  dependency-version: 2.51.2
  dependency-type: direct:production
  update-type: version-update:semver-minor
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2025-05-19 08:31:15 -05:00
dependabot[bot] 96aa54bd5e chore(deps): bump mlugg/setup-zig from 1.2.1 to 2.0.0
Bumps [mlugg/setup-zig](https://github.com/mlugg/setup-zig) from 1.2.1 to 2.0.0.
- [Release notes](https://github.com/mlugg/setup-zig/releases)
- [Commits](a67e68dc5c...aa9ad5c14e)

---
updated-dependencies:
- dependency-name: mlugg/setup-zig
  dependency-version: 2.0.0
  dependency-type: direct:production
  update-type: version-update:semver-major
...

Signed-off-by: dependabot[bot] <support@github.com>
2025-05-12 09:38:59 -04:00
dependabot[bot] 67b1d85ba9 chore(deps): bump taiki-e/install-action from 2.50.7 to 2.50.10
Bumps [taiki-e/install-action](https://github.com/taiki-e/install-action) from 2.50.7 to 2.50.10.
- [Release notes](https://github.com/taiki-e/install-action/releases)
- [Changelog](https://github.com/taiki-e/install-action/blob/main/CHANGELOG.md)
- [Commits](86c23eed46...83254c5438)

---
updated-dependencies:
- dependency-name: taiki-e/install-action
  dependency-version: 2.50.10
  dependency-type: direct:production
  update-type: version-update:semver-patch
...

Signed-off-by: dependabot[bot] <support@github.com>
2025-05-12 09:36:04 -04:00
Joonas Bergius b5133163ae chore: Switch to mlugg/setup-zig action (#662)
Signed-off-by: Joonas Bergius <joonas@cosmonic.com>
2025-05-06 08:29:49 -05:00
dependabot[bot] d5a77cc74c chore(deps): bump sha2 from 0.10.8 to 0.10.9
Bumps [sha2](https://github.com/RustCrypto/hashes) from 0.10.8 to 0.10.9.
- [Commits](https://github.com/RustCrypto/hashes/compare/sha2-v0.10.8...sha2-v0.10.9)

---
updated-dependencies:
- dependency-name: sha2
  dependency-version: 0.10.9
  dependency-type: direct:production
  update-type: version-update:semver-patch
...

Signed-off-by: dependabot[bot] <support@github.com>
2025-05-05 22:31:25 -06:00
dependabot[bot] ef80b684ba chore(deps): bump chrono from 0.4.40 to 0.4.41
Bumps [chrono](https://github.com/chronotope/chrono) from 0.4.40 to 0.4.41.
- [Release notes](https://github.com/chronotope/chrono/releases)
- [Changelog](https://github.com/chronotope/chrono/blob/main/CHANGELOG.md)
- [Commits](https://github.com/chronotope/chrono/compare/v0.4.40...v0.4.41)

---
updated-dependencies:
- dependency-name: chrono
  dependency-version: 0.4.41
  dependency-type: direct:production
  update-type: version-update:semver-patch
...

Signed-off-by: dependabot[bot] <support@github.com>
2025-05-05 22:31:00 -06:00
dependabot[bot] ee40750113 chore(deps): bump testcontainers from 0.23.3 to 0.24.0 (#659)
Bumps [testcontainers](https://github.com/testcontainers/testcontainers-rs) from 0.23.3 to 0.24.0.
- [Release notes](https://github.com/testcontainers/testcontainers-rs/releases)
- [Changelog](https://github.com/testcontainers/testcontainers-rs/blob/main/CHANGELOG.md)
- [Commits](https://github.com/testcontainers/testcontainers-rs/compare/0.23.3...0.24.0)

---
updated-dependencies:
- dependency-name: testcontainers
  dependency-version: 0.24.0
  dependency-type: direct:production
  update-type: version-update:semver-minor
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2025-05-05 19:04:41 -05:00
dependabot[bot] 73dc76b72a chore(deps): bump taiki-e/install-action from 2.50.3 to 2.50.7 (#657)
Bumps [taiki-e/install-action](https://github.com/taiki-e/install-action) from 2.50.3 to 2.50.7.
- [Release notes](https://github.com/taiki-e/install-action/releases)
- [Changelog](https://github.com/taiki-e/install-action/blob/main/CHANGELOG.md)
- [Commits](ab3728c7ba...86c23eed46)

---
updated-dependencies:
- dependency-name: taiki-e/install-action
  dependency-version: 2.50.7
  dependency-type: direct:production
  update-type: version-update:semver-patch
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2025-05-05 08:59:17 -05:00
dependabot[bot] aac1e46d0b chore(deps): bump github/codeql-action from 3.28.16 to 3.28.17 (#658)
Bumps [github/codeql-action](https://github.com/github/codeql-action) from 3.28.16 to 3.28.17.
- [Release notes](https://github.com/github/codeql-action/releases)
- [Changelog](https://github.com/github/codeql-action/blob/main/CHANGELOG.md)
- [Commits](28deaeda66...60168efe1c)

---
updated-dependencies:
- dependency-name: github/codeql-action
  dependency-version: 3.28.17
  dependency-type: direct:production
  update-type: version-update:semver-patch
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2025-05-05 08:58:52 -05:00
dependabot[bot] e843cfb824 chore(deps): bump rand from 0.9.0 to 0.9.1
Bumps [rand](https://github.com/rust-random/rand) from 0.9.0 to 0.9.1.
- [Release notes](https://github.com/rust-random/rand/releases)
- [Changelog](https://github.com/rust-random/rand/blob/master/CHANGELOG.md)
- [Commits](https://github.com/rust-random/rand/compare/0.9.0...rand_core-0.9.1)

---
updated-dependencies:
- dependency-name: rand
  dependency-version: 0.9.1
  dependency-type: direct:production
  update-type: version-update:semver-patch
...

Signed-off-by: dependabot[bot] <support@github.com>
2025-04-29 14:28:33 -04:00
dependabot[bot] 0ef3162684 chore(deps): bump github/codeql-action from 3.28.15 to 3.28.16
Bumps [github/codeql-action](https://github.com/github/codeql-action) from 3.28.15 to 3.28.16.
- [Release notes](https://github.com/github/codeql-action/releases)
- [Changelog](https://github.com/github/codeql-action/blob/main/CHANGELOG.md)
- [Commits](45775bd823...28deaeda66)

---
updated-dependencies:
- dependency-name: github/codeql-action
  dependency-version: 3.28.16
  dependency-type: direct:production
  update-type: version-update:semver-patch
...

Signed-off-by: dependabot[bot] <support@github.com>
2025-04-29 14:28:10 -04:00
dependabot[bot] 726a6c0bc7 chore(deps): bump actions/download-artifact from 4.2.1 to 4.3.0
Bumps [actions/download-artifact](https://github.com/actions/download-artifact) from 4.2.1 to 4.3.0.
- [Release notes](https://github.com/actions/download-artifact/releases)
- [Commits](95815c38cf...d3f86a106a)

---
updated-dependencies:
- dependency-name: actions/download-artifact
  dependency-version: 4.3.0
  dependency-type: direct:production
  update-type: version-update:semver-minor
...

Signed-off-by: dependabot[bot] <support@github.com>
2025-04-29 14:28:02 -04:00
dependabot[bot] f1a3acbf1e chore(deps): bump actions/setup-python from 5.5.0 to 5.6.0
Bumps [actions/setup-python](https://github.com/actions/setup-python) from 5.5.0 to 5.6.0.
- [Release notes](https://github.com/actions/setup-python/releases)
- [Commits](8d9ed9ac5c...a26af69be9)

---
updated-dependencies:
- dependency-name: actions/setup-python
  dependency-version: 5.6.0
  dependency-type: direct:production
  update-type: version-update:semver-minor
...

Signed-off-by: dependabot[bot] <support@github.com>
2025-04-29 14:27:48 -04:00
dependabot[bot] e92e526dfe chore(deps): bump docker/build-push-action from 6.15.0 to 6.16.0
Bumps [docker/build-push-action](https://github.com/docker/build-push-action) from 6.15.0 to 6.16.0.
- [Release notes](https://github.com/docker/build-push-action/releases)
- [Commits](471d1dc4e0...14487ce63c)

---
updated-dependencies:
- dependency-name: docker/build-push-action
  dependency-version: 6.16.0
  dependency-type: direct:production
  update-type: version-update:semver-minor
...

Signed-off-by: dependabot[bot] <support@github.com>
2025-04-29 14:27:33 -04:00
dependabot[bot] 15ae8c4d6a chore(deps): bump taiki-e/install-action from 2.49.50 to 2.50.3
Bumps [taiki-e/install-action](https://github.com/taiki-e/install-action) from 2.49.50 to 2.50.3.
- [Release notes](https://github.com/taiki-e/install-action/releases)
- [Changelog](https://github.com/taiki-e/install-action/blob/main/CHANGELOG.md)
- [Commits](09dc018eee...ab3728c7ba)

---
updated-dependencies:
- dependency-name: taiki-e/install-action
  dependency-version: 2.50.3
  dependency-type: direct:production
  update-type: version-update:semver-minor
...

Signed-off-by: dependabot[bot] <support@github.com>
2025-04-29 14:27:18 -04:00
dependabot[bot] 22fc78860f chore(deps): bump clap from 4.5.36 to 4.5.37 (#650)
Bumps [clap](https://github.com/clap-rs/clap) from 4.5.36 to 4.5.37.
- [Release notes](https://github.com/clap-rs/clap/releases)
- [Changelog](https://github.com/clap-rs/clap/blob/master/CHANGELOG.md)
- [Commits](https://github.com/clap-rs/clap/compare/clap_complete-v4.5.36...clap_complete-v4.5.37)

---
updated-dependencies:
- dependency-name: clap
  dependency-version: 4.5.37
  dependency-type: direct:production
  update-type: version-update:semver-patch
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2025-04-21 10:18:24 -05:00
dependabot[bot] c7953f95e9 chore(deps): bump taiki-e/install-action from 2.49.49 to 2.49.50 (#649)
Bumps [taiki-e/install-action](https://github.com/taiki-e/install-action) from 2.49.49 to 2.49.50.
- [Release notes](https://github.com/taiki-e/install-action/releases)
- [Changelog](https://github.com/taiki-e/install-action/blob/main/CHANGELOG.md)
- [Commits](be7c31b674...09dc018eee)

---
updated-dependencies:
- dependency-name: taiki-e/install-action
  dependency-version: 2.49.50
  dependency-type: direct:production
  update-type: version-update:semver-patch
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2025-04-21 10:17:46 -05:00
dependabot[bot] 7f0fc3a396 chore(deps): bump softprops/action-gh-release from 2.2.1 to 2.2.2 (#648)
Bumps [softprops/action-gh-release](https://github.com/softprops/action-gh-release) from 2.2.1 to 2.2.2.
- [Release notes](https://github.com/softprops/action-gh-release/releases)
- [Changelog](https://github.com/softprops/action-gh-release/blob/master/CHANGELOG.md)
- [Commits](c95fe14893...da05d55257)

---
updated-dependencies:
- dependency-name: softprops/action-gh-release
  dependency-version: 2.2.2
  dependency-type: direct:production
  update-type: version-update:semver-patch
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2025-04-21 10:16:15 -05:00
dependabot[bot] 37b47154e3 chore(deps): bump github/codeql-action from 3.28.13 to 3.28.15
Bumps [github/codeql-action](https://github.com/github/codeql-action) from 3.28.13 to 3.28.15.
- [Release notes](https://github.com/github/codeql-action/releases)
- [Changelog](https://github.com/github/codeql-action/blob/main/CHANGELOG.md)
- [Commits](1b549b9259...45775bd823)

---
updated-dependencies:
- dependency-name: github/codeql-action
  dependency-version: 3.28.15
  dependency-type: direct:production
  update-type: version-update:semver-patch
...

Signed-off-by: dependabot[bot] <support@github.com>
2025-04-14 09:53:59 -04:00
dependabot[bot] 3c8b0742a5 chore(deps): bump actions/setup-python from 5.4.0 to 5.5.0
Bumps [actions/setup-python](https://github.com/actions/setup-python) from 5.4.0 to 5.5.0.
- [Release notes](https://github.com/actions/setup-python/releases)
- [Commits](42375524e2...8d9ed9ac5c)

---
updated-dependencies:
- dependency-name: actions/setup-python
  dependency-type: direct:production
  update-type: version-update:semver-minor
...

Signed-off-by: dependabot[bot] <support@github.com>
2025-04-14 09:51:20 -04:00
dependabot[bot] 8a3d21ce7d chore(deps): bump clap from 4.5.32 to 4.5.36
Bumps [clap](https://github.com/clap-rs/clap) from 4.5.32 to 4.5.36.
- [Release notes](https://github.com/clap-rs/clap/releases)
- [Changelog](https://github.com/clap-rs/clap/blob/master/CHANGELOG.md)
- [Commits](https://github.com/clap-rs/clap/compare/clap_complete-v4.5.32...clap_complete-v4.5.36)

---
updated-dependencies:
- dependency-name: clap
  dependency-version: 4.5.36
  dependency-type: direct:production
  update-type: version-update:semver-patch
...

Signed-off-by: dependabot[bot] <support@github.com>
2025-04-14 09:50:57 -04:00
dependabot[bot] c09d40d335 chore(deps): bump tokio from 1.44.1 to 1.44.2
Bumps [tokio](https://github.com/tokio-rs/tokio) from 1.44.1 to 1.44.2.
- [Release notes](https://github.com/tokio-rs/tokio/releases)
- [Commits](https://github.com/tokio-rs/tokio/compare/tokio-1.44.1...tokio-1.44.2)

---
updated-dependencies:
- dependency-name: tokio
  dependency-version: 1.44.2
  dependency-type: direct:production
  update-type: version-update:semver-patch
...

Signed-off-by: dependabot[bot] <support@github.com>
2025-04-14 09:50:50 -04:00
dependabot[bot] 0748b04b60 chore(deps): bump indexmap from 2.8.0 to 2.9.0
Bumps [indexmap](https://github.com/indexmap-rs/indexmap) from 2.8.0 to 2.9.0.
- [Changelog](https://github.com/indexmap-rs/indexmap/blob/main/RELEASES.md)
- [Commits](https://github.com/indexmap-rs/indexmap/compare/2.8.0...2.9.0)

---
updated-dependencies:
- dependency-name: indexmap
  dependency-version: 2.9.0
  dependency-type: direct:production
  update-type: version-update:semver-minor
...

Signed-off-by: dependabot[bot] <support@github.com>
2025-04-14 09:50:37 -04:00
dependabot[bot] dc1955370f chore(deps): bump hyper-util from 0.1.10 to 0.1.11
Bumps [hyper-util](https://github.com/hyperium/hyper-util) from 0.1.10 to 0.1.11.
- [Release notes](https://github.com/hyperium/hyper-util/releases)
- [Changelog](https://github.com/hyperium/hyper-util/blob/master/CHANGELOG.md)
- [Commits](https://github.com/hyperium/hyper-util/compare/v0.1.10...v0.1.11)

---
updated-dependencies:
- dependency-name: hyper-util
  dependency-version: 0.1.11
  dependency-type: direct:production
  update-type: version-update:semver-patch
...

Signed-off-by: dependabot[bot] <support@github.com>
2025-04-14 09:50:28 -04:00
dependabot[bot] ebd113e51a chore(deps): bump anyhow from 1.0.97 to 1.0.98
Bumps [anyhow](https://github.com/dtolnay/anyhow) from 1.0.97 to 1.0.98.
- [Release notes](https://github.com/dtolnay/anyhow/releases)
- [Commits](https://github.com/dtolnay/anyhow/compare/1.0.97...1.0.98)

---
updated-dependencies:
- dependency-name: anyhow
  dependency-version: 1.0.98
  dependency-type: direct:production
  update-type: version-update:semver-patch
...

Signed-off-by: dependabot[bot] <support@github.com>
2025-04-14 09:50:21 -04:00
dependabot[bot] 8def8fe075 chore(deps): bump taiki-e/install-action from 2.49.34 to 2.49.49
Bumps [taiki-e/install-action](https://github.com/taiki-e/install-action) from 2.49.34 to 2.49.49.
- [Release notes](https://github.com/taiki-e/install-action/releases)
- [Changelog](https://github.com/taiki-e/install-action/blob/main/CHANGELOG.md)
- [Commits](914ac1e29d...be7c31b674)

---
updated-dependencies:
- dependency-name: taiki-e/install-action
  dependency-version: 2.49.49
  dependency-type: direct:production
  update-type: version-update:semver-patch
...

Signed-off-by: dependabot[bot] <support@github.com>
2025-04-14 09:50:14 -04:00
dependabot[bot] 1ae4e8e2cb chore(deps): bump Swatinem/rust-cache from 2.7.7 to 2.7.8
Bumps [Swatinem/rust-cache](https://github.com/swatinem/rust-cache) from 2.7.7 to 2.7.8.
- [Release notes](https://github.com/swatinem/rust-cache/releases)
- [Changelog](https://github.com/Swatinem/rust-cache/blob/master/CHANGELOG.md)
- [Commits](f0deed1e0e...9d47c6ad4b)

---
updated-dependencies:
- dependency-name: Swatinem/rust-cache
  dependency-type: direct:production
  update-type: version-update:semver-patch
...

Signed-off-by: dependabot[bot] <support@github.com>
2025-03-24 13:56:01 -06:00
dependabot[bot] db80173177 chore(deps): bump actions/download-artifact from 4.1.9 to 4.2.1
Bumps [actions/download-artifact](https://github.com/actions/download-artifact) from 4.1.9 to 4.2.1.
- [Release notes](https://github.com/actions/download-artifact/releases)
- [Commits](cc20338598...95815c38cf)

---
updated-dependencies:
- dependency-name: actions/download-artifact
  dependency-type: direct:production
  update-type: version-update:semver-minor
...

Signed-off-by: dependabot[bot] <support@github.com>
2025-03-24 13:54:17 -06:00
dependabot[bot] 6b4946dd32 chore(deps): bump taiki-e/install-action from 2.49.28 to 2.49.34
Bumps [taiki-e/install-action](https://github.com/taiki-e/install-action) from 2.49.28 to 2.49.34.
- [Release notes](https://github.com/taiki-e/install-action/releases)
- [Changelog](https://github.com/taiki-e/install-action/blob/main/CHANGELOG.md)
- [Commits](d7975a1de2...914ac1e29d)

---
updated-dependencies:
- dependency-name: taiki-e/install-action
  dependency-type: direct:production
  update-type: version-update:semver-patch
...

Signed-off-by: dependabot[bot] <support@github.com>
2025-03-24 13:53:43 -06:00
dependabot[bot] 897192b894 chore(deps): bump github/codeql-action from 3.28.11 to 3.28.13
Bumps [github/codeql-action](https://github.com/github/codeql-action) from 3.28.11 to 3.28.13.
- [Release notes](https://github.com/github/codeql-action/releases)
- [Changelog](https://github.com/github/codeql-action/blob/main/CHANGELOG.md)
- [Commits](6bb031afdd...1b549b9259)

---
updated-dependencies:
- dependency-name: github/codeql-action
  dependency-type: direct:production
  update-type: version-update:semver-patch
...

Signed-off-by: dependabot[bot] <support@github.com>
2025-03-24 13:50:33 -06:00
dependabot[bot] d715170d01 chore(deps): bump actions/upload-artifact from 4.6.1 to 4.6.2
Bumps [actions/upload-artifact](https://github.com/actions/upload-artifact) from 4.6.1 to 4.6.2.
- [Release notes](https://github.com/actions/upload-artifact/releases)
- [Commits](4cec3d8aa0...ea165f8d65)

---
updated-dependencies:
- dependency-name: actions/upload-artifact
  dependency-type: direct:production
  update-type: version-update:semver-patch
...

Signed-off-by: dependabot[bot] <support@github.com>
2025-03-24 13:48:15 -06:00
Taylor Thomas 8a1cd9e8e4 chore: Bump dep versions in preparation for release
Now that the control client is released, we can release wadm to continue
releasing the rest of the host monorepo

Signed-off-by: Taylor Thomas <taylor@cosmonic.com>
2025-03-19 15:00:57 -06:00
dependabot[bot] 93fbb9f4a3 chore(deps): bump taiki-e/install-action from 2.49.18 to 2.49.28
Bumps [taiki-e/install-action](https://github.com/taiki-e/install-action) from 2.49.18 to 2.49.28.
- [Release notes](https://github.com/taiki-e/install-action/releases)
- [Changelog](https://github.com/taiki-e/install-action/blob/main/CHANGELOG.md)
- [Commits](f87f9990b0...d7975a1de2)

---
updated-dependencies:
- dependency-name: taiki-e/install-action
  dependency-type: direct:production
  update-type: version-update:semver-patch
...

Signed-off-by: dependabot[bot] <support@github.com>
2025-03-17 13:35:45 -04:00
dependabot[bot] 6e57d6f197 chore(deps): bump docker/login-action from 3.3.0 to 3.4.0
Bumps [docker/login-action](https://github.com/docker/login-action) from 3.3.0 to 3.4.0.
- [Release notes](https://github.com/docker/login-action/releases)
- [Commits](9780b0c442...74a5d14239)

---
updated-dependencies:
- dependency-name: docker/login-action
  dependency-type: direct:production
  update-type: version-update:semver-minor
...

Signed-off-by: dependabot[bot] <support@github.com>
2025-03-17 13:26:35 -04:00
dependabot[bot] b3ebcd2e2a chore(deps): bump ulid from 1.2.0 to 1.2.1
Bumps [ulid](https://github.com/dylanhart/ulid-rs) from 1.2.0 to 1.2.1.
- [Commits](https://github.com/dylanhart/ulid-rs/compare/v1.2.0...v1.2.1)

---
updated-dependencies:
- dependency-name: ulid
  dependency-type: direct:production
  update-type: version-update:semver-patch
...

Signed-off-by: dependabot[bot] <support@github.com>
2025-03-17 13:26:19 -04:00
dependabot[bot] 6c8dd444ba chore(deps): bump indexmap from 2.7.1 to 2.8.0
Bumps [indexmap](https://github.com/indexmap-rs/indexmap) from 2.7.1 to 2.8.0.
- [Changelog](https://github.com/indexmap-rs/indexmap/blob/main/RELEASES.md)
- [Commits](https://github.com/indexmap-rs/indexmap/compare/2.7.1...2.8.0)

---
updated-dependencies:
- dependency-name: indexmap
  dependency-type: direct:production
  update-type: version-update:semver-minor
...

Signed-off-by: dependabot[bot] <support@github.com>
2025-03-17 13:26:06 -04:00
dependabot[bot] 005d599bcd chore(deps): bump tokio from 1.44.0 to 1.44.1
Bumps [tokio](https://github.com/tokio-rs/tokio) from 1.44.0 to 1.44.1.
- [Release notes](https://github.com/tokio-rs/tokio/releases)
- [Commits](https://github.com/tokio-rs/tokio/compare/tokio-1.44.0...tokio-1.44.1)

---
updated-dependencies:
- dependency-name: tokio
  dependency-type: direct:production
  update-type: version-update:semver-patch
...

Signed-off-by: dependabot[bot] <support@github.com>
2025-03-17 13:25:59 -04:00
dependabot[bot] 86af1498cb chore(deps): bump serde from 1.0.217 to 1.0.219
Bumps [serde](https://github.com/serde-rs/serde) from 1.0.217 to 1.0.219.
- [Release notes](https://github.com/serde-rs/serde/releases)
- [Commits](https://github.com/serde-rs/serde/compare/v1.0.217...v1.0.219)

---
updated-dependencies:
- dependency-name: serde
  dependency-type: direct:production
  update-type: version-update:semver-patch
...

Signed-off-by: dependabot[bot] <support@github.com>
2025-03-17 13:25:50 -04:00
dependabot[bot] 60f0014449 chore(deps): bump async-trait from 0.1.87 to 0.1.88
Bumps [async-trait](https://github.com/dtolnay/async-trait) from 0.1.87 to 0.1.88.
- [Release notes](https://github.com/dtolnay/async-trait/releases)
- [Commits](https://github.com/dtolnay/async-trait/compare/0.1.87...0.1.88)

---
updated-dependencies:
- dependency-name: async-trait
  dependency-type: direct:production
  update-type: version-update:semver-patch
...

Signed-off-by: dependabot[bot] <support@github.com>
2025-03-17 13:25:42 -04:00
dependabot[bot] a329be44a3 chore(deps): bump testcontainers from 0.23.2 to 0.23.3
Bumps [testcontainers](https://github.com/testcontainers/testcontainers-rs) from 0.23.2 to 0.23.3.
- [Release notes](https://github.com/testcontainers/testcontainers-rs/releases)
- [Changelog](https://github.com/testcontainers/testcontainers-rs/blob/main/CHANGELOG.md)
- [Commits](https://github.com/testcontainers/testcontainers-rs/compare/0.23.2...0.23.3)

---
updated-dependencies:
- dependency-name: testcontainers
  dependency-type: direct:production
  update-type: version-update:semver-patch
...

Signed-off-by: dependabot[bot] <support@github.com>
2025-03-12 14:13:08 -06:00
Brooks Townsend 14f7ed1bab chore(deps)!: upgrade async-nats to 0.39
Signed-off-by: Brooks Townsend <brooksmtownsend@gmail.com>
2025-03-12 16:05:02 -04:00
Brooks Townsend 39b79638ad chore(wadm): bump to 0.20.3 for release
Signed-off-by: Brooks Townsend <brooksmtownsend@gmail.com>
2025-03-11 14:41:37 -04:00
Brooks Townsend ac747cd8bc fix(scaler): react to config set events for components/providers
Signed-off-by: Brooks Townsend <brooksmtownsend@gmail.com>
2025-03-11 14:41:37 -04:00
dependabot[bot] 77f33f08f6 chore(deps): bump uuid from 1.13.1 to 1.15.1
Bumps [uuid](https://github.com/uuid-rs/uuid) from 1.13.1 to 1.15.1.
- [Release notes](https://github.com/uuid-rs/uuid/releases)
- [Commits](https://github.com/uuid-rs/uuid/compare/1.13.1...v1.15.1)

---
updated-dependencies:
- dependency-name: uuid
  dependency-type: direct:production
  update-type: version-update:semver-minor
...

Signed-off-by: dependabot[bot] <support@github.com>
2025-03-10 13:55:00 -06:00
dependabot[bot] 130c8f4a70 chore(deps): bump chrono from 0.4.39 to 0.4.40
Bumps [chrono](https://github.com/chronotope/chrono) from 0.4.39 to 0.4.40.
- [Release notes](https://github.com/chronotope/chrono/releases)
- [Changelog](https://github.com/chronotope/chrono/blob/main/CHANGELOG.md)
- [Commits](https://github.com/chronotope/chrono/compare/v0.4.39...v0.4.40)

---
updated-dependencies:
- dependency-name: chrono
  dependency-type: direct:production
  update-type: version-update:semver-patch
...

Signed-off-by: dependabot[bot] <support@github.com>
2025-03-10 13:52:45 -06:00
dependabot[bot] e9f017b809 chore(deps): bump tokio from 1.43.0 to 1.44.0
Bumps [tokio](https://github.com/tokio-rs/tokio) from 1.43.0 to 1.44.0.
- [Release notes](https://github.com/tokio-rs/tokio/releases)
- [Commits](https://github.com/tokio-rs/tokio/compare/tokio-1.43.0...tokio-1.44.0)

---
updated-dependencies:
- dependency-name: tokio
  dependency-type: direct:production
  update-type: version-update:semver-minor
...

Signed-off-by: dependabot[bot] <support@github.com>
2025-03-10 13:49:53 -06:00
dependabot[bot] 1365854fbb chore(deps): bump thiserror from 2.0.11 to 2.0.12
Bumps [thiserror](https://github.com/dtolnay/thiserror) from 2.0.11 to 2.0.12.
- [Release notes](https://github.com/dtolnay/thiserror/releases)
- [Commits](https://github.com/dtolnay/thiserror/compare/2.0.11...2.0.12)

---
updated-dependencies:
- dependency-name: thiserror
  dependency-type: direct:production
  update-type: version-update:semver-patch
...

Signed-off-by: dependabot[bot] <support@github.com>
2025-03-10 13:43:35 -06:00
dependabot[bot] 8164b443fc chore(deps): bump azure/setup-helm from 4.2.0 to 4.3.0
Bumps [azure/setup-helm](https://github.com/azure/setup-helm) from 4.2.0 to 4.3.0.
- [Release notes](https://github.com/azure/setup-helm/releases)
- [Changelog](https://github.com/Azure/setup-helm/blob/main/CHANGELOG.md)
- [Commits](fe7b79cd5e...b9e51907a0)

---
updated-dependencies:
- dependency-name: azure/setup-helm
  dependency-type: direct:production
  update-type: version-update:semver-minor
...

Signed-off-by: dependabot[bot] <support@github.com>
2025-03-10 13:42:39 -06:00
dependabot[bot] 445622df2e chore(deps): bump ossf/scorecard-action from 2.4.0 to 2.4.1
Bumps [ossf/scorecard-action](https://github.com/ossf/scorecard-action) from 2.4.0 to 2.4.1.
- [Release notes](https://github.com/ossf/scorecard-action/releases)
- [Changelog](https://github.com/ossf/scorecard-action/blob/main/RELEASE.md)
- [Commits](62b2cac7ed...f49aabe0b5)

---
updated-dependencies:
- dependency-name: ossf/scorecard-action
  dependency-type: direct:production
  update-type: version-update:semver-patch
...

Signed-off-by: dependabot[bot] <support@github.com>
2025-03-10 13:42:09 -06:00
dependabot[bot] e218cdae70 chore(deps): bump github/codeql-action from 3.28.10 to 3.28.11
Bumps [github/codeql-action](https://github.com/github/codeql-action) from 3.28.10 to 3.28.11.
- [Release notes](https://github.com/github/codeql-action/releases)
- [Changelog](https://github.com/github/codeql-action/blob/main/CHANGELOG.md)
- [Commits](b56ba49b26...6bb031afdd)

---
updated-dependencies:
- dependency-name: github/codeql-action
  dependency-type: direct:production
  update-type: version-update:semver-patch
...

Signed-off-by: dependabot[bot] <support@github.com>
2025-03-10 13:41:37 -06:00
dependabot[bot] f74f7f8f54 chore(deps): bump taiki-e/install-action from 2.48.13 to 2.49.18
Bumps [taiki-e/install-action](https://github.com/taiki-e/install-action) from 2.48.13 to 2.49.18.
- [Release notes](https://github.com/taiki-e/install-action/releases)
- [Changelog](https://github.com/taiki-e/install-action/blob/main/CHANGELOG.md)
- [Commits](ad0904967b...f87f9990b0)

---
updated-dependencies:
- dependency-name: taiki-e/install-action
  dependency-type: direct:production
  update-type: version-update:semver-minor
...

Signed-off-by: dependabot[bot] <support@github.com>
2025-03-10 12:56:26 -06:00
dependabot[bot] 734c726f14 chore(deps): bump actions/download-artifact from 4.1.8 to 4.1.9
Bumps [actions/download-artifact](https://github.com/actions/download-artifact) from 4.1.8 to 4.1.9.
- [Release notes](https://github.com/actions/download-artifact/releases)
- [Commits](fa0a91b85d...cc20338598)

---
updated-dependencies:
- dependency-name: actions/download-artifact
  dependency-type: direct:production
  update-type: version-update:semver-patch
...

Signed-off-by: dependabot[bot] <support@github.com>
2025-03-10 12:54:15 -06:00
Nicolas Lamirault 0fba847245 feat(helm): Kubernetes labels (#598)
Signed-off-by: Nicolas Lamirault <nicolas.lamirault@gmail.com>
2025-03-06 19:28:57 -05:00
dependabot[bot] a2c022b462 chore(deps): bump anyhow from 1.0.95 to 1.0.97
Bumps [anyhow](https://github.com/dtolnay/anyhow) from 1.0.95 to 1.0.97.
- [Release notes](https://github.com/dtolnay/anyhow/releases)
- [Commits](https://github.com/dtolnay/anyhow/compare/1.0.95...1.0.97)

---
updated-dependencies:
- dependency-name: anyhow
  dependency-type: direct:production
  update-type: version-update:semver-patch
...

Signed-off-by: dependabot[bot] <support@github.com>
2025-03-03 10:42:00 -07:00
dependabot[bot] 4db8763a0f chore(deps): bump schemars from 0.8.21 to 0.8.22
Bumps [schemars](https://github.com/GREsau/schemars) from 0.8.21 to 0.8.22.
- [Release notes](https://github.com/GREsau/schemars/releases)
- [Changelog](https://github.com/GREsau/schemars/blob/master/CHANGELOG.md)
- [Commits](https://github.com/GREsau/schemars/compare/v0.8.21...v0.8.22)

---
updated-dependencies:
- dependency-name: schemars
  dependency-type: direct:production
  update-type: version-update:semver-patch
...

Signed-off-by: dependabot[bot] <support@github.com>
2025-03-03 10:29:22 -07:00
dependabot[bot] 7958bfbced chore(deps): bump async-trait from 0.1.86 to 0.1.87
Bumps [async-trait](https://github.com/dtolnay/async-trait) from 0.1.86 to 0.1.87.
- [Release notes](https://github.com/dtolnay/async-trait/releases)
- [Commits](https://github.com/dtolnay/async-trait/compare/0.1.86...0.1.87)

---
updated-dependencies:
- dependency-name: async-trait
  dependency-type: direct:production
  update-type: version-update:semver-patch
...

Signed-off-by: dependabot[bot] <support@github.com>
2025-03-03 10:27:54 -07:00
dependabot[bot] 37eb784b82 chore(deps): bump serde_json from 1.0.138 to 1.0.140
Bumps [serde_json](https://github.com/serde-rs/json) from 1.0.138 to 1.0.140.
- [Release notes](https://github.com/serde-rs/json/releases)
- [Commits](https://github.com/serde-rs/json/compare/v1.0.138...v1.0.140)

---
updated-dependencies:
- dependency-name: serde_json
  dependency-type: direct:production
  update-type: version-update:semver-patch
...

Signed-off-by: dependabot[bot] <support@github.com>
2025-03-03 10:27:06 -07:00
dependabot[bot] 16191d081a chore(deps): bump actions/upload-artifact from 4.6.0 to 4.6.1
Bumps [actions/upload-artifact](https://github.com/actions/upload-artifact) from 4.6.0 to 4.6.1.
- [Release notes](https://github.com/actions/upload-artifact/releases)
- [Commits](65c4c4a1dd...4cec3d8aa0)

---
updated-dependencies:
- dependency-name: actions/upload-artifact
  dependency-type: direct:production
  update-type: version-update:semver-patch
...

Signed-off-by: dependabot[bot] <support@github.com>
2025-03-03 10:26:39 -07:00
dependabot[bot] a5424b7e4c chore(deps): bump github/codeql-action from 3.28.9 to 3.28.10
Bumps [github/codeql-action](https://github.com/github/codeql-action) from 3.28.9 to 3.28.10.
- [Release notes](https://github.com/github/codeql-action/releases)
- [Changelog](https://github.com/github/codeql-action/blob/main/CHANGELOG.md)
- [Commits](9e8d0789d4...b56ba49b26)

---
updated-dependencies:
- dependency-name: github/codeql-action
  dependency-type: direct:production
  update-type: version-update:semver-patch
...

Signed-off-by: dependabot[bot] <support@github.com>
2025-03-03 10:26:06 -07:00
dependabot[bot] 2e3abbcba0 chore(deps): bump docker/setup-qemu-action from 3.4.0 to 3.6.0
Bumps [docker/setup-qemu-action](https://github.com/docker/setup-qemu-action) from 3.4.0 to 3.6.0.
- [Release notes](https://github.com/docker/setup-qemu-action/releases)
- [Commits](4574d27a47...29109295f8)

---
updated-dependencies:
- dependency-name: docker/setup-qemu-action
  dependency-type: direct:production
  update-type: version-update:semver-minor
...

Signed-off-by: dependabot[bot] <support@github.com>
2025-03-03 10:25:04 -07:00
dependabot[bot] 720113d026 chore(deps): bump docker/setup-buildx-action from 3.9.0 to 3.10.0
Bumps [docker/setup-buildx-action](https://github.com/docker/setup-buildx-action) from 3.9.0 to 3.10.0.
- [Release notes](https://github.com/docker/setup-buildx-action/releases)
- [Commits](f7ce87c1d6...b5ca514318)

---
updated-dependencies:
- dependency-name: docker/setup-buildx-action
  dependency-type: direct:production
  update-type: version-update:semver-minor
...

Signed-off-by: dependabot[bot] <support@github.com>
2025-03-03 10:24:27 -07:00
dependabot[bot] 80bba4fb9f chore(deps): bump docker/build-push-action from 6.13.0 to 6.15.0
Bumps [docker/build-push-action](https://github.com/docker/build-push-action) from 6.13.0 to 6.15.0.
- [Release notes](https://github.com/docker/build-push-action/releases)
- [Commits](ca877d9245...471d1dc4e0)

---
updated-dependencies:
- dependency-name: docker/build-push-action
  dependency-type: direct:production
  update-type: version-update:semver-minor
...

Signed-off-by: dependabot[bot] <support@github.com>
2025-03-03 10:23:56 -07:00
dependabot[bot] 2e474c5d0c chore(deps): bump clap from 4.5.28 to 4.5.29 (#597)
Bumps [clap](https://github.com/clap-rs/clap) from 4.5.28 to 4.5.29.
- [Release notes](https://github.com/clap-rs/clap/releases)
- [Changelog](https://github.com/clap-rs/clap/blob/master/CHANGELOG.md)
- [Commits](https://github.com/clap-rs/clap/compare/clap_complete-v4.5.28...clap_complete-v4.5.29)

---
updated-dependencies:
- dependency-name: clap
  dependency-type: direct:production
  update-type: version-update:semver-patch
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2025-02-17 09:36:09 -06:00
dependabot[bot] ceda608718 chore(deps): bump taiki-e/install-action from 2.48.9 to 2.48.13 (#596)
Bumps [taiki-e/install-action](https://github.com/taiki-e/install-action) from 2.48.9 to 2.48.13.
- [Release notes](https://github.com/taiki-e/install-action/releases)
- [Changelog](https://github.com/taiki-e/install-action/blob/main/CHANGELOG.md)
- [Commits](995f97569c...ad0904967b)

---
updated-dependencies:
- dependency-name: taiki-e/install-action
  dependency-type: direct:production
  update-type: version-update:semver-patch
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2025-02-17 09:35:32 -06:00
dependabot[bot] 6b9d6fd26f chore(deps): bump uuid from 1.12.1 to 1.13.1
Bumps [uuid](https://github.com/uuid-rs/uuid) from 1.12.1 to 1.13.1.
- [Release notes](https://github.com/uuid-rs/uuid/releases)
- [Commits](https://github.com/uuid-rs/uuid/compare/1.12.1...1.13.1)

---
updated-dependencies:
- dependency-name: uuid
  dependency-type: direct:production
  update-type: version-update:semver-minor
...

Signed-off-by: dependabot[bot] <support@github.com>
2025-02-11 12:35:57 -05:00
dependabot[bot] 44753eb992 chore(deps): bump testcontainers from 0.23.1 to 0.23.2
Bumps [testcontainers](https://github.com/testcontainers/testcontainers-rs) from 0.23.1 to 0.23.2.
- [Release notes](https://github.com/testcontainers/testcontainers-rs/releases)
- [Changelog](https://github.com/testcontainers/testcontainers-rs/blob/main/CHANGELOG.md)
- [Commits](https://github.com/testcontainers/testcontainers-rs/compare/0.23.1...0.23.2)

---
updated-dependencies:
- dependency-name: testcontainers
  dependency-type: direct:production
  update-type: version-update:semver-patch
...

Signed-off-by: dependabot[bot] <support@github.com>
2025-02-11 12:29:38 -05:00
dependabot[bot] c5694226c8 chore(deps): bump taiki-e/install-action from 2.48.1 to 2.48.9
Bumps [taiki-e/install-action](https://github.com/taiki-e/install-action) from 2.48.1 to 2.48.9.
- [Release notes](https://github.com/taiki-e/install-action/releases)
- [Changelog](https://github.com/taiki-e/install-action/blob/main/CHANGELOG.md)
- [Commits](510b3ecd79...995f97569c)

---
updated-dependencies:
- dependency-name: taiki-e/install-action
  dependency-type: direct:production
  update-type: version-update:semver-patch
...

Signed-off-by: dependabot[bot] <support@github.com>
2025-02-11 12:29:22 -05:00
dependabot[bot] c808f7a07a chore(deps): bump ulid from 1.1.4 to 1.2.0
Bumps [ulid](https://github.com/dylanhart/ulid-rs) from 1.1.4 to 1.2.0.
- [Commits](https://github.com/dylanhart/ulid-rs/compare/v1.1.4...v1.2.0)

---
updated-dependencies:
- dependency-name: ulid
  dependency-type: direct:production
  update-type: version-update:semver-minor
...

Signed-off-by: dependabot[bot] <support@github.com>
2025-02-11 12:29:17 -05:00
dependabot[bot] eaebdd918e chore(deps): bump clap from 4.5.27 to 4.5.28
Bumps [clap](https://github.com/clap-rs/clap) from 4.5.27 to 4.5.28.
- [Release notes](https://github.com/clap-rs/clap/releases)
- [Changelog](https://github.com/clap-rs/clap/blob/master/CHANGELOG.md)
- [Commits](https://github.com/clap-rs/clap/compare/clap_complete-v4.5.27...clap_complete-v4.5.28)

---
updated-dependencies:
- dependency-name: clap
  dependency-type: direct:production
  update-type: version-update:semver-patch
...

Signed-off-by: dependabot[bot] <support@github.com>
2025-02-11 12:29:12 -05:00
dependabot[bot] e756aa038f chore(deps): bump docker/setup-buildx-action from 3.8.0 to 3.9.0
Bumps [docker/setup-buildx-action](https://github.com/docker/setup-buildx-action) from 3.8.0 to 3.9.0.
- [Release notes](https://github.com/docker/setup-buildx-action/releases)
- [Commits](6524bf65af...f7ce87c1d6)

---
updated-dependencies:
- dependency-name: docker/setup-buildx-action
  dependency-type: direct:production
  update-type: version-update:semver-minor
...

Signed-off-by: dependabot[bot] <support@github.com>
2025-02-11 12:28:58 -05:00
dependabot[bot] ba04447356 chore(deps): bump docker/setup-qemu-action from 3.3.0 to 3.4.0
Bumps [docker/setup-qemu-action](https://github.com/docker/setup-qemu-action) from 3.3.0 to 3.4.0.
- [Release notes](https://github.com/docker/setup-qemu-action/releases)
- [Commits](53851d1459...4574d27a47)

---
updated-dependencies:
- dependency-name: docker/setup-qemu-action
  dependency-type: direct:production
  update-type: version-update:semver-minor
...

Signed-off-by: dependabot[bot] <support@github.com>
2025-02-11 12:28:46 -05:00
dependabot[bot] 386eebd33f chore(deps): bump github/codeql-action from 3.28.8 to 3.28.9
Bumps [github/codeql-action](https://github.com/github/codeql-action) from 3.28.8 to 3.28.9.
- [Release notes](https://github.com/github/codeql-action/releases)
- [Changelog](https://github.com/github/codeql-action/blob/main/CHANGELOG.md)
- [Commits](dd746615b3...9e8d0789d4)

---
updated-dependencies:
- dependency-name: github/codeql-action
  dependency-type: direct:production
  update-type: version-update:semver-patch
...

Signed-off-by: dependabot[bot] <support@github.com>
2025-02-11 12:28:33 -05:00
Brooks Townsend 1926bf070f chore: ignore dependabot commits in release notes
Signed-off-by: Brooks Townsend <brooksmtownsend@gmail.com>
2025-02-11 11:48:34 -05:00
Brooks Townsend ddb912553a chore: patch all for release
Signed-off-by: Brooks Townsend <brooksmtownsend@gmail.com>
2025-02-11 11:48:34 -05:00
Stuart Harris bdf06dc5d9 only check configs that declare properties for unique names.
Signed-off-by: Stuart Harris <stuart.harris@red-badger.com>
2025-02-07 09:09:49 -05:00
dependabot[bot] ffc655e749 chore(deps): bump hyper from 1.5.2 to 1.6.0
Bumps [hyper](https://github.com/hyperium/hyper) from 1.5.2 to 1.6.0.
- [Release notes](https://github.com/hyperium/hyper/releases)
- [Changelog](https://github.com/hyperium/hyper/blob/master/CHANGELOG.md)
- [Commits](https://github.com/hyperium/hyper/compare/v1.5.2...v1.6.0)

---
updated-dependencies:
- dependency-name: hyper
  dependency-type: direct:production
  update-type: version-update:semver-minor
...

Signed-off-by: dependabot[bot] <support@github.com>
2025-02-05 10:47:05 -05:00
dependabot[bot] 7218266206 chore(deps): bump async-trait from 0.1.85 to 0.1.86
Bumps [async-trait](https://github.com/dtolnay/async-trait) from 0.1.85 to 0.1.86.
- [Release notes](https://github.com/dtolnay/async-trait/releases)
- [Commits](https://github.com/dtolnay/async-trait/compare/0.1.85...0.1.86)

---
updated-dependencies:
- dependency-name: async-trait
  dependency-type: direct:production
  update-type: version-update:semver-patch
...

Signed-off-by: dependabot[bot] <support@github.com>
2025-02-04 18:47:51 -05:00
dependabot[bot] cb00233aaa chore(deps): bump serde_json from 1.0.137 to 1.0.138
Bumps [serde_json](https://github.com/serde-rs/json) from 1.0.137 to 1.0.138.
- [Release notes](https://github.com/serde-rs/json/releases)
- [Commits](https://github.com/serde-rs/json/compare/v1.0.137...v1.0.138)

---
updated-dependencies:
- dependency-name: serde_json
  dependency-type: direct:production
  update-type: version-update:semver-patch
...

Signed-off-by: dependabot[bot] <support@github.com>
2025-02-04 18:44:45 -05:00
dependabot[bot] 7a94b8565c chore(deps): bump bytes from 1.9.0 to 1.10.0
Bumps [bytes](https://github.com/tokio-rs/bytes) from 1.9.0 to 1.10.0.
- [Release notes](https://github.com/tokio-rs/bytes/releases)
- [Changelog](https://github.com/tokio-rs/bytes/blob/master/CHANGELOG.md)
- [Commits](https://github.com/tokio-rs/bytes/compare/v1.9.0...v1.10.0)

---
updated-dependencies:
- dependency-name: bytes
  dependency-type: direct:production
  update-type: version-update:semver-minor
...

Signed-off-by: dependabot[bot] <support@github.com>
2025-02-04 18:44:19 -05:00
dependabot[bot] 66ca4cc9f5 chore(deps): bump softprops/action-gh-release from 2.0.9 to 2.2.1
Bumps [softprops/action-gh-release](https://github.com/softprops/action-gh-release) from 2.0.9 to 2.2.1.
- [Release notes](https://github.com/softprops/action-gh-release/releases)
- [Changelog](https://github.com/softprops/action-gh-release/blob/master/CHANGELOG.md)
- [Commits](e7a8f85e1c...c95fe14893)

---
updated-dependencies:
- dependency-name: softprops/action-gh-release
  dependency-type: direct:production
  update-type: version-update:semver-minor
...

Signed-off-by: dependabot[bot] <support@github.com>
2025-02-03 13:55:15 -05:00
luk3ark c8e715a088 fix(ci): correct wadm WIT tarball structure
Signed-off-by: luk3ark <luk3ark@gmail.com>
2025-02-03 13:47:34 -05:00
dependabot[bot] a5066c16dd chore(deps): bump taiki-e/install-action from 2.47.25 to 2.48.1 (#579)
Bumps [taiki-e/install-action](https://github.com/taiki-e/install-action) from 2.47.25 to 2.48.1.
- [Release notes](https://github.com/taiki-e/install-action/releases)
- [Changelog](https://github.com/taiki-e/install-action/blob/main/CHANGELOG.md)
- [Commits](https://github.com/taiki-e/install-action/compare/v2.47.25...510b3ecd7915856b6909305605afa7a8a57c1b04)

---
updated-dependencies:
- dependency-name: taiki-e/install-action
  dependency-type: direct:production
  update-type: version-update:semver-minor
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2025-02-03 10:59:16 -06:00
dependabot[bot] e4de5fc83e chore(deps): bump actions/setup-python from 5.3.0 to 5.4.0 (#578)
Bumps [actions/setup-python](https://github.com/actions/setup-python) from 5.3.0 to 5.4.0.
- [Release notes](https://github.com/actions/setup-python/releases)
- [Commits](0b93645e9f...42375524e2)

---
updated-dependencies:
- dependency-name: actions/setup-python
  dependency-type: direct:production
  update-type: version-update:semver-minor
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2025-02-03 10:58:42 -06:00
dependabot[bot] b26427c3ec
chore(deps): bump github/codeql-action from 3.27.9 to 3.28.8 (#577)
Bumps [github/codeql-action](https://github.com/github/codeql-action) from 3.27.9 to 3.28.8.
- [Release notes](https://github.com/github/codeql-action/releases)
- [Changelog](https://github.com/github/codeql-action/blob/main/CHANGELOG.md)
- [Commits](df409f7d92...dd746615b3)

---
updated-dependencies:
- dependency-name: github/codeql-action
  dependency-type: direct:production
  update-type: version-update:semver-minor
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2025-02-03 10:13:28 -06:00
Taylor Thomas 2113aa3781 chore: Bumps versions for patch release
I also did a little housekeeping here to fix a bunch of clippy lints and
update the flake inputs

Signed-off-by: Taylor Thomas <taylor@cosmonic.com>
2025-01-30 12:59:42 -05:00
dependabot[bot] 55444f27f2 chore(deps): bump uuid from 1.12.0 to 1.12.1
Bumps [uuid](https://github.com/uuid-rs/uuid) from 1.12.0 to 1.12.1.
- [Release notes](https://github.com/uuid-rs/uuid/releases)
- [Commits](https://github.com/uuid-rs/uuid/compare/1.12.0...1.12.1)

---
updated-dependencies:
- dependency-name: uuid
  dependency-type: direct:production
  update-type: version-update:semver-patch
...

Signed-off-by: dependabot[bot] <support@github.com>
2025-01-27 10:35:51 -05:00
dependabot[bot] 797eddf5c1 chore(deps): bump docker/build-push-action from 6.10.0 to 6.13.0
Bumps [docker/build-push-action](https://github.com/docker/build-push-action) from 6.10.0 to 6.13.0.
- [Release notes](https://github.com/docker/build-push-action/releases)
- [Commits](48aba3b46d...ca877d9245)

---
updated-dependencies:
- dependency-name: docker/build-push-action
  dependency-type: direct:production
  update-type: version-update:semver-minor
...

Signed-off-by: dependabot[bot] <support@github.com>
2025-01-27 10:28:22 -05:00
dependabot[bot] 55be7d8558 chore(deps): bump taiki-e/install-action from 2.46.11 to 2.47.25
Bumps [taiki-e/install-action](https://github.com/taiki-e/install-action) from 2.46.11 to 2.47.25.
- [Release notes](https://github.com/taiki-e/install-action/releases)
- [Changelog](https://github.com/taiki-e/install-action/blob/main/CHANGELOG.md)
- [Commits](ed8c79bccf...1936c8cfe3)

---
updated-dependencies:
- dependency-name: taiki-e/install-action
  dependency-type: direct:production
  update-type: version-update:semver-minor
...

Signed-off-by: dependabot[bot] <support@github.com>
2025-01-27 10:28:06 -05:00
dependabot[bot] 7d59eb4746 chore(deps): bump serde_json from 1.0.135 to 1.0.137
Bumps [serde_json](https://github.com/serde-rs/json) from 1.0.135 to 1.0.137.
- [Release notes](https://github.com/serde-rs/json/releases)
- [Commits](https://github.com/serde-rs/json/compare/v1.0.135...v1.0.137)

---
updated-dependencies:
- dependency-name: serde_json
  dependency-type: direct:production
  update-type: version-update:semver-patch
...

Signed-off-by: dependabot[bot] <support@github.com>
2025-01-27 10:27:47 -05:00
dependabot[bot] 4bb74d04fe chore(deps): bump wasmcloud-control-interface from 2.2.0 to 2.3.0
Bumps [wasmcloud-control-interface](https://github.com/wasmCloud/wasmCloud) from 2.2.0 to 2.3.0.
- [Release notes](https://github.com/wasmCloud/wasmCloud/releases)
- [Changelog](https://github.com/wasmCloud/wasmCloud/blob/main/CHANGELOG.md)
- [Commits](https://github.com/wasmCloud/wasmCloud/commits)

---
updated-dependencies:
- dependency-name: wasmcloud-control-interface
  dependency-type: direct:production
  update-type: version-update:semver-minor
...

Signed-off-by: dependabot[bot] <support@github.com>
2025-01-27 10:27:31 -05:00
dependabot[bot] 1f902b248c chore(deps): bump helm/chart-testing-action from 2.6.1 to 2.7.0
Bumps [helm/chart-testing-action](https://github.com/helm/chart-testing-action) from 2.6.1 to 2.7.0.
- [Release notes](https://github.com/helm/chart-testing-action/releases)
- [Commits](e6669bcd63...0d28d3144d)

---
updated-dependencies:
- dependency-name: helm/chart-testing-action
  dependency-type: direct:production
  update-type: version-update:semver-minor
...

Signed-off-by: dependabot[bot] <support@github.com>
2025-01-27 10:27:23 -05:00
dependabot[bot] 34fb5e69b2 chore(deps): bump clap from 4.5.26 to 4.5.27
Bumps [clap](https://github.com/clap-rs/clap) from 4.5.26 to 4.5.27.
- [Release notes](https://github.com/clap-rs/clap/releases)
- [Changelog](https://github.com/clap-rs/clap/blob/master/CHANGELOG.md)
- [Commits](https://github.com/clap-rs/clap/compare/clap_complete-v4.5.26...clap_complete-v4.5.27)

---
updated-dependencies:
- dependency-name: clap
  dependency-type: direct:production
  update-type: version-update:semver-patch
...

Signed-off-by: dependabot[bot] <support@github.com>
2025-01-27 10:27:20 -05:00
luk3ark efeb6a020d fix(wadm): correct status topic name in WADM
Signed-off-by: luk3ark <luk3ark@gmail.com>
2025-01-27 10:25:29 -05:00
Taylor Thomas e492823998 fix(ci): Right tarball location
Signed-off-by: Taylor Thomas <taylor@cosmonic.com>
2025-01-24 14:15:20 -07:00
Taylor Thomas ad2cb51238 fix(ci): Passes the right directory for wit builds
Signed-off-by: Taylor Thomas <taylor@cosmonic.com>
2025-01-24 12:46:09 -07:00
luk3ark 95633628af feat(ci): add OCI publishing to WADM WIT workflow
Signed-off-by: luk3ark <luk3ark@gmail.com>
2025-01-23 16:53:51 -07:00
dependabot[bot] 9fbc598eff
chore(deps): bump actions/checkout from 4.1.1 to 4.2.2 (#557)
Bumps [actions/checkout](https://github.com/actions/checkout) from 4.1.1 to 4.2.2.
- [Release notes](https://github.com/actions/checkout/releases)
- [Changelog](https://github.com/actions/checkout/blob/main/CHANGELOG.md)
- [Commits](https://github.com/actions/checkout/compare/v4.1.1...11bd71901bbe5b1630ceea73d27597364c9af683)

---
updated-dependencies:
- dependency-name: actions/checkout
  dependency-type: direct:production
  update-type: version-update:semver-minor
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2025-01-20 18:26:39 +00:00
dependabot[bot] 830b02545a
chore(deps): bump indexmap from 2.7.0 to 2.7.1 (#554)
Bumps [indexmap](https://github.com/indexmap-rs/indexmap) from 2.7.0 to 2.7.1.
- [Changelog](https://github.com/indexmap-rs/indexmap/blob/master/RELEASES.md)
- [Commits](https://github.com/indexmap-rs/indexmap/compare/2.7.0...2.7.1)

---
updated-dependencies:
- dependency-name: indexmap
  dependency-type: direct:production
  update-type: version-update:semver-patch
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2025-01-20 12:26:05 -06:00
dependabot[bot] 9475e4c542
chore(deps): bump semver from 1.0.24 to 1.0.25 (#553)
Bumps [semver](https://github.com/dtolnay/semver) from 1.0.24 to 1.0.25.
- [Release notes](https://github.com/dtolnay/semver/releases)
- [Commits](https://github.com/dtolnay/semver/compare/1.0.24...1.0.25)

---
updated-dependencies:
- dependency-name: semver
  dependency-type: direct:production
  update-type: version-update:semver-patch
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2025-01-20 12:25:47 -06:00
dependabot[bot] 84d4f48783
chore(deps): bump thiserror from 2.0.9 to 2.0.11 (#552)
Bumps [thiserror](https://github.com/dtolnay/thiserror) from 2.0.9 to 2.0.11.
- [Release notes](https://github.com/dtolnay/thiserror/releases)
- [Commits](https://github.com/dtolnay/thiserror/compare/2.0.9...2.0.11)

---
updated-dependencies:
- dependency-name: thiserror
  dependency-type: direct:production
  update-type: version-update:semver-patch
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2025-01-20 12:25:25 -06:00
dependabot[bot] 95d256215b
chore(deps): bump uuid from 1.11.0 to 1.12.0 (#551)
Bumps [uuid](https://github.com/uuid-rs/uuid) from 1.11.0 to 1.12.0.
- [Release notes](https://github.com/uuid-rs/uuid/releases)
- [Commits](https://github.com/uuid-rs/uuid/compare/1.11.0...1.12.0)

---
updated-dependencies:
- dependency-name: uuid
  dependency-type: direct:production
  update-type: version-update:semver-minor
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2025-01-20 12:24:55 -06:00
dependabot[bot] 7e97f6e615
chore(deps): bump tokio from 1.42.0 to 1.43.0 (#550)
Bumps [tokio](https://github.com/tokio-rs/tokio) from 1.42.0 to 1.43.0.
- [Release notes](https://github.com/tokio-rs/tokio/releases)
- [Commits](https://github.com/tokio-rs/tokio/compare/tokio-1.42.0...tokio-1.43.0)

---
updated-dependencies:
- dependency-name: tokio
  dependency-type: direct:production
  update-type: version-update:semver-minor
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2025-01-20 12:23:55 -06:00
dependabot[bot] bcc2b7f461
chore(deps): bump Swatinem/rust-cache from 2.7.5 to 2.7.7 (#555)
Bumps [Swatinem/rust-cache](https://github.com/swatinem/rust-cache) from 2.7.5 to 2.7.7.
- [Release notes](https://github.com/swatinem/rust-cache/releases)
- [Changelog](https://github.com/Swatinem/rust-cache/blob/master/CHANGELOG.md)
- [Commits](82a92a6e8f...f0deed1e0e)

---
updated-dependencies:
- dependency-name: Swatinem/rust-cache
  dependency-type: direct:production
  update-type: version-update:semver-patch
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2025-01-20 12:22:05 -06:00
dependabot[bot] 2aa35a9514
chore(deps): bump helm/kind-action from 1.11.0 to 1.12.0 (#556)
Bumps [helm/kind-action](https://github.com/helm/kind-action) from 1.11.0 to 1.12.0.
- [Release notes](https://github.com/helm/kind-action/releases)
- [Commits](ae94020eaf...a1b0e39133)

---
updated-dependencies:
- dependency-name: helm/kind-action
  dependency-type: direct:production
  update-type: version-update:semver-minor
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2025-01-20 12:21:14 -06:00
dependabot[bot] f504e8c1b2
chore(deps): bump docker/setup-qemu-action from 3.2.0 to 3.3.0 (#558)
Bumps [docker/setup-qemu-action](https://github.com/docker/setup-qemu-action) from 3.2.0 to 3.3.0.
- [Release notes](https://github.com/docker/setup-qemu-action/releases)
- [Commits](49b3bc8e6b...53851d1459)

---
updated-dependencies:
- dependency-name: docker/setup-qemu-action
  dependency-type: direct:production
  update-type: version-update:semver-minor
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2025-01-20 12:20:14 -06:00
dependabot[bot] 7658a4e654
chore(deps): bump actions/upload-artifact from 4.4.3 to 4.6.0 (#559)
Bumps [actions/upload-artifact](https://github.com/actions/upload-artifact) from 4.4.3 to 4.6.0.
- [Release notes](https://github.com/actions/upload-artifact/releases)
- [Commits](https://github.com/actions/upload-artifact/compare/v4.4.3...65c4c4a1ddee5b72f698fdd19549f0f0fb45cf08)

---
updated-dependencies:
- dependency-name: actions/upload-artifact
  dependency-type: direct:production
  update-type: version-update:semver-minor
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2025-01-20 12:19:09 -06:00
Brooks Townsend 64e3d93118 refactor(wadm): better visibility controls
Signed-off-by: Brooks Townsend <brooksmtownsend@gmail.com>
2025-01-17 14:11:59 -05:00
Brooks Townsend 41e6e352cc chore: bump wadm to 0.20.0
Signed-off-by: Brooks Townsend <brooksmtownsend@gmail.com>
2025-01-17 14:11:59 -05:00
Brooks Townsend d169b1be62 test(wadm): fix relative path
Signed-off-by: Brooks Townsend <brooksmtownsend@gmail.com>
2025-01-17 14:11:59 -05:00
Brooks Townsend 4676947211 ci(wadm): test wadm crate feature combinations
Signed-off-by: Brooks Townsend <brooksmtownsend@gmail.com>
2025-01-17 14:11:59 -05:00
Brooks Townsend 78e077604e fix(wadm): properly gate imports behind http_admin
Signed-off-by: Brooks Townsend <brooksmtownsend@gmail.com>
2025-01-17 14:11:59 -05:00
Brooks Townsend a7a287ce7b chore(wadm): add http_admin feature
Signed-off-by: Brooks Townsend <brooksmtownsend@gmail.com>
2025-01-17 14:11:59 -05:00
Brooks Townsend 90dac77412 refactor: use cfg_attr to gate CLI config
Signed-off-by: Brooks Townsend <brooksmtownsend@gmail.com>
2025-01-17 14:11:59 -05:00
Brooks Townsend ab9ad612ee chore(deps): simplify CLI deps
Signed-off-by: Brooks Townsend <brooksmtownsend@gmail.com>
2025-01-17 14:11:59 -05:00
Brooks Townsend 18a66b2640 refactor(*)!: move start functionality to wadm lib
Signed-off-by: Brooks Townsend <brooksmtownsend@gmail.com>
2025-01-17 14:11:59 -05:00
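The feature work above ("test wadm crate feature combinations", "add http_admin feature", "move start functionality to wadm lib") can be exercised locally with plain cargo. A minimal sketch, assuming the crate name wadm and the http_admin feature named in these commits:

# check the crate with its default features
cargo check -p wadm
# check with all default features disabled
cargo check -p wadm --no-default-features
# check with the http_admin feature enabled on top of the defaults
cargo check -p wadm --features http_admin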
Roman Volosatovs 13faa57248 feat: add HTTP admin endpoint
Signed-off-by: Roman Volosatovs <rvolosatovs@riseup.net>
2025-01-14 09:32:34 -05:00
dependabot[bot] b167486f48 chore(deps): bump utoipa from 5.3.0 to 5.3.1
Bumps [utoipa](https://github.com/juhaku/utoipa) from 5.3.0 to 5.3.1.
- [Release notes](https://github.com/juhaku/utoipa/releases)
- [Changelog](https://github.com/juhaku/utoipa/blob/master/utoipa-rapidoc/CHANGELOG.md)
- [Commits](https://github.com/juhaku/utoipa/compare/utoipa-5.3.0...utoipa-5.3.1)

---
updated-dependencies:
- dependency-name: utoipa
  dependency-type: direct:production
  update-type: version-update:semver-patch
...

Signed-off-by: dependabot[bot] <support@github.com>
2025-01-13 10:01:27 -05:00
dependabot[bot] 52500b4787 chore(deps): bump serde_json from 1.0.134 to 1.0.135
Bumps [serde_json](https://github.com/serde-rs/json) from 1.0.134 to 1.0.135.
- [Release notes](https://github.com/serde-rs/json/releases)
- [Commits](https://github.com/serde-rs/json/compare/v1.0.134...v1.0.135)

---
updated-dependencies:
- dependency-name: serde_json
  dependency-type: direct:production
  update-type: version-update:semver-patch
...

Signed-off-by: dependabot[bot] <support@github.com>
2025-01-13 10:01:09 -05:00
dependabot[bot] 8df7924598 chore(deps): bump async-trait from 0.1.83 to 0.1.85
Bumps [async-trait](https://github.com/dtolnay/async-trait) from 0.1.83 to 0.1.85.
- [Release notes](https://github.com/dtolnay/async-trait/releases)
- [Commits](https://github.com/dtolnay/async-trait/compare/0.1.83...0.1.85)

---
updated-dependencies:
- dependency-name: async-trait
  dependency-type: direct:production
  update-type: version-update:semver-patch
...

Signed-off-by: dependabot[bot] <support@github.com>
2025-01-13 10:00:53 -05:00
dependabot[bot] 59e7e66562 chore(deps): bump clap from 4.5.23 to 4.5.26
Bumps [clap](https://github.com/clap-rs/clap) from 4.5.23 to 4.5.26.
- [Release notes](https://github.com/clap-rs/clap/releases)
- [Changelog](https://github.com/clap-rs/clap/blob/master/CHANGELOG.md)
- [Commits](https://github.com/clap-rs/clap/compare/clap_complete-v4.5.23...clap_complete-v4.5.26)

---
updated-dependencies:
- dependency-name: clap
  dependency-type: direct:production
  update-type: version-update:semver-patch
...

Signed-off-by: dependabot[bot] <support@github.com>
2025-01-13 10:00:49 -05:00
dependabot[bot] f88140893b chore(deps): bump ulid from 1.1.3 to 1.1.4
Bumps [ulid](https://github.com/dylanhart/ulid-rs) from 1.1.3 to 1.1.4.
- [Commits](https://github.com/dylanhart/ulid-rs/compare/v1.1.3...v1.1.4)

---
updated-dependencies:
- dependency-name: ulid
  dependency-type: direct:production
  update-type: version-update:semver-patch
...

Signed-off-by: dependabot[bot] <support@github.com>
2025-01-13 10:00:44 -05:00
Brooks Townsend 77f5bc8961 test(validation): allow misnamed interface as warning
Signed-off-by: Brooks Townsend <brooksmtownsend@gmail.com>
2025-01-03 16:30:50 -05:00
Brooks Townsend e67c9e580c fix(types): warn on unknown interface
Signed-off-by: Brooks Townsend <brooksmtownsend@gmail.com>
2025-01-03 16:30:50 -05:00
Brooks Townsend 4243efdc8f release(*): bump versions
Signed-off-by: Brooks Townsend <brooksmtownsend@gmail.com>
2025-01-02 10:42:13 -05:00
Márk Kővári 40d8b50c0e test(e2e): add one e2e test with memory persistence
Signed-off-by: Márk Kővári <kovarimarkofficial@gmail.com>
2025-01-02 09:54:09 -05:00
Márk Kővári 5a4c13fe75 chore(nats): remove redundant possible values, clap already generates those

Signed-off-by: Márk Kővári <kovarimarkofficial@gmail.com>
2025-01-02 09:54:09 -05:00
Márk Kővári b6b398ecd7 chore(stream-persistence): remove hidden and lowercase options
Signed-off-by: Márk Kővári <kovarimarkofficial@gmail.com>
2025-01-02 09:54:09 -05:00
Márk Kővári 6fc79d3c81 fix(typo): persistance to persistence
Signed-off-by: Márk Kővári <kovarimarkofficial@gmail.com>
2025-01-02 09:54:09 -05:00
Márk Kővári 7a811a6737 fix(clap): enum pascalcase rename
Signed-off-by: Márk Kővári <kovarimarkofficial@gmail.com>
2025-01-02 09:54:09 -05:00
Márk Kővári 1448671649 debug(storage): add storage default value fails WIP
Signed-off-by: Márk Kővári <kovarimarkofficial@gmail.com>
2025-01-02 09:54:09 -05:00
Márk Kővári f596dadcb8 fix(clippy): resolve clippy warning with ToString
Signed-off-by: Márk Kővári <kovarimarkofficial@gmail.com>
2025-01-02 09:54:09 -05:00
Márk Kővári ca868c5f79 feat(wadm): cli enable memory stream usage with flags
Signed-off-by: Márk Kővári <kovarimarkofficial@gmail.com>
2025-01-02 09:54:09 -05:00
luk3ark 11aa88b73f feat(deps): removed default std feature and updated to target_family
Signed-off-by: luk3ark <luk3ark@gmail.com>
2025-01-02 09:53:05 -05:00
luk3ark 6b768c1607 feat(deps): change feature gate back to wit
Signed-off-by: luk3ark <luk3ark@gmail.com>
2025-01-02 09:53:05 -05:00
luk3ark c26eb6d2fd feat(deps): removed redundant dependencies
Signed-off-by: luk3ark <luk3ark@gmail.com>
2025-01-02 09:53:05 -05:00
luk3ark f34b19a79b feat(deps): add separate wit-wasm and wit-std features
Signed-off-by: luk3ark <luk3ark@gmail.com>
2025-01-02 09:53:05 -05:00
luk3ark 532e4930ef feat(deps): add separate wit-wasm and wit-std features
Signed-off-by: luk3ark <luk3ark@gmail.com>
2025-01-02 09:53:05 -05:00
luk3ark 6004c9a136 allow unique interfaces across duplicate links and test
Signed-off-by: luk3ark <luk3ark@gmail.com>
2024-12-31 11:12:40 -05:00
dependabot[bot] 4af2a727c3 chore(deps): bump serde from 1.0.216 to 1.0.217
Bumps [serde](https://github.com/serde-rs/serde) from 1.0.216 to 1.0.217.
- [Release notes](https://github.com/serde-rs/serde/releases)
- [Commits](https://github.com/serde-rs/serde/compare/v1.0.216...v1.0.217)

---
updated-dependencies:
- dependency-name: serde
  dependency-type: direct:production
  update-type: version-update:semver-patch
...

Signed-off-by: dependabot[bot] <support@github.com>
2024-12-30 10:52:20 -07:00
dependabot[bot] d92b0b7e6a chore(deps): bump utoipa from 5.2.0 to 5.3.0
Bumps [utoipa](https://github.com/juhaku/utoipa) from 5.2.0 to 5.3.0.
- [Release notes](https://github.com/juhaku/utoipa/releases)
- [Changelog](https://github.com/juhaku/utoipa/blob/master/utoipa-rapidoc/CHANGELOG.md)
- [Commits](https://github.com/juhaku/utoipa/compare/utoipa-5.2.0...utoipa-5.3.0)

---
updated-dependencies:
- dependency-name: utoipa
  dependency-type: direct:production
  update-type: version-update:semver-minor
...

Signed-off-by: dependabot[bot] <support@github.com>
2024-12-23 10:47:12 -05:00
dependabot[bot] ab26db73b7 chore(deps): bump serde_json from 1.0.133 to 1.0.134
Bumps [serde_json](https://github.com/serde-rs/json) from 1.0.133 to 1.0.134.
- [Release notes](https://github.com/serde-rs/json/releases)
- [Commits](https://github.com/serde-rs/json/compare/v1.0.133...v1.0.134)

---
updated-dependencies:
- dependency-name: serde_json
  dependency-type: direct:production
  update-type: version-update:semver-patch
...

Signed-off-by: dependabot[bot] <support@github.com>
2024-12-23 10:47:00 -05:00
dependabot[bot] 229411893a chore(deps): bump anyhow from 1.0.94 to 1.0.95
Bumps [anyhow](https://github.com/dtolnay/anyhow) from 1.0.94 to 1.0.95.
- [Release notes](https://github.com/dtolnay/anyhow/releases)
- [Commits](https://github.com/dtolnay/anyhow/compare/1.0.94...1.0.95)

---
updated-dependencies:
- dependency-name: anyhow
  dependency-type: direct:production
  update-type: version-update:semver-patch
...

Signed-off-by: dependabot[bot] <support@github.com>
2024-12-23 10:46:45 -05:00
dependabot[bot] e2de3fe6b8 chore(deps): bump thiserror from 2.0.7 to 2.0.9
Bumps [thiserror](https://github.com/dtolnay/thiserror) from 2.0.7 to 2.0.9.
- [Release notes](https://github.com/dtolnay/thiserror/releases)
- [Commits](https://github.com/dtolnay/thiserror/compare/2.0.7...2.0.9)

---
updated-dependencies:
- dependency-name: thiserror
  dependency-type: direct:production
  update-type: version-update:semver-patch
...

Signed-off-by: dependabot[bot] <support@github.com>
2024-12-23 10:46:41 -05:00
Vikrant Palle 062130e6f1 improve test_healthy_providers_return_healthy_status unit test
Signed-off-by: Vikrant Palle <vikrantpalle@gmail.com>
2024-12-23 09:10:29 -05:00
Vikrant Palle df0bf72cde nit: formatting fixes
Signed-off-by: Vikrant Palle <vikrantpalle@gmail.com>
2024-12-23 09:10:29 -05:00
Vikrant Palle dad1bd9f66 use StatusType::Failed in scaler status
Signed-off-by: Vikrant Palle <vikrantpalle@gmail.com>
2024-12-23 09:10:29 -05:00
Vikrant Palle a0da5ef75e add unhealthy status to bindings
Signed-off-by: Vikrant Palle <vikrantpalle@gmail.com>
2024-12-23 09:10:29 -05:00
Vikrant Palle f1d68a87d5 refactor health check event handling
Signed-off-by: Vikrant Palle <vikrantpalle@gmail.com>
2024-12-23 09:10:29 -05:00
Vikrant Palle b67193a9f8 add unhealthy status type
Signed-off-by: Vikrant Palle <vikrantpalle@gmail.com>
2024-12-23 09:10:29 -05:00
Vikrant Palle 764e90ba1b add unit tests
Signed-off-by: Vikrant Palle <vikrantpalle@gmail.com>
2024-12-23 09:10:29 -05:00
Vikrant Palle 50b672ad30 reflect unhealthy providers in spreadscaler + daemonscaler
Signed-off-by: Vikrant Palle <vikrantpalle@gmail.com>
2024-12-23 09:10:29 -05:00
Márk Kővári 265f732fc8 feat(validation): warn for link props source_configs and target_configs
Signed-off-by: Márk Kővári <kovarimarkofficial@gmail.com>
2024-12-20 09:32:57 -07:00
Florian Fürstenberg b2a1082559 fix(server): Removed not needed arguments for checking for duplicate link config names (#478)
Signed-off-by: Florian Fürstenberg <florian.fuerstenberg@posteo.de>
2024-12-19 09:21:22 -05:00
Florian Fürstenberg 341ae617ec fix(server): Added validation logic for duplicated link config names (#478)
Signed-off-by: Florian Fürstenberg <florian.fuerstenberg@posteo.de>
2024-12-19 09:21:22 -05:00
Florian Fürstenberg a6223a3f74 fix(server): Added validation for duplicated link config names (#478)
Signed-off-by: Florian Fürstenberg <florian.fuerstenberg@posteo.de>
2024-12-19 09:21:22 -05:00
Taylor Thomas 38cb50f364 chore: Polishes up flake with a few more clarifying comments
Signed-off-by: Taylor Thomas <taylor@cosmonic.com>
2024-12-17 12:30:03 -07:00
Roman Volosatovs 2b50ef2877 feat: filter `Cargo.toml`
Signed-off-by: Roman Volosatovs <rvolosatovs@riseup.net>
2024-12-17 12:30:03 -07:00
Taylor Thomas 97e9e32066 feat(flake): Attempts to break up deps some more in the flake
Signed-off-by: Taylor Thomas <taylor@cosmonic.com>
2024-12-17 12:30:03 -07:00
Taylor Thomas c2ae9f2643 feat: Adds flake
Adds a Nix flake for use in building the project. It is still missing the
ability to run the e2e tests and build Docker images, but it does work
for both building and `nix shell`

Signed-off-by: Taylor Thomas <taylor@oftaylor.com>
2024-12-17 12:30:03 -07:00
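As a usage sketch for the flake described above (these are standard Nix CLI commands; the exact outputs the flake exposes are not spelled out in the commit):

# enter a development shell with the project toolchain
nix develop
# build the default package defined by the flake
nix build
# list the outputs the flake actually exposes
nix flake show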
Joonas Bergius 864acfd28e
chore(ci): Pin GitHub Actions dependencies (#523)
Signed-off-by: Joonas Bergius <joonas@cosmonic.com>
2024-12-17 12:04:35 -06:00
dependabot[bot] 994b881701 chore(deps): bump thiserror from 1.0.69 to 2.0.6
Bumps [thiserror](https://github.com/dtolnay/thiserror) from 1.0.69 to 2.0.6.
- [Release notes](https://github.com/dtolnay/thiserror/releases)
- [Commits](https://github.com/dtolnay/thiserror/compare/1.0.69...2.0.6)

---
updated-dependencies:
- dependency-name: thiserror
  dependency-type: direct:production
  update-type: version-update:semver-major
...

Signed-off-by: dependabot[bot] <support@github.com>
2024-12-17 13:02:57 -05:00
dependabot[bot] 2cc4092daa
chore(deps): bump github/codeql-action from 3.27.6 to 3.27.9 (#520)
Bumps [github/codeql-action](https://github.com/github/codeql-action) from 3.27.6 to 3.27.9.
- [Release notes](https://github.com/github/codeql-action/releases)
- [Changelog](https://github.com/github/codeql-action/blob/main/CHANGELOG.md)
- [Commits](aa57810251...df409f7d92)

---
updated-dependencies:
- dependency-name: github/codeql-action
  dependency-type: direct:production
  update-type: version-update:semver-patch
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2024-12-16 16:30:52 -06:00
dependabot[bot] e1d665416e
chore(deps): bump helm/kind-action from 1.10.0 to 1.11.0 (#519)
Bumps [helm/kind-action](https://github.com/helm/kind-action) from 1.10.0 to 1.11.0.
- [Release notes](https://github.com/helm/kind-action/releases)
- [Commits](https://github.com/helm/kind-action/compare/v1.10.0...v1.11.0)

---
updated-dependencies:
- dependency-name: helm/kind-action
  dependency-type: direct:production
  update-type: version-update:semver-minor
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2024-12-16 11:16:29 -06:00
dependabot[bot] 6e8eb504c9
chore(deps): bump ossf/scorecard-action from 2.3.1 to 2.4.0 (#521)
Bumps [ossf/scorecard-action](https://github.com/ossf/scorecard-action) from 2.3.1 to 2.4.0.
- [Release notes](https://github.com/ossf/scorecard-action/releases)
- [Changelog](https://github.com/ossf/scorecard-action/blob/main/RELEASE.md)
- [Commits](0864cf1902...62b2cac7ed)

---
updated-dependencies:
- dependency-name: ossf/scorecard-action
  dependency-type: direct:production
  update-type: version-update:semver-minor
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2024-12-16 11:15:47 -06:00
luk3ark 7d80eca6aa remove unneeded wasm feature and revert naming
Signed-off-by: luk3ark <luk3ark@gmail.com>
2024-12-13 12:17:41 -07:00
luk3ark 54bf5cbb61 added feature flag for unused import
Signed-off-by: luk3ark <luk3ark@gmail.com>
2024-12-13 12:17:41 -07:00
luk3ark 65cfd337f6 fix
Signed-off-by: luk3ark <luk3ark@gmail.com>
2024-12-13 12:17:41 -07:00
luk3ark 87c64bdcd9 fix test dependency
Signed-off-by: luk3ark <luk3ark@gmail.com>
2024-12-13 12:17:41 -07:00
luk3ark 505debf7ff fix typo
Signed-off-by: luk3ark <luk3ark@gmail.com>
2024-12-13 12:17:41 -07:00
luk3ark c898e2eb20 added wasm flag for wadm-types
Signed-off-by: luk3ark <luk3ark@gmail.com>
2024-12-13 12:17:41 -07:00
luk3ark 5919660776 added wasm flag for wadm-types
Signed-off-by: luk3ark <luk3ark@gmail.com>
2024-12-13 12:17:41 -07:00
Florian Fürstenberg c1db5ff946 fix(server): Added missing test for test_delete_noop (#502)
Signed-off-by: Florian Fürstenberg <florian.fuerstenberg@posteo.de>
2024-12-12 10:34:23 -07:00
Florian Fürstenberg 163c28269a fix(server): Cover DeleteResult::Noop for delete_model if no version was specified (#502)
Signed-off-by: Florian Fürstenberg <florian.fuerstenberg@posteo.de>
2024-12-12 10:34:23 -07:00
Vikrant Palle e9c7cf4ab1 nit: move hashset inside loop
Signed-off-by: Vikrant Palle <vikrantpalle@gmail.com>
2024-12-12 12:34:07 -05:00
Vikrant Palle f137a9ab60 change duplicate link definition
Signed-off-by: Vikrant Palle <vikrantpalle@gmail.com>
2024-12-12 12:34:07 -05:00
Vikrant Palle d9c3627547 add check for duplicate links
Signed-off-by: Vikrant Palle <vikrantpalle@gmail.com>
2024-12-12 12:34:07 -05:00
luk3ark e8fe31f0ed added explicit generates
Signed-off-by: luk3ark <luk3ark@gmail.com>
2024-12-12 09:59:48 -07:00
luk3ark 18e5566a5e chore: bump wadm-types to 0.9.0 for wit-bindgen-wrpc
Signed-off-by: luk3ark <luk3ark@gmail.com>
2024-12-12 09:59:48 -07:00
Joonas Bergius 2561838039
chore: Add Security Policy with link to the main repository (#508)
Signed-off-by: Joonas Bergius <joonas@bergi.us>
2024-12-10 10:14:34 -06:00
dependabot[bot] 8c0ea8263d chore(deps): bump chrono from 0.4.38 to 0.4.39
Bumps [chrono](https://github.com/chronotope/chrono) from 0.4.38 to 0.4.39.
- [Release notes](https://github.com/chronotope/chrono/releases)
- [Changelog](https://github.com/chronotope/chrono/blob/main/CHANGELOG.md)
- [Commits](https://github.com/chronotope/chrono/compare/v0.4.38...v0.4.39)

---
updated-dependencies:
- dependency-name: chrono
  dependency-type: direct:production
  update-type: version-update:semver-patch
...

Signed-off-by: dependabot[bot] <support@github.com>
2024-12-10 09:35:35 -05:00
Joonas Bergius ae8ab69f24
chore: Fix scorecard workflow spacing (#506)
Signed-off-by: Joonas Bergius <joonas@cosmonic.com>
2024-12-09 15:05:19 -05:00
dependabot[bot] 61b81112bd
chore(deps): bump tokio from 1.41.1 to 1.42.0 (#510)
Bumps [tokio](https://github.com/tokio-rs/tokio) from 1.41.1 to 1.42.0.
- [Release notes](https://github.com/tokio-rs/tokio/releases)
- [Commits](https://github.com/tokio-rs/tokio/compare/tokio-1.41.1...tokio-1.42.0)

---
updated-dependencies:
- dependency-name: tokio
  dependency-type: direct:production
  update-type: version-update:semver-minor
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2024-12-09 12:29:26 -06:00
dependabot[bot] b2207ef41f
chore(deps): bump clap from 4.5.21 to 4.5.23 (#511)
Bumps [clap](https://github.com/clap-rs/clap) from 4.5.21 to 4.5.23.
- [Release notes](https://github.com/clap-rs/clap/releases)
- [Changelog](https://github.com/clap-rs/clap/blob/master/CHANGELOG.md)
- [Commits](https://github.com/clap-rs/clap/compare/clap_complete-v4.5.21...clap_complete-v4.5.23)

---
updated-dependencies:
- dependency-name: clap
  dependency-type: direct:production
  update-type: version-update:semver-patch
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2024-12-09 12:28:36 -06:00
dependabot[bot] 0cc63485f4
chore(deps): bump anyhow from 1.0.93 to 1.0.94 (#512)
Bumps [anyhow](https://github.com/dtolnay/anyhow) from 1.0.93 to 1.0.94.
- [Release notes](https://github.com/dtolnay/anyhow/releases)
- [Commits](https://github.com/dtolnay/anyhow/compare/1.0.93...1.0.94)

---
updated-dependencies:
- dependency-name: anyhow
  dependency-type: direct:production
  update-type: version-update:semver-patch
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2024-12-09 10:38:40 -06:00
Joonas Bergius 31cf33a9b7
chore: Add OSSF Scorecard workflow (#504)
Signed-off-by: Joonas Bergius <joonas@cosmonic.com>
2024-12-06 15:28:16 -05:00
Joonas Bergius fb2b74532b
fix: RUSTSEC-2024-0402 (#503)
Signed-off-by: Joonas Bergius <joonas@cosmonic.com>
2024-12-06 15:28:01 -05:00
Ahmed Tadde ca5a63104a
fix: detect spread scaler requirements violation (#491)
---------

Signed-off-by: Ahmed <ahmedtadde@gmail.com>
Co-authored-by: Brooks Townsend <brooksmtownsend@gmail.com>
2024-12-05 01:21:58 -05:00
Joonas Bergius 21feab093f
chore(ci): Set token permissions for GitHub Actions workflows (#498)
Signed-off-by: Joonas Bergius <joonas@bergi.us>
2024-12-02 17:31:16 +00:00
dependabot[bot] eb6fce9255 chore(deps): bump thiserror from 1.0.65 to 1.0.69
Bumps [thiserror](https://github.com/dtolnay/thiserror) from 1.0.65 to 1.0.69.
- [Release notes](https://github.com/dtolnay/thiserror/releases)
- [Commits](https://github.com/dtolnay/thiserror/compare/1.0.65...1.0.69)

---
updated-dependencies:
- dependency-name: thiserror
  dependency-type: direct:production
  update-type: version-update:semver-patch
...

Signed-off-by: dependabot[bot] <support@github.com>
2024-12-02 10:38:08 -05:00
dependabot[bot] 087203cdbc chore(deps): bump indexmap from 2.6.0 to 2.7.0
Bumps [indexmap](https://github.com/indexmap-rs/indexmap) from 2.6.0 to 2.7.0.
- [Changelog](https://github.com/indexmap-rs/indexmap/blob/master/RELEASES.md)
- [Commits](https://github.com/indexmap-rs/indexmap/compare/2.6.0...2.7.0)

---
updated-dependencies:
- dependency-name: indexmap
  dependency-type: direct:production
  update-type: version-update:semver-minor
...

Signed-off-by: dependabot[bot] <support@github.com>
2024-12-02 10:38:01 -05:00
dependabot[bot] 6e35596a22 chore(deps): bump bytes from 1.8.0 to 1.9.0
Bumps [bytes](https://github.com/tokio-rs/bytes) from 1.8.0 to 1.9.0.
- [Release notes](https://github.com/tokio-rs/bytes/releases)
- [Changelog](https://github.com/tokio-rs/bytes/blob/master/CHANGELOG.md)
- [Commits](https://github.com/tokio-rs/bytes/compare/v1.8.0...v1.9.0)

---
updated-dependencies:
- dependency-name: bytes
  dependency-type: direct:production
  update-type: version-update:semver-minor
...

Signed-off-by: dependabot[bot] <support@github.com>
2024-12-02 10:37:45 -05:00
dependabot[bot] 2d47f32fc5
chore(deps): bump serde_json from 1.0.132 to 1.0.133 (#496)
Bumps [serde_json](https://github.com/serde-rs/json) from 1.0.132 to 1.0.133.
- [Release notes](https://github.com/serde-rs/json/releases)
- [Commits](https://github.com/serde-rs/json/compare/v1.0.132...v1.0.133)

---
updated-dependencies:
- dependency-name: serde_json
  dependency-type: direct:production
  update-type: version-update:semver-patch
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2024-11-30 09:39:59 -06:00
Joonas Bergius 2c00cada86
chore(wadm-cli): prune unused dependencies (#487)
Signed-off-by: Joonas Bergius <joonas@cosmonic.com>
2024-11-20 00:29:15 -06:00
Joonas Bergius d1b9d925d2
chore(wadm): prune unused dependencies (#486)
Signed-off-by: Joonas Bergius <joonas@cosmonic.com>
2024-11-20 00:28:58 -06:00
Joonas Bergius db38c50600
chore(wadm-client): prune unused dependencies (#485)
Signed-off-by: Joonas Bergius <joonas@cosmonic.com>
2024-11-20 00:28:44 -06:00
Joonas Bergius 964a586ab6
chore(wadm-types): prune unused dependencies (#484)
Signed-off-by: Joonas Bergius <joonas@cosmonic.com>
2024-11-20 00:28:31 -06:00
Sudhanshu Pandey 6c425a198c
chore: Update the Github Action to set correct tag for the Docker Image (#493)
Signed-off-by: Sudhanshu Pandey <sp6370@nyu.edu>
Signed-off-by: Joonas Bergius <joonas@cosmonic.com>
Co-authored-by: Joonas Bergius <joonas@users.noreply.github.com>
2024-11-20 00:27:37 -06:00
dependabot[bot] 0fb04cfee4
chore(deps): bump serde from 1.0.214 to 1.0.215 (#495)
Bumps [serde](https://github.com/serde-rs/serde) from 1.0.214 to 1.0.215.
- [Release notes](https://github.com/serde-rs/serde/releases)
- [Commits](https://github.com/serde-rs/serde/compare/v1.0.214...v1.0.215)

---
updated-dependencies:
- dependency-name: serde
  dependency-type: direct:production
  update-type: version-update:semver-patch
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2024-11-18 15:44:10 +00:00
dependabot[bot] 066eccdbd2
chore(deps): bump clap from 4.5.20 to 4.5.21 (#494)
Bumps [clap](https://github.com/clap-rs/clap) from 4.5.20 to 4.5.21.
- [Release notes](https://github.com/clap-rs/clap/releases)
- [Changelog](https://github.com/clap-rs/clap/blob/master/CHANGELOG.md)
- [Commits](https://github.com/clap-rs/clap/compare/clap_complete-v4.5.20...clap_complete-v4.5.21)

---
updated-dependencies:
- dependency-name: clap
  dependency-type: direct:production
  update-type: version-update:semver-patch
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2024-11-18 15:43:54 +00:00
dependabot[bot] 4bd2560bdd
chore(deps): bump anyhow from 1.0.92 to 1.0.93 (#490)
Bumps [anyhow](https://github.com/dtolnay/anyhow) from 1.0.92 to 1.0.93.
- [Release notes](https://github.com/dtolnay/anyhow/releases)
- [Commits](https://github.com/dtolnay/anyhow/compare/1.0.92...1.0.93)

---
updated-dependencies:
- dependency-name: anyhow
  dependency-type: direct:production
  update-type: version-update:semver-patch
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2024-11-18 09:40:58 -06:00
dependabot[bot] 57e1807be8
chore(deps): bump tokio from 1.41.0 to 1.41.1 (#489)
Bumps [tokio](https://github.com/tokio-rs/tokio) from 1.41.0 to 1.41.1.
- [Release notes](https://github.com/tokio-rs/tokio/releases)
- [Commits](https://github.com/tokio-rs/tokio/compare/tokio-1.41.0...tokio-1.41.1)

---
updated-dependencies:
- dependency-name: tokio
  dependency-type: direct:production
  update-type: version-update:semver-patch
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2024-11-18 09:40:50 -06:00
dependabot[bot] ef32c26fa0
chore(deps): bump serial_test from 3.1.1 to 3.2.0 (#488)
Bumps [serial_test](https://github.com/palfrey/serial_test) from 3.1.1 to 3.2.0.
- [Release notes](https://github.com/palfrey/serial_test/releases)
- [Commits](https://github.com/palfrey/serial_test/compare/v3.1.1...v3.2.0)

---
updated-dependencies:
- dependency-name: serial_test
  dependency-type: direct:production
  update-type: version-update:semver-minor
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2024-11-18 09:40:18 -06:00
Victor Adossi 1c4b706b17
chore(dx): remove deprecated crates extension (#492)
Signed-off-by: Victor Adossi <vadossi@cosmonic.com>
2024-11-14 10:32:43 -07:00
Joonas Bergius c48802566e
chore: Bump client and types 0.7.1 (#483)
Signed-off-by: Joonas Bergius <joonas@cosmonic.com>
2024-11-09 09:34:16 -06:00
Joonas Bergius 42cc8672d1
chore(ci): pin zig to latest stable version (#482)
Signed-off-by: Joonas Bergius <joonas@cosmonic.com>
2024-11-09 09:29:39 -05:00
Joonas Bergius 9272799f62
chore: Bump wasmcloud-secrets-types (#481)
Signed-off-by: Joonas Bergius <joonas@cosmonic.com>
2024-11-09 09:29:30 -05:00
Joonas Bergius cebb511d28
chore: Bump wascap to 0.15.2 (#480)
Signed-off-by: Joonas Bergius <joonas@cosmonic.com>
2024-11-08 19:58:07 -06:00
dependabot[bot] d0faba952d chore(deps): bump serde from 1.0.213 to 1.0.214
Bumps [serde](https://github.com/serde-rs/serde) from 1.0.213 to 1.0.214.
- [Release notes](https://github.com/serde-rs/serde/releases)
- [Commits](https://github.com/serde-rs/serde/compare/v1.0.213...v1.0.214)

---
updated-dependencies:
- dependency-name: serde
  dependency-type: direct:production
  update-type: version-update:semver-patch
...

Signed-off-by: dependabot[bot] <support@github.com>
2024-11-04 09:44:49 -05:00
dependabot[bot] f59cfa2f7d chore(deps): bump utoipa from 5.1.3 to 5.2.0
Bumps [utoipa](https://github.com/juhaku/utoipa) from 5.1.3 to 5.2.0.
- [Release notes](https://github.com/juhaku/utoipa/releases)
- [Changelog](https://github.com/juhaku/utoipa/blob/master/utoipa-rapidoc/CHANGELOG.md)
- [Commits](https://github.com/juhaku/utoipa/compare/utoipa-5.1.3...utoipa-5.2.0)

---
updated-dependencies:
- dependency-name: utoipa
  dependency-type: direct:production
  update-type: version-update:semver-minor
...

Signed-off-by: dependabot[bot] <support@github.com>
2024-11-04 09:44:26 -05:00
dependabot[bot] 0e78489a56 chore(deps): bump anyhow from 1.0.91 to 1.0.92
Bumps [anyhow](https://github.com/dtolnay/anyhow) from 1.0.91 to 1.0.92.
- [Release notes](https://github.com/dtolnay/anyhow/releases)
- [Commits](https://github.com/dtolnay/anyhow/compare/1.0.91...1.0.92)

---
updated-dependencies:
- dependency-name: anyhow
  dependency-type: direct:production
  update-type: version-update:semver-patch
...

Signed-off-by: dependabot[bot] <support@github.com>
2024-11-04 09:44:20 -05:00
dependabot[bot] 466f6ff402 chore(deps): bump serde from 1.0.210 to 1.0.213
Bumps [serde](https://github.com/serde-rs/serde) from 1.0.210 to 1.0.213.
- [Release notes](https://github.com/serde-rs/serde/releases)
- [Commits](https://github.com/serde-rs/serde/compare/v1.0.210...v1.0.213)

---
updated-dependencies:
- dependency-name: serde
  dependency-type: direct:production
  update-type: version-update:semver-patch
...

Signed-off-by: dependabot[bot] <support@github.com>
2024-10-28 10:46:29 -04:00
dependabot[bot] bd2cc980c7 chore(deps): bump thiserror from 1.0.64 to 1.0.65
Bumps [thiserror](https://github.com/dtolnay/thiserror) from 1.0.64 to 1.0.65.
- [Release notes](https://github.com/dtolnay/thiserror/releases)
- [Commits](https://github.com/dtolnay/thiserror/compare/1.0.64...1.0.65)

---
updated-dependencies:
- dependency-name: thiserror
  dependency-type: direct:production
  update-type: version-update:semver-patch
...

Signed-off-by: dependabot[bot] <support@github.com>
2024-10-28 10:46:24 -04:00
dependabot[bot] 955905148c chore(deps): bump utoipa from 5.1.1 to 5.1.3
Bumps [utoipa](https://github.com/juhaku/utoipa) from 5.1.1 to 5.1.3.
- [Release notes](https://github.com/juhaku/utoipa/releases)
- [Changelog](https://github.com/juhaku/utoipa/blob/master/utoipa-rapidoc/CHANGELOG.md)
- [Commits](https://github.com/juhaku/utoipa/compare/utoipa-5.1.1...utoipa-5.1.3)

---
updated-dependencies:
- dependency-name: utoipa
  dependency-type: direct:production
  update-type: version-update:semver-patch
...

Signed-off-by: dependabot[bot] <support@github.com>
2024-10-28 10:46:17 -04:00
dependabot[bot] b9da5ee9f6 chore(deps): bump tokio from 1.40.0 to 1.41.0
Bumps [tokio](https://github.com/tokio-rs/tokio) from 1.40.0 to 1.41.0.
- [Release notes](https://github.com/tokio-rs/tokio/releases)
- [Commits](https://github.com/tokio-rs/tokio/compare/tokio-1.40.0...tokio-1.41.0)

---
updated-dependencies:
- dependency-name: tokio
  dependency-type: direct:production
  update-type: version-update:semver-minor
...

Signed-off-by: dependabot[bot] <support@github.com>
2024-10-28 10:45:55 -04:00
dependabot[bot] 81d41b3cd8 chore(deps): bump bytes from 1.7.2 to 1.8.0
Bumps [bytes](https://github.com/tokio-rs/bytes) from 1.7.2 to 1.8.0.
- [Release notes](https://github.com/tokio-rs/bytes/releases)
- [Changelog](https://github.com/tokio-rs/bytes/blob/master/CHANGELOG.md)
- [Commits](https://github.com/tokio-rs/bytes/compare/v1.7.2...v1.8.0)

---
updated-dependencies:
- dependency-name: bytes
  dependency-type: direct:production
  update-type: version-update:semver-minor
...

Signed-off-by: dependabot[bot] <support@github.com>
2024-10-28 10:45:47 -04:00
dependabot[bot] fbf29a9350 chore(deps): bump actions/setup-python from 5.2.0 to 5.3.0
Bumps [actions/setup-python](https://github.com/actions/setup-python) from 5.2.0 to 5.3.0.
- [Release notes](https://github.com/actions/setup-python/releases)
- [Commits](https://github.com/actions/setup-python/compare/v5.2.0...v5.3.0)

---
updated-dependencies:
- dependency-name: actions/setup-python
  dependency-type: direct:production
  update-type: version-update:semver-minor
...

Signed-off-by: dependabot[bot] <support@github.com>
2024-10-28 09:51:15 -04:00
Dan Norris cfc7c4504a
fix(chart): reference the correct value for the Jetstream domain (#467)
Pull the `jetstreamDomain` value from `config.wadm.nats.jetstreamDomain`
instead of from `config.wadm.jetstreamDomain` since that is what we
define in the values file. It is also a more logical way to group the
value than what the chart was expecting.

Also bump the default version of wadm to the latest one.

Signed-off-by: Dan Norris <protochron@users.noreply.github.com>
2024-10-24 10:29:48 -04:00
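A minimal sketch of setting that value against the in-repo chart, assuming the config.wadm.nats.jetstreamDomain path named in the commit; the domain name itself is illustrative:

# install or upgrade wadm with an explicit JetStream domain
helm upgrade --install wadm ./charts/wadm \
  --set config.wadm.nats.jetstreamDomain=core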
Brooks Townsend 6f29e72932 release(wadm): 0.18, types and client 0.7
Signed-off-by: Brooks Townsend <brooksmtownsend@gmail.com>
2024-10-22 16:37:08 -04:00
dependabot[bot] 9ac409a28d chore(deps): bump anyhow from 1.0.89 to 1.0.91
Bumps [anyhow](https://github.com/dtolnay/anyhow) from 1.0.89 to 1.0.91.
- [Release notes](https://github.com/dtolnay/anyhow/releases)
- [Commits](https://github.com/dtolnay/anyhow/compare/1.0.89...1.0.91)

---
updated-dependencies:
- dependency-name: anyhow
  dependency-type: direct:production
  update-type: version-update:semver-patch
...

Signed-off-by: dependabot[bot] <support@github.com>
2024-10-22 14:28:00 -04:00
dependabot[bot] 1309c9bf1f chore(deps): bump uuid from 1.10.0 to 1.11.0
Bumps [uuid](https://github.com/uuid-rs/uuid) from 1.10.0 to 1.11.0.
- [Release notes](https://github.com/uuid-rs/uuid/releases)
- [Commits](https://github.com/uuid-rs/uuid/compare/1.10.0...1.11.0)

---
updated-dependencies:
- dependency-name: uuid
  dependency-type: direct:production
  update-type: version-update:semver-minor
...

Signed-off-by: dependabot[bot] <support@github.com>
2024-10-22 14:25:52 -04:00
dependabot[bot] 54740fbf62 chore(deps): bump serde_json from 1.0.128 to 1.0.132
Bumps [serde_json](https://github.com/serde-rs/json) from 1.0.128 to 1.0.132.
- [Release notes](https://github.com/serde-rs/json/releases)
- [Commits](https://github.com/serde-rs/json/compare/1.0.128...1.0.132)

---
updated-dependencies:
- dependency-name: serde_json
  dependency-type: direct:production
  update-type: version-update:semver-patch
...

Signed-off-by: dependabot[bot] <support@github.com>
2024-10-22 14:22:09 -04:00
Joonas Bergius eb34a928c6
fix(wadm-types): Address RUSTSEC-2024-0370 (#461)
* fix(wadm-types): Address RUSTSEC-2024-0370

Signed-off-by: Joonas Bergius <joonas@cosmonic.com>

* chore: Bump wadm-types version

Signed-off-by: Joonas Bergius <joonas@cosmonic.com>

---------

Signed-off-by: Joonas Bergius <joonas@cosmonic.com>
2024-10-22 11:20:10 -07:00
Joonas Bergius 4d2fc1a406
chore: Swap wolfi-base source to cgr.dev instead of Docker Hub (#462)
Signed-off-by: Joonas Bergius <joonas@cosmonic.com>
2024-10-21 16:15:25 -05:00
Brooks Townsend 08da607ad9 release(wadm): v0.18.0-rc.1
Signed-off-by: Brooks Townsend <brooksmtownsend@gmail.com>
2024-10-21 17:07:14 -04:00
Brooks Townsend 9972d4d903 refactor(wadm): address clippy warnings
Signed-off-by: Brooks Townsend <brooksmtownsend@gmail.com>
2024-10-18 12:49:04 -04:00
Brooks Townsend b459bea3fb test(upgrades): add link name for wasmCloud 1.3
Signed-off-by: Brooks Townsend <brooksmtownsend@gmail.com>
2024-10-18 12:49:04 -04:00
Brooks Townsend b7ef888072 chore: prefix shared annotation with experimental
Signed-off-by: Brooks Townsend <brooksmtownsend@gmail.com>
2024-10-18 12:49:04 -04:00
Brooks Townsend aa2689ab36 feat(server): ensure deployed apps find shared components
Signed-off-by: Brooks Townsend <brooksmtownsend@gmail.com>
2024-10-18 12:49:04 -04:00
Brooks Townsend ec08ba7316 test: add invalid shared tests
Signed-off-by: Brooks Townsend <brooksmtownsend@gmail.com>
2024-10-18 12:49:04 -04:00
Brooks Townsend 471f07fe67 chore(wit): update bindings for shared applications
Signed-off-by: Brooks Townsend <brooksmtownsend@gmail.com>
2024-10-18 12:49:04 -04:00
Brooks Townsend 0dbb3d102c test: add e2e_shared integration test
Signed-off-by: Brooks Townsend <brooksmtownsend@gmail.com>
2024-10-18 12:49:04 -04:00
Brooks Townsend 8830527b43 feat: add status_scaler
Signed-off-by: Brooks Townsend <brooksmtownsend@gmail.com>
2024-10-18 12:49:04 -04:00
Brooks Townsend 434aeafbb8 feat!: support shared components and providers
Signed-off-by: Brooks Townsend <brooksmtownsend@gmail.com>

fix: shared components id generation

Signed-off-by: Brooks Townsend <brooksmtownsend@gmail.com>
2024-10-18 12:49:04 -04:00
Taylor Thomas 05d5242d27 chore: Pull in slightly older version of regex
Because transitive deps suck. We need this so we can update the OCI deps
in the main host

Signed-off-by: Taylor Thomas <taylor@cosmonic.com>
2024-10-15 12:21:11 -06:00
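Pinning a transitive dependency like this is typically done through the lockfile. A sketch with cargo; the regex version shown is purely illustrative, since the commit does not state which release was chosen:

# pin regex to an older release in Cargo.lock (version is an example only)
cargo update -p regex --precise 1.10.6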
dependabot[bot] 77c012d6d1
chore(deps): bump clap from 4.5.19 to 4.5.20 (#454)
Bumps [clap](https://github.com/clap-rs/clap) from 4.5.19 to 4.5.20.
- [Release notes](https://github.com/clap-rs/clap/releases)
- [Changelog](https://github.com/clap-rs/clap/blob/master/CHANGELOG.md)
- [Commits](https://github.com/clap-rs/clap/compare/clap_complete-v4.5.19...clap_complete-v4.5.20)

---
updated-dependencies:
- dependency-name: clap
  dependency-type: direct:production
  update-type: version-update:semver-patch
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2024-10-14 09:06:42 -05:00
Brooks Townsend 3a066c35c6 chore(MAINTAINERS): add organizations
Signed-off-by: Brooks Townsend <brooksmtownsend@gmail.com>
2024-10-12 17:44:22 -06:00
Brooks Townsend e07481a66c chore: add MAINTAINERS.md
Signed-off-by: Brooks Townsend <brooksmtownsend@gmail.com>
2024-10-12 14:51:43 -04:00
Joonas Bergius 4b7233af2c
release: Bump wadm to 0.17.0 (#449)
Signed-off-by: Joonas Bergius <joonas@cosmonic.com>
2024-10-09 14:21:17 -05:00
92 changed files with 7757 additions and 2687 deletions


@@ -19,8 +19,7 @@
},
"extensions": [
"rust-lang.rust-analyzer",
"tamasfe.even-better-toml",
"serayuzgur.crates"
"tamasfe.even-better-toml"
]
}
},

.envrc (new file, 5 lines)

@@ -0,0 +1,5 @@
if ! has nix_direnv_version || ! nix_direnv_version 3.0.6; then
source_url "https://raw.githubusercontent.com/nix-community/nix-direnv/3.0.6/direnvrc" "sha256-RYcUJaRMf8oF5LznDrlCXbkOQrywm0HDv1VjYGaJGdM="
fi
watch_file rust-toolchain.toml
use flake
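The new .envrc above only takes effect once direnv trusts it. A short usage sketch with standard direnv commands:

# approve the new .envrc so the flake environment loads automatically
direnv allow
# re-evaluate after editing rust-toolchain.toml or the flake
direnv reload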


@@ -0,0 +1,38 @@
name: Install and configure wkg (linux only)
inputs:
wkg-version:
description: version of wkg to install. Should be a valid tag from https://github.com/bytecodealliance/wasm-pkg-tools/releases
default: "v0.6.0"
oci-username:
description: username for oci registry
required: true
oci-password:
description: password for oci registry
required: true
runs:
using: composite
steps:
- name: Download wkg
shell: bash
run: |
curl --fail -L https://github.com/bytecodealliance/wasm-pkg-tools/releases/download/${{ inputs.wkg-version }}/wkg-x86_64-unknown-linux-gnu -o wkg
chmod +x wkg;
echo "$(realpath .)" >> "$GITHUB_PATH";
- name: Generate and set wkg config
shell: bash
env:
WKG_OCI_USERNAME: ${{ inputs.oci-username }}
WKG_OCI_PASSWORD: ${{ inputs.oci-password }}
run: |
cat << EOF > wkg-config.toml
[namespace_registries]
wasmcloud = "wasmcloud.com"
wrpc = "bytecodealliance.org"
wasi = "wasi.dev"
[registry."wasmcloud.com".oci]
auth = { username = "${WKG_OCI_USERNAME}", password = "${WKG_OCI_PASSWORD}" }
EOF
echo "WKG_CONFIG_FILE=$(realpath wkg-config.toml)" >> $GITHUB_ENV

.github/release.yml (new vendored file, 6 lines)

@@ -0,0 +1,6 @@
# .github/release.yml
changelog:
exclude:
authors:
- dependabot


@@ -13,12 +13,15 @@ on:
- 'charts/**'
- '.github/workflows/chart.yml'
permissions:
contents: read
jobs:
validate:
runs-on: ubuntu-22.04
steps:
- uses: actions/checkout@v4
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
with:
fetch-depth: 0
@@ -27,18 +30,18 @@ jobs:
git fetch origin main:main
- name: Set up Helm
uses: azure/setup-helm@v4
uses: azure/setup-helm@b9e51907a09c216f16ebe8536097933489208112 # v4.3.0
with:
version: ${{ env.HELM_VERSION }}
# Used by helm chart-testing below
- name: Set up Python
uses: actions/setup-python@v5.2.0
uses: actions/setup-python@a26af69be951a213d495a4c3e4e4022e16d87065 # v5.6.0
with:
python-version: '3.12.2'
- name: Set up chart-testing
uses: helm/chart-testing-action@v2.6.1
uses: helm/chart-testing-action@0d28d3144d3a25ea2cc349d6e59901c4ff469b3b # v2.7.0
with:
version: v3.10.1
yamllint_version: 1.35.1
@@ -49,7 +52,7 @@
ct lint --config charts/wadm/ct.yaml
- name: Create kind cluster
uses: helm/kind-action@v1.10.0
uses: helm/kind-action@a1b0e391336a6ee6713a0583f8c6240d70863de3 # v1.12.0
with:
version: "v0.22.0"
@@ -72,13 +75,14 @@ jobs:
runs-on: ubuntu-22.04
needs: validate
permissions:
contents: read
packages: write
steps:
- uses: actions/checkout@v4
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
- name: Set up Helm
uses: azure/setup-helm@v4
uses: azure/setup-helm@b9e51907a09c216f16ebe8536097933489208112 # v4.3.0
with:
version: ${{ env.HELM_VERSION }}
@@ -87,7 +91,7 @@ jobs:
helm package charts/wadm -d .helm-charts
- name: Login to GHCR
uses: docker/login-action@v3
uses: docker/login-action@74a5d142397b4f367a81961eba4e8cd7edddf772 # v3.4.0
with:
registry: ghcr.io
username: ${{ github.repository_owner }}


@@ -5,6 +5,9 @@ on:
branches:
- main
permissions:
contents: read
jobs:
test:
name: e2e
@@ -12,21 +15,19 @@ jobs:
strategy:
fail-fast: false
matrix:
# TODO: Re-enable the multitenant and upgrades tests in followup to #247
# test: [e2e_multiple_hosts, e2e_multitenant, e2e_upgrades]
test: [e2e_multiple_hosts, e2e_upgrades]
test: [e2e_multiple_hosts, e2e_upgrades, e2e_shared]
steps:
- uses: actions/checkout@v4
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
- name: Install latest Rust stable toolchain
uses: dtolnay/rust-toolchain@stable
uses: dtolnay/rust-toolchain@1ff72ee08e3cb84d84adba594e0a297990fc1ed3 # stable
with:
toolchain: stable
components: clippy, rustfmt
# Cache: rust
- uses: Swatinem/rust-cache@v2
- uses: Swatinem/rust-cache@9d47c6ad4b02e050fd481d890b2ea34778fd09d6 # v2.7.8
with:
key: 'ubuntu-22.04-rust-cache'
@@ -46,7 +47,7 @@ jobs:
# if the previous step fails, upload logs
- name: Upload logs for debugging
uses: actions/upload-artifact@v4
uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2
if: ${{ failure() && steps.test.outcome == 'failure' }}
with:
name: e2e-logs-${{ matrix.test }}


@@ -9,6 +9,9 @@ on:
- 'client-v*'
workflow_dispatch: # Allow manual creation of artifacts without a release
permissions:
contents: read
defaults:
run:
shell: bash
@@ -58,7 +61,7 @@ jobs:
buildOutputPath: 'target/x86_64-pc-windows-msvc/release/wadm.exe',
}
steps:
- uses: actions/checkout@v4
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
- name: set the release version (tag)
if: ${{ startsWith(github.ref, 'refs/tags/v') }}
@@ -75,17 +78,19 @@ jobs:
run: echo "version=$RELEASE_VERSION" >> $GITHUB_OUTPUT
- name: Install Zig
uses: goto-bus-stop/setup-zig@v2
uses: mlugg/setup-zig@7dccf5e6d09267c55f815f2db29495f30ba2ebca # v2.0.1
with:
version: 0.13.0
- name: Install latest Rust stable toolchain
uses: dtolnay/rust-toolchain@stable
uses: dtolnay/rust-toolchain@1ff72ee08e3cb84d84adba594e0a297990fc1ed3 # stable
with:
toolchain: stable
components: clippy, rustfmt
target: ${{ matrix.config.target }}
- name: Install cargo zigbuild
uses: taiki-e/install-action@v2
uses: taiki-e/install-action@cfe1303741c2e620e5f7daa667105e0da1316db9 # v2.53.0
with:
tool: cargo-zigbuild
@@ -93,7 +98,7 @@ jobs:
run: |
${{ matrix.config.buildCommand }} --release --bin wadm --target ${{ matrix.config.target }}
- uses: actions/upload-artifact@v4
- uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2
with:
name: wadm-${{ env.RELEASE_VERSION }}-${{ matrix.config.uploadArtifactSuffix }}
if-no-files-found: error
@@ -105,11 +110,13 @@ jobs:
name: publish release assets
runs-on: ubuntu-latest
needs: build
permissions:
contents: write
env:
RELEASE_VERSION: ${{ needs.build.outputs.version_output }}
steps:
- name: Download release assets
uses: actions/download-artifact@v4
uses: actions/download-artifact@d3f86a106a0bac45b974a628896c90dbdf5c8093 # v4.3.0
- name: Prepare release
run: |
@@ -121,7 +128,7 @@ jobs:
done
- name: Create github release
uses: softprops/action-gh-release@v2
uses: softprops/action-gh-release@72f2c25fcb47643c292f7107632f7a47c1df5cd8 # v2.3.2
with:
token: ${{ secrets.GITHUB_TOKEN }}
prerelease: false
@@ -140,9 +147,9 @@ jobs:
runs-on: ubuntu-latest
needs: build
steps:
- uses: actions/checkout@v4
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
- name: Install latest Rust stable toolchain
uses: dtolnay/rust-toolchain@stable
uses: dtolnay/rust-toolchain@1ff72ee08e3cb84d84adba594e0a297990fc1ed3 # stable
with:
toolchain: stable
@@ -178,15 +185,15 @@ jobs:
env:
RELEASE_VERSION: ${{ needs.build.outputs.version_output }}
steps:
- uses: actions/checkout@v4
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
- name: Set up QEMU
uses: docker/setup-qemu-action@v3
uses: docker/setup-qemu-action@29109295f81e9208d7d86ff1c6c12d2833863392 # v3.6.0
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v3
uses: docker/setup-buildx-action@b5ca514318bd6ebac0fb2aedd5d36ec1b5c232a2 # v3.10.0
- uses: actions/download-artifact@v4
- uses: actions/download-artifact@d3f86a106a0bac45b974a628896c90dbdf5c8093 # v4.3.0
with:
path: ./artifacts
pattern: '*linux*'
@ -203,7 +210,7 @@ jobs:
done
- name: Login to GitHub Container Registry
uses: docker/login-action@v3
uses: docker/login-action@74a5d142397b4f367a81961eba4e8cd7edddf772 # v3.4.0
with:
registry: ghcr.io
username: ${{ github.repository_owner }}
@ -213,8 +220,13 @@ jobs:
run: |
echo "OWNER=${GITHUB_REPOSITORY_OWNER,,}" >>$GITHUB_ENV
- name: Set the formatted release version for the docker tag
if: ${{ startsWith(github.ref, 'refs/tags/v') }}
run: |
echo "RELEASE_VERSION_DOCKER_TAG=${RELEASE_VERSION#v}" >> $GITHUB_ENV
- name: Build and push (tag)
uses: docker/build-push-action@v6
uses: docker/build-push-action@263435318d21b8e681c14492fe198d362a7d2c83 # v6.18.0
if: ${{ startsWith(github.ref, 'refs/tags/v') }}
with:
push: true
@ -223,10 +235,13 @@ jobs:
build-args: |
BIN_ARM64=./artifacts/wadm-${{ env.RELEASE_VERSION }}-linux-aarch64
BIN_AMD64=./artifacts/wadm-${{ env.RELEASE_VERSION }}-linux-amd64
tags: ghcr.io/${{ env.OWNER }}/wadm:latest,ghcr.io/${{ env.OWNER }}/wadm:${{ env.RELEASE_VERSION }}
tags: |
ghcr.io/${{ env.OWNER }}/wadm:latest
ghcr.io/${{ env.OWNER }}/wadm:${{ env.RELEASE_VERSION }}
ghcr.io/${{ env.OWNER }}/wadm:${{ env.RELEASE_VERSION_DOCKER_TAG }}
- name: Build and push wolfi (tag)
uses: docker/build-push-action@v6
uses: docker/build-push-action@263435318d21b8e681c14492fe198d362a7d2c83 # v6.18.0
if: ${{ startsWith(github.ref, 'refs/tags/v') }}
with:
push: true
@ -236,10 +251,13 @@ jobs:
build-args: |
BIN_ARM64=./artifacts/wadm-${{ env.RELEASE_VERSION }}-linux-aarch64
BIN_AMD64=./artifacts/wadm-${{ env.RELEASE_VERSION }}-linux-amd64
tags: ghcr.io/${{ env.OWNER }}/wadm:latest-wolfi,ghcr.io/${{ env.OWNER }}/wadm:${{ env.RELEASE_VERSION }}-wolfi
tags: |
ghcr.io/${{ env.OWNER }}/wadm:latest-wolfi
ghcr.io/${{ env.OWNER }}/wadm:${{ env.RELEASE_VERSION }}-wolfi
ghcr.io/${{ env.OWNER }}/wadm:${{ env.RELEASE_VERSION_DOCKER_TAG }}-wolfi
- name: Build and push (main)
uses: docker/build-push-action@v6
uses: docker/build-push-action@263435318d21b8e681c14492fe198d362a7d2c83 # v6.18.0
if: ${{ github.ref == 'refs/heads/main' }}
with:
push: true
@ -251,7 +269,7 @@ jobs:
tags: ghcr.io/${{ env.OWNER }}/wadm:canary
- name: Build and push (main)
uses: docker/build-push-action@v6
uses: docker/build-push-action@263435318d21b8e681c14492fe198d362a7d2c83 # v6.18.0
if: ${{ github.ref == 'refs/heads/main' }}
with:
push: true

.github/workflows/scorecard.yml (new file)

@ -0,0 +1,73 @@
# This workflow uses actions that are not certified by GitHub. They are provided
# by a third-party and are governed by separate terms of service, privacy
# policy, and support documentation.
name: Scorecard supply-chain security
on:
# For Branch-Protection check. Only the default branch is supported. See
# https://github.com/ossf/scorecard/blob/main/docs/checks.md#branch-protection
branch_protection_rule:
# To guarantee Maintained check is occasionally updated. See
# https://github.com/ossf/scorecard/blob/main/docs/checks.md#maintained
schedule:
- cron: '28 13 * * 3'
push:
branches: [ "main" ]
# Declare default permissions as read only.
permissions: read-all
jobs:
analysis:
name: Scorecard analysis
runs-on: ubuntu-latest
permissions:
# Needed to upload the results to code-scanning dashboard.
security-events: write
# Needed to publish results and get a badge (see publish_results below).
id-token: write
# Uncomment the permissions below if installing in a private repository.
# contents: read
# actions: read
steps:
- name: "Checkout code"
uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
with:
persist-credentials: false
- name: "Run analysis"
uses: ossf/scorecard-action@05b42c624433fc40578a4040d5cf5e36ddca8cde # v2.4.2
with:
results_file: results.sarif
results_format: sarif
# (Optional) "write" PAT token. Uncomment the `repo_token` line below if:
# - you want to enable the Branch-Protection check on a *public* repository, or
# - you are installing Scorecard on a *private* repository
# To create the PAT, follow the steps in https://github.com/ossf/scorecard-action?tab=readme-ov-file#authentication-with-fine-grained-pat-optional.
# repo_token: ${{ secrets.SCORECARD_TOKEN }}
# Public repositories:
# - Publish results to OpenSSF REST API for easy access by consumers
# - Allows the repository to include the Scorecard badge.
# - See https://github.com/ossf/scorecard-action#publishing-results.
# For private repositories:
# - `publish_results` will always be set to `false`, regardless
# of the value entered here.
publish_results: true
# Upload the results as artifacts (optional). Commenting out will disable uploads of run results in SARIF
# format to the repository Actions tab.
- name: "Upload artifact"
uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2
with:
name: SARIF file
path: results.sarif
retention-days: 5
# Upload the results to GitHub's code scanning dashboard (optional).
# Commenting out will disable upload of results to your repo's Code Scanning dashboard
- name: "Upload to code-scanning"
uses: github/codeql-action/upload-sarif@fca7ace96b7d713c7035871441bd52efbe39e27e # v3.28.19
with:
sarif_file: results.sarif


@ -5,6 +5,9 @@ on:
branches:
- main
permissions:
contents: read
jobs:
test:
name: Test
@ -12,19 +15,19 @@ jobs:
strategy:
matrix:
os: [ubuntu-22.04]
nats_version: [2.10.7]
nats_version: [2.10.22]
steps:
- uses: actions/checkout@v4
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
- name: Install latest Rust stable toolchain
uses: dtolnay/rust-toolchain@stable
uses: dtolnay/rust-toolchain@1ff72ee08e3cb84d84adba594e0a297990fc1ed3 # stable
with:
toolchain: stable
components: clippy, rustfmt
# Cache: rust
- uses: Swatinem/rust-cache@v2
- uses: Swatinem/rust-cache@9d47c6ad4b02e050fd481d890b2ea34778fd09d6 # v2.7.8
with:
key: '${{ matrix.os }}-rust-cache'
@ -36,8 +39,11 @@ jobs:
echo 'Wadm JSON Schema is out of date. Please run `cargo run --bin wadm-schema` and commit the changes.'
exit 1
fi
- name: Install wash
uses: wasmCloud/common-actions/install-wash@main
- name: install wash
uses: taiki-e/install-action@cfe1303741c2e620e5f7daa667105e0da1316db9 # v2.53.0
with:
tool: wash@0.38.0
# GH Actions doesn't currently support passing args to service containers and there is no way
# to use an environment variable to turn on jetstream for nats, so we manually start it here
@ -48,6 +54,15 @@ jobs:
run: |
cargo build --all-features --all-targets --workspace
# Make sure the wadm crate works well with feature combinations
# The above command builds the workspace and tests with no features
- name: Check wadm crate with features
run: |
cargo check -p wadm --no-default-features
cargo check -p wadm --features cli
cargo check -p wadm --features http_admin
cargo check -p wadm --features cli,http_admin
# Run all tests
- name: Run tests
run: |


@ -3,28 +3,45 @@ name: wit-wasmcloud-wadm-publish
on:
push:
tags:
- 'wit-wasmcloud-wadm-v*'
- "wit-wasmcloud-wadm-v*"
permissions:
contents: read
jobs:
build:
runs-on: ubuntu-latest
permissions:
contents: write
packages: write
steps:
- uses: actions/checkout@v4
with:
sparse-checkout: |
wit
- name: Extract tag context
id: ctx
run: |
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
with:
sparse-checkout: |
wit
.github
- name: Extract tag context
id: ctx
run: |
version=${GITHUB_REF_NAME#wit-wasmcloud-wadm-v}
echo "version=${version}" >> "$GITHUB_OUTPUT"
echo "tarball=wit-wasmcloud-wadm-${version}.tar.gz" >> "$GITHUB_OUTPUT"
echo "version is ${version}"
- name: Build
run: |
tar cvzf ${{ steps.ctx.outputs.tarball }} -C wit wadm/wit
- name: Release
uses: softprops/action-gh-release@v2
with:
files: ${{ steps.ctx.outputs.tarball }}
make_latest: "false"
- uses: ./.github/actions/configure-wkg
with:
oci-username: ${{ github.repository_owner }}
oci-password: ${{ secrets.GITHUB_TOKEN }}
- name: Build
run: wkg wit build --wit-dir wit/wadm -o package.wasm
- name: Push version-tagged WebAssembly binary to GHCR
run: wkg publish package.wasm
- name: Package tarball for release
run: |
mkdir -p release/wit
cp wit/wadm/*.wit release/wit/
tar cvzf ${{ steps.ctx.outputs.tarball }} -C release wit
- name: Release
uses: softprops/action-gh-release@72f2c25fcb47643c292f7107632f7a47c1df5cd8 # v2.3.2
with:
files: ${{ steps.ctx.outputs.tarball }}
make_latest: "false"

.gitignore

@ -8,4 +8,7 @@ tests/e2e_log/
# Ignore IDE specific files
.idea/
.vscode/
.vscode/
.direnv/
result

Cargo.lock (generated; diff suppressed because it is too large)


@ -8,9 +8,10 @@ keywords = ["webassembly", "wasmcloud", "wadm"]
license = "Apache-2.0"
readme = "README.md"
repository = "https://github.com/wasmcloud/wadm"
default-run = "wadm"
[workspace.package]
version = "0.16.1"
version = "0.21.0"
[features]
default = []
@ -22,11 +23,7 @@ members = ["crates/*"]
[dependencies]
anyhow = { workspace = true }
async-nats = { workspace = true }
async-trait = { workspace = true }
clap = { workspace = true, features = ["derive", "cargo", "env"] }
futures = { workspace = true }
nkeys = { workspace = true }
# One version back to avoid clashes with 0.10 of otlp
opentelemetry = { workspace = true, features = ["rt-tokio"] }
# 0.10 to avoid protoc dep
@ -38,26 +35,28 @@ schemars = { workspace = true }
serde_json = { workspace = true }
tokio = { workspace = true, features = ["full"] }
tracing = { workspace = true, features = ["log"] }
tracing-futures = { workspace = true }
tracing-opentelemetry = { workspace = true }
tracing-subscriber = { workspace = true, features = ["env-filter", "json"] }
wasmcloud-control-interface = { workspace = true }
wadm = { workspace = true }
wadm = { workspace = true, features = ["cli", "http_admin"] }
wadm-types = { workspace = true }
[workspace.dependencies]
anyhow = "1"
async-nats = "0.36"
async-nats = "0.39"
async-trait = "0.1"
bytes = "1"
chrono = "0.4"
clap = { version = "4", features = ["derive", "cargo", "env"] }
cloudevents-sdk = "0.7"
cloudevents-sdk = "0.8"
futures = "0.3"
http = { version = "1", default-features = false }
http-body-util = { version = "0.1", default-features = false }
hyper = { version = "1", default-features = false }
hyper-util = { version = "0.1", default-features = false }
indexmap = { version = "2", features = ["serde"] }
jsonschema = "0.17"
jsonschema = "0.29"
lazy_static = "1"
nkeys = "0.4.4"
nkeys = "0.4.5"
# One version back to avoid clashes with 0.10 of otlp
opentelemetry = { version = "0.17", features = ["rt-tokio"] }
# 0.10 to avoid protoc dep
@ -65,39 +64,45 @@ opentelemetry-otlp = { version = "0.10", features = [
"http-proto",
"reqwest-client",
] }
rand = { version = "0.8", features = ["small_rng"] }
regex = "1.11.0"
rand = { version = "0.9", features = ["small_rng"] }
# NOTE(thomastaylor312): Pinning this temporarily to 1.10 due to transitive dependency with oci
# crates that are pinned to 1.10
regex = "~1.10"
schemars = "0.8"
semver = { version = "1.0.16", features = ["serde"] }
semver = { version = "1.0.25", features = ["serde"] }
serde = "1"
serde_json = "1"
serde_yaml = "0.9"
sha2 = "0.10.2"
thiserror = "1"
sha2 = "0.10.9"
thiserror = "2"
tokio = { version = "1", default-features = false }
tracing = { version = "0.1", features = ["log"] }
tracing-futures = "0.2"
tracing-opentelemetry = { version = "0.17" }
tracing-subscriber = { version = "0.3.7", features = ["env-filter", "json"] }
ulid = { version = "1", features = ["serde"] }
utoipa = "4"
utoipa = "5"
uuid = "1"
wadm = { version = "0.16.0", path = "./crates/wadm" }
wadm-client = { version = "0.6.0", path = "./crates/wadm-client" }
wadm-types = { version = "0.6.0", path = "./crates/wadm-types" }
wasmcloud-control-interface = { version = "2.2.0" }
wasmcloud-secrets-types = "0.2.0"
wit-bindgen-wrpc = { version = "0.3.7", default-features = false }
wadm = { version = "0.21", path = "./crates/wadm" }
wadm-client = { version = "0.10", path = "./crates/wadm-client" }
wadm-types = { version = "0.8", path = "./crates/wadm-types" }
wasmcloud-control-interface = "2.4.0"
wasmcloud-secrets-types = "0.5.0"
wit-bindgen-wrpc = { version = "0.9", default-features = false }
wit-bindgen = { version = "0.36.0", default-features = false }
[dev-dependencies]
async-nats = { workspace = true }
chrono = { workspace = true }
futures = { workspace = true }
serde = { workspace = true }
serde_json = { workspace = true }
serde_yaml = { workspace = true }
serial_test = "3"
wadm-client = { workspace = true }
wadm-types = { workspace = true }
testcontainers = "0.23"
wasmcloud-control-interface = { workspace = true }
testcontainers = "0.24"
[build-dependencies]
schemars = { workspace = true }


@ -1,4 +1,4 @@
FROM chainguard/wolfi-base:latest AS base
FROM cgr.dev/chainguard/wolfi-base:latest AS base
FROM base AS base-amd64
ARG BIN_AMD64

MAINTAINERS.md (new file)

@ -0,0 +1,25 @@
# MAINTAINERS
The following individuals are responsible for reviewing code, managing issues, and ensuring the overall quality of `wadm`.
## @wasmCloud/wadm-maintainers
Name: Joonas Bergius
GitHub: @joonas
Organization: Cosmonic
Name: Dan Norris
GitHub: @protochron
Organization: Cosmonic
Name: Taylor Thomas
GitHub: @thomastaylor312
Organization: Cosmonic
Name: Ahmed Tadde
GitHub: @ahmedtadde
Organization: PreciseTarget
Name: Brooks Townsend
GitHub: @brooksmtownsend
Organization: Cosmonic

SECURITY.md (new file)

@ -0,0 +1,3 @@
# Reporting a security issue
Please refer to the [wasmCloud Security Process and Policy](https://github.com/wasmCloud/wasmCloud/blob/main/SECURITY.md) for details on how to report security issues and vulnerabilities.


@ -15,10 +15,10 @@ type: application
# This is the chart version. This version number should be incremented each time you make changes
# to the chart and its templates, including the app version.
# Versions are expected to follow Semantic Versioning (https://semver.org/)
version: "0.2.7"
version: '0.2.10'
# This is the version number of the application being deployed. This version number should be
# incremented each time you make changes to the application. Versions are not expected to
# follow Semantic Versioning. They should reflect the version the application is using.
# It is recommended to use it with quotes.
appVersion: "v0.14.0"
appVersion: 'v0.21.0'


@ -36,10 +36,15 @@ Common labels
{{- define "wadm.labels" -}}
helm.sh/chart: {{ include "wadm.chart" . }}
{{ include "wadm.selectorLabels" . }}
app.kubernetes.io/component: wadm
{{- if .Chart.AppVersion }}
app.kubernetes.io/version: {{ .Chart.AppVersion | quote }}
{{- end }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
app.kubernetes.io/part-of: wadm
{{- with .Values.additionalLabels }}
{{ . | toYaml }}
{{- end }}
{{- end }}
{{/*
@ -98,4 +103,4 @@ volumes:
path: "nats.creds"
{{- end }}
{{- end }}
{{- end }}
{{- end }}


@ -56,9 +56,9 @@ spec:
- name: WADM_TRACING_ENDPOINT
value: {{ .Values.wadm.config.tracingEndpoint | quote }}
{{- end }}
{{- if .Values.wadm.config.jetstreamDomain }}
{{- if .Values.wadm.config.nats.jetstreamDomain }}
- name: WADM_JETSTREAM_DOMAIN
value: {{ .Values.wadm.config.jetstreamDomain | quote }}
value: {{ .Values.wadm.config.nats.jetstreamDomain | quote }}
{{- end }}
{{- if .Values.wadm.config.maxJobs }}
- name: WADM_MAX_JOBS


@ -34,6 +34,9 @@ imagePullSecrets: []
nameOverride: ""
fullnameOverride: ""
additionalLabels: {}
# app: wadm
serviceAccount:
# Specifies whether a service account should be created
create: true


@ -1,7 +1,7 @@
[package]
name = "wadm-client"
description = "A client library for interacting with the wadm API"
version = "0.6.0"
version = "0.10.0"
edition = "2021"
authors = ["wasmCloud Team"]
keywords = ["webassembly", "wasmcloud", "wadm"]
@ -11,14 +11,10 @@ repository = "https://github.com/wasmcloud/wadm"
[dependencies]
anyhow = { workspace = true }
async-nats = { workspace = true }
bytes = { workspace = true }
futures = { workspace = true }
nkeys = { workspace = true }
serde = { workspace = true }
serde_json = { workspace = true }
serde_yaml = { workspace = true }
thiserror = { workspace = true }
tokio = { workspace = true, features = ["full"] }
tracing = { workspace = true, features = ["log"] }
tracing-futures = { workspace = true }
wadm-types = { workspace = true }


@ -74,9 +74,8 @@ impl TopicGenerator {
/// Returns the full topic for WADM status subscriptions
pub fn wadm_status_topic(&self, app_name: &str) -> String {
format!(
"{}.{}.{}",
WADM_STATUS_API_PREFIX, self.topic_prefix, app_name
)
// Extract just the lattice name from topic_prefix
let lattice = self.topic_prefix.split('.').last().unwrap_or("default");
format!("{}.{}.{}", WADM_STATUS_API_PREFIX, lattice, app_name)
}
}


@ -1,7 +1,7 @@
[package]
name = "wadm-types"
description = "Types and validators for the wadm API"
version = "0.6.0"
version = "0.8.3"
edition = "2021"
authors = ["wasmCloud Team"]
keywords = ["webassembly", "wasmcloud", "wadm"]
@ -9,35 +9,20 @@ license = "Apache-2.0"
repository = "https://github.com/wasmcloud/wadm"
[features]
default = []
wit = ["wit-bindgen-wrpc"]
wit = []
[dependencies]
serde_yaml = { workspace = true }
anyhow = { workspace = true }
async-nats = { workspace = true }
async-trait = { workspace = true }
bytes = { workspace = true }
chrono = { workspace = true }
cloudevents-sdk = { workspace = true }
indexmap = { workspace = true, features = ["serde"] }
jsonschema = { workspace = true }
lazy_static = { workspace = true }
nkeys = { workspace = true }
rand = { workspace = true, features = ["small_rng"] }
regex = { workspace = true }
schemars = { workspace = true }
semver = { workspace = true, features = ["serde"] }
serde = { workspace = true }
serde = { workspace = true, features = ["derive"] }
serde_json = { workspace = true }
sha2 = { workspace = true }
thiserror = { workspace = true }
tokio = { workspace = true, features = ["full"] }
tracing = { workspace = true, features = ["log"] }
tracing-futures = { workspace = true }
ulid = { workspace = true, features = ["serde"] }
serde_yaml = { workspace = true }
utoipa = { workspace = true }
uuid = { workspace = true }
wasmcloud-control-interface = { workspace = true }
wasmcloud-secrets-types = { workspace = true }
wit-bindgen-wrpc = { workspace = true, optional = true }
[target.'cfg(not(target_family = "wasm"))'.dependencies]
tokio = { workspace = true, features = ["full"] }
wit-bindgen-wrpc = { workspace = true }
[target.'cfg(target_family = "wasm")'.dependencies]
wit-bindgen = { workspace = true, features = ["macros"] }


@ -283,6 +283,13 @@ impl StatusInfo {
message: message.to_owned(),
}
}
pub fn unhealthy(message: &str) -> Self {
StatusInfo {
status_type: StatusType::Unhealthy,
message: message.to_owned(),
}
}
}
/// All possible status types
@ -297,6 +304,7 @@ pub enum StatusType {
#[serde(alias = "ready")]
Deployed,
Failed,
Unhealthy,
}
// Implementing add makes it easy for us to get an aggregate status by summing all of them together
@ -324,6 +332,8 @@ impl std::ops::Add for StatusType {
(_, Self::Waiting) => Self::Waiting,
(Self::Reconciling, _) => Self::Reconciling,
(_, Self::Reconciling) => Self::Reconciling,
(Self::Unhealthy, _) => Self::Unhealthy,
(_, Self::Unhealthy) => Self::Unhealthy,
// This is technically covered in the first comparison, but we'll be explicit
(Self::Deployed, Self::Deployed) => Self::Deployed,
}
@ -391,6 +401,20 @@ mod test {
StatusType::Failed
));
assert!(matches!(
[StatusType::Deployed, StatusType::Unhealthy]
.into_iter()
.sum(),
StatusType::Unhealthy
));
assert!(matches!(
[StatusType::Reconciling, StatusType::Unhealthy]
.into_iter()
.sum(),
StatusType::Reconciling
));
let empty: Vec<StatusType> = Vec::new();
assert!(matches!(empty.into_iter().sum(), StatusType::Undeployed));
}
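The tests above double as usage documentation: any consumer of the status API can fold per-scaler statuses into an overall application status by summing them. A minimal sketch of that pattern, assuming StatusType and StatusInfo are exported from wadm_types::api and that the message text is only a placeholder:

use wadm_types::api::{StatusInfo, StatusType};

fn main() {
    // Per-scaler statuses for one application. Summing uses the Add impl above,
    // so the new Unhealthy variant outranks Deployed but yields to Failed,
    // Waiting, and Reconciling.
    let scaler_statuses = vec![
        StatusType::Deployed,
        StatusType::Unhealthy,
        StatusType::Deployed,
    ];
    let overall: StatusType = scaler_statuses.into_iter().sum();
    assert!(matches!(overall, StatusType::Unhealthy));

    // The new constructor mirrors the existing StatusInfo constructors.
    let status = StatusInfo::unhealthy("one or more components report an unhealthy state");
    let _ = status;
}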


@ -5,16 +5,37 @@ use crate::{
},
CapabilityProperties, Component, ComponentProperties, ConfigDefinition, ConfigProperty,
LinkProperty, Manifest, Metadata, Policy, Properties, SecretProperty, SecretSourceProperty,
Specification, Spread, SpreadScalerProperty, TargetConfig, Trait, TraitProperty,
SharedApplicationComponentProperties, Specification, Spread, SpreadScalerProperty,
TargetConfig, Trait, TraitProperty,
};
use wasmcloud::wadm;
#[cfg(all(feature = "wit", target_family = "wasm"))]
wit_bindgen::generate!({
path: "wit",
additional_derives: [
serde::Serialize,
serde::Deserialize,
],
with: {
"wasmcloud:wadm/types@0.2.0": generate,
"wasmcloud:wadm/client@0.2.0": generate,
"wasmcloud:wadm/handler@0.2.0": generate
}
});
#[cfg(all(feature = "wit", not(target_family = "wasm")))]
wit_bindgen_wrpc::generate!({
generate_unused_types: true,
additional_derives: [
serde::Serialize,
serde::Deserialize,
],
with: {
"wasmcloud:wadm/types@0.2.0": generate,
"wasmcloud:wadm/client@0.2.0": generate,
"wasmcloud:wadm/handler@0.2.0": generate
}
});
// Trait implementations for converting types in the API module to the generated types
@ -87,6 +108,7 @@ impl From<Properties> for wadm::types::Properties {
impl From<ComponentProperties> for wadm::types::ComponentProperties {
fn from(properties: ComponentProperties) -> Self {
wadm::types::ComponentProperties {
application: properties.application.map(Into::into),
image: properties.image,
id: properties.id,
config: properties.config.into_iter().map(|c| c.into()).collect(),
@ -98,6 +120,7 @@ impl From<ComponentProperties> for wadm::types::ComponentProperties {
impl From<CapabilityProperties> for wadm::types::CapabilityProperties {
fn from(properties: CapabilityProperties) -> Self {
wadm::types::CapabilityProperties {
application: properties.application.map(Into::into),
image: properties.image,
id: properties.id,
config: properties.config.into_iter().map(|c| c.into()).collect(),
@ -135,6 +158,17 @@ impl From<SecretSourceProperty> for wadm::types::SecretSourceProperty {
}
}
impl From<SharedApplicationComponentProperties>
for wadm::types::SharedApplicationComponentProperties
{
fn from(properties: SharedApplicationComponentProperties) -> Self {
wadm::types::SharedApplicationComponentProperties {
name: properties.name,
component: properties.component,
}
}
}
impl From<Trait> for wadm::types::Trait {
fn from(trait_: Trait) -> Self {
wadm::types::Trait {
@ -258,6 +292,7 @@ impl From<StatusType> for wadm::types::StatusType {
StatusType::Deployed => wadm::types::StatusType::Deployed,
StatusType::Failed => wadm::types::StatusType::Failed,
StatusType::Waiting => wadm::types::StatusType::Waiting,
StatusType::Unhealthy => wadm::types::StatusType::Unhealthy,
}
}
}
@ -272,6 +307,7 @@ impl From<wadm::types::StatusType> for StatusType {
wadm::types::StatusType::Deployed => StatusType::Deployed,
wadm::types::StatusType::Failed => StatusType::Failed,
wadm::types::StatusType::Waiting => StatusType::Waiting,
wadm::types::StatusType::Unhealthy => StatusType::Unhealthy,
}
}
}
@ -391,6 +427,7 @@ impl From<wadm::types::ComponentProperties> for ComponentProperties {
fn from(properties: wadm::types::ComponentProperties) -> Self {
ComponentProperties {
image: properties.image,
application: properties.application.map(Into::into),
id: properties.id,
config: properties.config.into_iter().map(|c| c.into()).collect(),
secrets: properties.secrets.into_iter().map(|c| c.into()).collect(),
@ -402,6 +439,7 @@ impl From<wadm::types::CapabilityProperties> for CapabilityProperties {
fn from(properties: wadm::types::CapabilityProperties) -> Self {
CapabilityProperties {
image: properties.image,
application: properties.application.map(Into::into),
id: properties.id,
config: properties.config.into_iter().map(|c| c.into()).collect(),
secrets: properties.secrets.into_iter().map(|c| c.into()).collect(),
@ -438,6 +476,17 @@ impl From<wadm::types::SecretSourceProperty> for SecretSourceProperty {
}
}
impl From<wadm::types::SharedApplicationComponentProperties>
for SharedApplicationComponentProperties
{
fn from(properties: wadm::types::SharedApplicationComponentProperties) -> Self {
SharedApplicationComponentProperties {
name: properties.name,
component: properties.component,
}
}
}
impl From<wadm::types::Trait> for Trait {
fn from(trait_: wadm::types::Trait) -> Self {
Trait {


@ -2,6 +2,7 @@ use std::collections::{BTreeMap, HashMap};
use schemars::JsonSchema;
use serde::{de, Deserialize, Serialize};
use utoipa::ToSchema;
pub mod api;
#[cfg(feature = "wit")]
@ -24,6 +25,8 @@ pub const VERSION_ANNOTATION_KEY: &str = "version";
/// The description key, as predefined by the [OAM
/// spec](https://github.com/oam-dev/spec/blob/master/metadata.md#annotations-format)
pub const DESCRIPTION_ANNOTATION_KEY: &str = "description";
/// The annotation key for shared applications
pub const SHARED_ANNOTATION_KEY: &str = "experimental.wasmcloud.dev/shared";
/// The identifier for the builtin spreadscaler trait type
pub const SPREADSCALER_TRAIT: &str = "spreadscaler";
/// The identifier for the builtin daemonscaler trait type
@ -33,9 +36,11 @@ pub const LINK_TRAIT: &str = "link";
/// The string used for indicating a latest version. It is explicitly forbidden to use as a version
/// for a manifest
pub const LATEST_VERSION: &str = "latest";
/// The default link name
pub const DEFAULT_LINK_NAME: &str = "default";
/// An OAM manifest
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Eq, utoipa::ToSchema, JsonSchema)]
/// Manifest file based on the Open Application Model (OAM) specification for declaratively managing wasmCloud applications
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Eq, ToSchema, JsonSchema)]
#[serde(deny_unknown_fields)]
pub struct Manifest {
/// The OAM version of the manifest
@ -67,11 +72,65 @@ impl Manifest {
.map(|v| v.as_str())
}
/// Indicates if the manifest is shared, meaning it can be used by multiple applications
pub fn shared(&self) -> bool {
self.metadata
.annotations
.get(SHARED_ANNOTATION_KEY)
.is_some_and(|v| v.parse::<bool>().unwrap_or(false))
}
/// Returns the components in the manifest
pub fn components(&self) -> impl Iterator<Item = &Component> {
self.spec.components.iter()
}
/// Helper function to find shared components that are missing from the given list of
/// deployed applications
pub fn missing_shared_components(&self, deployed_apps: &[&Manifest]) -> Vec<&Component> {
self.spec
.components
.iter()
.filter(|shared_component| {
match &shared_component.properties {
Properties::Capability {
properties:
CapabilityProperties {
image: None,
application: Some(shared_app),
..
},
}
| Properties::Component {
properties:
ComponentProperties {
image: None,
application: Some(shared_app),
..
},
} => {
if deployed_apps.iter().filter(|a| a.shared()).any(|m| {
m.metadata.name == shared_app.name
&& m.components().any(|c| {
c.name == shared_app.component
// This compares just the enum variant, not the actual properties
// For example, if we reference a shared component that's a capability,
// we want to make sure the deployed component is a capability.
&& std::mem::discriminant(&c.properties)
== std::mem::discriminant(&shared_component.properties)
})
}) {
false
} else {
true
}
}
_ => false,
}
})
.collect()
}
/// Returns only the WebAssembly components in the manifest
pub fn wasm_components(&self) -> impl Iterator<Item = &Component> {
self.components()
@ -115,7 +174,7 @@ impl Manifest {
}
/// The metadata describing the manifest
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Eq, JsonSchema)]
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Eq, ToSchema, JsonSchema)]
pub struct Metadata {
/// The name of the manifest. This must be unique per lattice
pub name: String,
@ -128,7 +187,7 @@ pub struct Metadata {
}
/// A representation of an OAM specification
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Eq, JsonSchema)]
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Eq, ToSchema, JsonSchema)]
pub struct Specification {
/// The list of components for describing an application
pub components: Vec<Component>,
@ -141,7 +200,7 @@ pub struct Specification {
}
/// A policy definition
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Eq, JsonSchema)]
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Eq, ToSchema, JsonSchema)]
pub struct Policy {
/// The name of this policy
pub name: String,
@ -153,9 +212,9 @@ pub struct Policy {
}
/// A component definition
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Eq, JsonSchema)]
// TODO: for some reason this works fine for capabilities but not components
//#[serde(deny_unknown_fields)]
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Eq, ToSchema, JsonSchema)]
// TODO: figure out why this can't be uncommented
// #[serde(deny_unknown_fields)]
pub struct Component {
/// The name of this component
pub name: String,
@ -199,10 +258,15 @@ impl Component {
};
secrets
}
/// Returns only links in the component
fn links(&self) -> impl Iterator<Item = &Trait> {
self.traits.iter().flatten().filter(|t| t.is_link())
}
}
/// Properties that can be defined for a component
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Eq, JsonSchema)]
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Eq, ToSchema, JsonSchema)]
#[serde(tag = "type")]
pub enum Properties {
#[serde(rename = "component", alias = "actor")]
@ -211,11 +275,17 @@ pub enum Properties {
Capability { properties: CapabilityProperties },
}
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Eq, JsonSchema)]
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Eq, ToSchema, JsonSchema)]
#[serde(deny_unknown_fields)]
pub struct ComponentProperties {
/// The image reference to use
pub image: String,
/// The image reference to use. Required unless the component is a shared component
/// that is defined in another shared application.
#[serde(skip_serializing_if = "Option::is_none")]
pub image: Option<String>,
/// Information to locate a component within a shared application. Cannot be specified
/// if the image is specified.
#[serde(skip_serializing_if = "Option::is_none")]
pub application: Option<SharedApplicationComponentProperties>,
/// The component ID to use for this component. If not supplied, it will be generated
/// as a combination of the [Metadata::name] and the image reference.
#[serde(skip_serializing_if = "Option::is_none")]
@ -230,7 +300,7 @@ pub struct ComponentProperties {
pub secrets: Vec<SecretProperty>,
}
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Eq, Default, JsonSchema)]
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Eq, Default, ToSchema, JsonSchema)]
pub struct ConfigDefinition {
#[serde(default, skip_serializing_if = "Vec::is_empty")]
pub config: Vec<ConfigProperty>,
@ -238,7 +308,7 @@ pub struct ConfigDefinition {
pub secrets: Vec<SecretProperty>,
}
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Eq, Hash, JsonSchema)]
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Eq, Hash, ToSchema, JsonSchema)]
pub struct SecretProperty {
/// The name of the secret. This is used by a reference by the component or capability to
/// get the secret value as a resource.
@ -248,7 +318,7 @@ pub struct SecretProperty {
pub properties: SecretSourceProperty,
}
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Eq, Hash, JsonSchema)]
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Eq, Hash, ToSchema, JsonSchema)]
pub struct SecretSourceProperty {
/// The policy to use for retrieving the secret.
pub policy: String,
@ -263,11 +333,17 @@ pub struct SecretSourceProperty {
pub version: Option<String>,
}
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Eq, JsonSchema)]
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Eq, ToSchema, JsonSchema)]
#[serde(deny_unknown_fields)]
pub struct CapabilityProperties {
/// The image reference to use
pub image: String,
/// The image reference to use. Required unless the component is a shared component
/// that is defined in another shared application.
#[serde(skip_serializing_if = "Option::is_none")]
pub image: Option<String>,
/// Information to locate a component within a shared application. Cannot be specified
/// if the image is specified.
#[serde(skip_serializing_if = "Option::is_none")]
pub application: Option<SharedApplicationComponentProperties>,
/// The component ID to use for this provider. If not supplied, it will be generated
/// as a combination of the [Metadata::name] and the image reference.
#[serde(skip_serializing_if = "Option::is_none")]
@ -282,7 +358,15 @@ pub struct CapabilityProperties {
pub secrets: Vec<SecretProperty>,
}
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Eq, JsonSchema)]
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Eq, ToSchema, JsonSchema)]
pub struct SharedApplicationComponentProperties {
/// The name of the shared application
pub name: String,
/// The name of the component in the shared application
pub component: String,
}
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Eq, ToSchema, JsonSchema)]
#[serde(deny_unknown_fields)]
pub struct Trait {
/// The type of trait specified. This should be a unique string for the type of scaler. As we
@ -329,7 +413,7 @@ impl Trait {
}
/// Properties for defining traits
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Eq, JsonSchema)]
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Eq, ToSchema, JsonSchema)]
#[serde(untagged)]
#[allow(clippy::large_enum_variant)]
pub enum TraitProperty {
@ -373,7 +457,7 @@ impl From<SpreadScalerProperty> for TraitProperty {
///
/// Will result in two config scalers being created, one with the name `basic-kv` and one with the
/// name `default-port`. Wadm will not resolve collisions with configuration names between manifests.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Eq, JsonSchema)]
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Eq, ToSchema, JsonSchema)]
#[serde(deny_unknown_fields)]
pub struct ConfigProperty {
/// Name of the config to ensure exists
@ -393,7 +477,7 @@ impl PartialEq<ConfigProperty> for String {
}
/// Properties for links
#[derive(Debug, Serialize, Clone, PartialEq, Eq, JsonSchema, Default)]
#[derive(Debug, Serialize, Clone, PartialEq, Eq, ToSchema, JsonSchema, Default)]
#[serde(deny_unknown_fields)]
pub struct LinkProperty {
/// WIT namespace for the link
@ -493,7 +577,7 @@ impl<'de> Deserialize<'de> for LinkProperty {
}
}
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Eq, Default, JsonSchema)]
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Eq, Default, ToSchema, JsonSchema)]
pub struct TargetConfig {
/// The target this link applies to. This should be the name of a component in the manifest
pub name: String,
@ -510,7 +594,7 @@ impl PartialEq<TargetConfig> for String {
}
/// Properties for spread scalers
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Eq, JsonSchema)]
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Eq, ToSchema, JsonSchema)]
#[serde(deny_unknown_fields)]
pub struct SpreadScalerProperty {
/// Number of instances to spread across matching requirements
@ -522,7 +606,7 @@ pub struct SpreadScalerProperty {
}
/// Configuration for various spreading requirements
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Eq, JsonSchema)]
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Eq, ToSchema, JsonSchema)]
#[serde(deny_unknown_fields)]
pub struct Spread {
/// The name of this spread requirement
@ -688,7 +772,7 @@ mod test {
&component.properties,
Properties::Capability {
properties: CapabilityProperties { image, .. }
} if image == "wasmcloud.azurecr.io/httpserver:0.13.1"
} if image.clone().expect("image to be present") == "wasmcloud.azurecr.io/httpserver:0.13.1"
)
})
.expect("Should find capability component")
@ -756,7 +840,8 @@ mod test {
name: "userinfo".to_string(),
properties: Properties::Component {
properties: ComponentProperties {
image: "wasmcloud.azurecr.io/fake:1".to_string(),
image: Some("wasmcloud.azurecr.io/fake:1".to_string()),
application: None,
id: None,
config: vec![],
secrets: vec![],
@ -769,7 +854,8 @@ mod test {
name: "webcap".to_string(),
properties: Properties::Capability {
properties: CapabilityProperties {
image: "wasmcloud.azurecr.io/httpserver:0.13.1".to_string(),
image: Some("wasmcloud.azurecr.io/httpserver:0.13.1".to_string()),
application: None,
id: None,
config: vec![],
secrets: vec![],
@ -797,7 +883,8 @@ mod test {
name: "ledblinky".to_string(),
properties: Properties::Capability {
properties: CapabilityProperties {
image: "wasmcloud.azurecr.io/ledblinky:0.0.1".to_string(),
image: Some("wasmcloud.azurecr.io/ledblinky:0.0.1".to_string()),
application: None,
id: None,
config: vec![],
secrets: vec![],
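Because the shared-application support surfaces here only as type changes, a small end-to-end sketch may help. Nothing below comes from the repository itself: the YAML layout follows the usual wadm/OAM manifest conventions, and the application names and image reference are invented. It parses one shared manifest and one consumer manifest with serde_yaml, then exercises Manifest::shared() and Manifest::missing_shared_components():

use wadm_types::Manifest;

fn main() -> anyhow::Result<()> {
    // An application marked as shared via the new annotation key
    let shared: Manifest = serde_yaml::from_str(
        r#"
apiVersion: core.oam.dev/v1beta1
kind: Application
metadata:
  name: shared-http
  annotations:
    version: v0.0.1
    experimental.wasmcloud.dev/shared: "true"
spec:
  components:
    - name: httpserver
      type: capability
      properties:
        image: ghcr.io/example/http-server:0.1.0
"#,
    )?;
    assert!(shared.shared());

    // A consumer that references the shared capability instead of an image
    let consumer: Manifest = serde_yaml::from_str(
        r#"
apiVersion: core.oam.dev/v1beta1
kind: Application
metadata:
  name: my-app
  annotations:
    version: v0.0.1
spec:
  components:
    - name: httpserver
      type: capability
      properties:
        application:
          name: shared-http
          component: httpserver
"#,
    )?;

    // Nothing is missing when the shared app is in the deployed list; with an
    // empty list the referenced httpserver component is reported back.
    assert!(consumer.missing_shared_components(&[&shared]).is_empty());
    assert_eq!(consumer.missing_shared_components(&[]).len(), 1);
    Ok(())
}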


@ -2,6 +2,7 @@
//!
use std::collections::{HashMap, HashSet};
#[cfg(not(target_family = "wasm"))]
use std::path::Path;
use std::sync::OnceLock;
@ -11,7 +12,7 @@ use serde::{Deserialize, Serialize};
use crate::{
CapabilityProperties, ComponentProperties, LinkProperty, Manifest, Properties, Trait,
TraitProperty, LATEST_VERSION,
TraitProperty, DEFAULT_LINK_NAME, LATEST_VERSION,
};
/// A namespace -> package -> interface lookup
@ -159,9 +160,10 @@ fn is_invalid_known_interface(
};
// Unknown interface inside known namespace and package is probably a bug
if !iface_lookup.contains_key(interface) {
// Unknown package inside a known interface we control is probably a bug
// Unknown package inside a known interface we control is probably a bug, but may be
// a new interface we don't know about yet
return vec![ValidationFailure::new(
ValidationFailureLevel::Error,
ValidationFailureLevel::Warning,
format!("unrecognized interface [{namespace}:{package}/{interface}]"),
)];
}
@ -273,6 +275,7 @@ impl ValidationOutput for Vec<ValidationFailure> {
/// # Arguments
///
/// * `path` - Path to the Manifest that will be read into memory and validated
#[cfg(not(target_family = "wasm"))]
pub async fn validate_manifest_file(
path: impl AsRef<Path>,
) -> Result<(Manifest, Vec<ValidationFailure>)> {
@ -296,9 +299,12 @@ pub async fn validate_manifest_file(
pub async fn validate_manifest_bytes(
content: impl AsRef<[u8]>,
) -> Result<(Manifest, Vec<ValidationFailure>)> {
let raw_yaml_content = content.as_ref();
let manifest =
serde_yaml::from_slice(content.as_ref()).context("failed to parse manifest content")?;
let failures = validate_manifest(&manifest).await?;
let mut failures = validate_manifest(&manifest).await?;
let mut yaml_issues = validate_raw_yaml(raw_yaml_content)?;
failures.append(&mut yaml_issues);
Ok((manifest, failures))
}
@ -341,6 +347,17 @@ pub async fn validate_manifest(manifest: &Manifest) -> Result<Vec<ValidationFail
failures.extend(check_dangling_links(manifest));
failures.extend(validate_policies(manifest));
failures.extend(ensure_no_custom_traits(manifest));
failures.extend(validate_component_properties(manifest));
failures.extend(check_duplicate_links(manifest));
failures.extend(validate_link_configs(manifest));
Ok(failures)
}
pub fn validate_raw_yaml(content: &[u8]) -> Result<Vec<ValidationFailure>> {
let mut failures = Vec::new();
let raw_content: serde_yaml::Value =
serde_yaml::from_slice(content).context("failed to read raw yaml content")?;
failures.extend(validate_components_configs(&raw_content));
Ok(failures)
}
@ -596,6 +613,180 @@ fn validate_policies(manifest: &Manifest) -> Vec<ValidationFailure> {
failures
}
/// Ensure that all components in a manifest either specify an image reference or a shared
/// component in a different manifest. Note that this does not validate that the image reference
/// is valid or that the shared component is valid, only that one of the two properties is set.
pub fn validate_component_properties(application: &Manifest) -> Vec<ValidationFailure> {
let mut failures = Vec::new();
for component in application.spec.components.iter() {
match &component.properties {
Properties::Component {
properties:
ComponentProperties {
image,
application,
config,
secrets,
..
},
}
| Properties::Capability {
properties:
CapabilityProperties {
image,
application,
config,
secrets,
..
},
} => match (image, application) {
(Some(_), Some(_)) => {
failures.push(ValidationFailure::new(
ValidationFailureLevel::Error,
"Component cannot have both 'image' and 'application' properties".into(),
));
}
(None, None) => {
failures.push(ValidationFailure::new(
ValidationFailureLevel::Error,
"Component must have either 'image' or 'application' property".into(),
));
}
// This is a problem because of our left-folding config implementation. A shared application
// could specify additional config and actually overwrite the original manifest's config.
(None, Some(shared_properties)) if !config.is_empty() => {
failures.push(ValidationFailure::new(
ValidationFailureLevel::Error,
format!(
"Shared component '{}' cannot specify additional 'config'",
shared_properties.name
),
));
}
(None, Some(shared_properties)) if !secrets.is_empty() => {
failures.push(ValidationFailure::new(
ValidationFailureLevel::Error,
format!(
"Shared component '{}' cannot specify additional 'secrets'",
shared_properties.name
),
));
}
// Shared application components already have scale properties defined in their original manifest
(None, Some(shared_properties))
if component
.traits
.as_ref()
.is_some_and(|traits| traits.iter().any(|trt| trt.is_scaler())) =>
{
failures.push(ValidationFailure::new(
ValidationFailureLevel::Error,
format!(
"Shared component '{}' cannot include a scaler trait",
shared_properties.name
),
));
}
_ => {}
},
}
}
failures
}
/// Validates link configs in a WADM application manifest.
///
/// At present this can check for:
/// - all configs that declare `properties` have unique names
/// (configs without properties refer to existing configs)
///
pub fn validate_link_configs(manifest: &Manifest) -> Vec<ValidationFailure> {
let mut failures = Vec::new();
let mut link_config_names = HashSet::new();
for link_trait in manifest.links() {
if let TraitProperty::Link(LinkProperty { target, source, .. }) = &link_trait.properties {
for config in &target.config {
// we only need to check for uniqueness of configs with properties
if config.properties.is_none() {
continue;
}
// Check if config name is unique
if !link_config_names.insert(config.name.clone()) {
failures.push(ValidationFailure::new(
ValidationFailureLevel::Error,
format!("Duplicate link config name found: '{}'", config.name),
));
}
}
if let Some(source) = source {
for config in &source.config {
// we only need to check for uniqueness of configs with properties
if config.properties.is_none() {
continue;
}
// Check if config name is unique
if !link_config_names.insert(config.name.clone()) {
failures.push(ValidationFailure::new(
ValidationFailureLevel::Error,
format!("Duplicate link config name found: '{}'", config.name),
));
}
}
}
}
}
failures
}
/// Function to validate the component configs.
/// From 0.13.0, `source_config` is deprecated and replaced with `source:config:`;
/// this function validates the raw yaml to check for the deprecated `source_config` and `target_config` keys
pub fn validate_components_configs(application: &serde_yaml::Value) -> Vec<ValidationFailure> {
let mut failures = Vec::new();
if let Some(specs) = application.get("spec") {
if let Some(components) = specs.get("components") {
if let Some(components_sequence) = components.as_sequence() {
for component in components_sequence.iter() {
failures.extend(get_deprecated_configs(component));
}
}
}
}
failures
}
fn get_deprecated_configs(component: &serde_yaml::Value) -> Vec<ValidationFailure> {
let mut failures = vec![];
if let Some(traits) = component.get("traits") {
if let Some(traits_sequence) = traits.as_sequence() {
for trait_ in traits_sequence.iter() {
if let Some(trait_type) = trait_.get("type") {
if trait_type.ne("link") {
continue;
}
}
if let Some(trait_properties) = trait_.get("properties") {
if trait_properties.get("source_config").is_some() {
failures.push(ValidationFailure {
level: ValidationFailureLevel::Warning,
msg: "one of the components' link trait contains a source_config key, please use source:config: rather".to_string(),
});
}
if trait_properties.get("target_config").is_some() {
failures.push(ValidationFailure {
level: ValidationFailureLevel::Warning,
msg: "one of the components' link trait contains a target_config key, please use target:config: rather".to_string(),
});
}
}
}
}
}
failures
}
/// This function validates that a key/value pair is a valid OAM label. It's using fairly
/// basic validation rules to ensure that the manifest isn't doing anything horribly wrong. Keeping
/// this function free of regex is intentional to keep this code functional but simple.
@ -640,6 +831,51 @@ pub fn is_valid_label_name(name: &str) -> bool {
.all(|c| c.is_ascii_alphanumeric() || c == '-' || c == '_' || c == '.')
}
/// Checks whether a manifest contains "duplicate" links.
///
/// Multiple links from the same source with the same name, namespace, package and interface
/// are considered duplicate links.
fn check_duplicate_links(manifest: &Manifest) -> Vec<ValidationFailure> {
let mut failures = Vec::new();
for component in manifest.components() {
let mut link_ids = HashSet::new();
for link in component.links() {
if let TraitProperty::Link(LinkProperty {
name,
namespace,
package,
interfaces,
..
}) = &link.properties
{
for interface in interfaces {
if !link_ids.insert((
name.clone()
.unwrap_or_else(|| DEFAULT_LINK_NAME.to_string()),
namespace,
package,
interface,
)) {
failures.push(ValidationFailure::new(
ValidationFailureLevel::Error,
format!(
"Duplicate link found inside component '{}': {} ({}:{}/{})",
component.name,
name.clone()
.unwrap_or_else(|| DEFAULT_LINK_NAME.to_string()),
namespace,
package,
interface
),
));
};
}
}
}
}
failures
}
#[cfg(test)]
mod tests {
use super::is_valid_manifest_name;
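As a concrete illustration of the new raw-YAML pass, the sketch below feeds a manifest that still uses the deprecated source_config key through validate_raw_yaml and expects the warning back. The module path wadm_types::validation and the manifest contents are assumptions made for the example:

use wadm_types::validation::validate_raw_yaml;

fn main() -> anyhow::Result<()> {
    // A link trait that still uses the deprecated `source_config` key
    let manifest = br#"
spec:
  components:
    - name: hello
      type: component
      properties:
        image: ghcr.io/example/hello:0.1.0
      traits:
        - type: link
          properties:
            source_config:
              - name: default-port
"#;

    // The raw pass only inspects the YAML structure, so nothing needs to be
    // deserialized into a full Manifest first
    let failures = validate_raw_yaml(manifest)?;
    assert_eq!(failures.len(), 1, "expected a warning suggesting source:config:");
    Ok(())
}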


@ -1,4 +1,4 @@
[wadm]
path = "../../../wit/wadm"
sha256 = "30b945b53e5dc1220f25da83449571e119cfd4029647a1908e5658d72335424e"
sha512 = "bbd7e5883dc4014ea246a33cf9386b11803cb330854e5691af526971c7131ad358eec9ad8f6dbf0ccd20efe0fedb43a3304f8e9538832d73cce7db09f82f1176"
sha256 = "9795ab1a83023da07da2dc28d930004bd913b9dbf07d68d9ef9207a44348a169"
sha512 = "9a94f33fd861912c81efd441cd19cc8066dbb2df5c2236d0472b66294bddc20ec5ad569484be18334d8c104ae9647b2c81c9878210ac35694ad8ba4a5b3780be"


@ -73,6 +73,7 @@ interface types {
deployed,
failed,
waiting,
unhealthy
}
enum deploy-result {
@ -117,7 +118,8 @@ interface types {
// Properties for a component
record component-properties {
image: string,
image: option<string>,
application: option<shared-application-component-properties>,
id: option<string>,
config: list<config-property>,
secrets: list<secret-property>,
@ -125,7 +127,8 @@ interface types {
// Properties for a capability
record capability-properties {
image: string,
image: option<string>,
application: option<shared-application-component-properties>,
id: option<string>,
config: list<config-property>,
secrets: list<secret-property>,
@ -187,6 +190,12 @@ interface types {
version: option<string>,
}
// Shared application component properties
record shared-application-component-properties {
name: string,
component: string
}
// Target configuration
record target-config {
name: string,
@ -206,4 +215,4 @@ interface types {
requirements: list<tuple<string, string>>,
weight: option<u32>,
}
}
}


@ -9,20 +9,29 @@ license = "Apache-2.0"
readme = "../../README.md"
repository = "https://github.com/wasmcloud/wadm"
[features]
# Enables clap attributes on the wadm configuration struct
cli = ["clap"]
http_admin = ["http", "http-body-util", "hyper", "hyper-util"]
default = []
[package.metadata.cargo-machete]
ignored = ["cloudevents-sdk"]
[dependencies]
anyhow = { workspace = true }
async-nats = { workspace = true }
async-trait = { workspace = true }
bytes = { workspace = true }
chrono = { workspace = true }
clap = { workspace = true, optional = true, features = ["derive", "cargo", "env"]}
cloudevents-sdk = { workspace = true }
http = { workspace = true, features = ["std"], optional = true }
http-body-util = { workspace = true, optional = true }
hyper = { workspace = true, optional = true }
hyper-util = { workspace = true, features = ["server"], optional = true }
futures = { workspace = true }
indexmap = { workspace = true, features = ["serde"] }
jsonschema = { workspace = true }
lazy_static = { workspace = true }
nkeys = { workspace = true }
rand = { workspace = true, features = ["small_rng"] }
regex = { workspace = true }
semver = { workspace = true, features = ["serde"] }
serde = { workspace = true }
serde_json = { workspace = true }

crates/wadm/src/config.rs (new file)

@ -0,0 +1,306 @@
#[cfg(feature = "http_admin")]
use core::net::SocketAddr;
use std::path::PathBuf;
#[cfg(feature = "cli")]
use clap::Parser;
use wadm_types::api::DEFAULT_WADM_TOPIC_PREFIX;
use crate::nats::StreamPersistence;
#[derive(Clone, Debug)]
#[cfg_attr(feature = "cli", derive(Parser))]
#[cfg_attr(feature = "cli", command(name = clap::crate_name!(), version = clap::crate_version!(), about = "wasmCloud Application Deployment Manager", long_about = None))]
pub struct WadmConfig {
/// The ID for this wadm process. Defaults to a random UUIDv4 if none is provided. This is used
/// to help with debugging when identifying which process is doing the work
#[cfg_attr(
feature = "cli",
arg(short = 'i', long = "host-id", env = "WADM_HOST_ID")
)]
pub host_id: Option<String>,
/// Whether or not to use structured log output (as JSON)
#[cfg_attr(
feature = "cli",
arg(
short = 'l',
long = "structured-logging",
default_value = "false",
env = "WADM_STRUCTURED_LOGGING"
)
)]
pub structured_logging: bool,
/// Whether or not to enable opentelemetry tracing
#[cfg_attr(
feature = "cli",
arg(
short = 't',
long = "tracing",
default_value = "false",
env = "WADM_TRACING_ENABLED"
)
)]
pub tracing_enabled: bool,
/// The endpoint to use for tracing. Setting this flag enables tracing, even if --tracing is set
/// to false. Defaults to http://localhost:4318/v1/traces if not set and tracing is enabled
#[cfg_attr(
feature = "cli",
arg(short = 'e', long = "tracing-endpoint", env = "WADM_TRACING_ENDPOINT")
)]
pub tracing_endpoint: Option<String>,
/// The NATS JetStream domain to connect to
#[cfg_attr(feature = "cli", arg(short = 'd', env = "WADM_JETSTREAM_DOMAIN"))]
pub domain: Option<String>,
/// (Advanced) Tweak the maximum number of jobs to run for handling events and commands. Be
/// careful how you use this as it can affect performance
#[cfg_attr(
feature = "cli",
arg(short = 'j', long = "max-jobs", env = "WADM_MAX_JOBS")
)]
pub max_jobs: Option<usize>,
/// The URL of the nats server you want to connect to
#[cfg_attr(
feature = "cli",
arg(
short = 's',
long = "nats-server",
env = "WADM_NATS_SERVER",
default_value = "127.0.0.1:4222"
)
)]
pub nats_server: String,
/// Use the specified nkey file or seed literal for authentication. Must be used in conjunction with --nats-jwt
#[cfg_attr(
feature = "cli",
arg(
long = "nats-seed",
env = "WADM_NATS_NKEY",
conflicts_with = "nats_creds",
requires = "nats_jwt"
)
)]
pub nats_seed: Option<String>,
/// Use the specified jwt file or literal for authentication. Must be used in conjunction with --nats-nkey
#[cfg_attr(
feature = "cli",
arg(
long = "nats-jwt",
env = "WADM_NATS_JWT",
conflicts_with = "nats_creds",
requires = "nats_seed"
)
)]
pub nats_jwt: Option<String>,
/// (Optional) NATS credential file to use when authenticating
#[cfg_attr(
feature = "cli", arg(
long = "nats-creds-file",
env = "WADM_NATS_CREDS_FILE",
conflicts_with_all = ["nats_seed", "nats_jwt"],
))]
pub nats_creds: Option<PathBuf>,
/// (Optional) NATS TLS CA certificate file to use when connecting
#[cfg_attr(
feature = "cli",
arg(long = "nats-tls-ca-file", env = "WADM_NATS_TLS_CA_FILE")
)]
pub nats_tls_ca_file: Option<PathBuf>,
/// Name of the bucket used for storage of lattice state
#[cfg_attr(
feature = "cli",
arg(
long = "state-bucket-name",
env = "WADM_STATE_BUCKET_NAME",
default_value = "wadm_state"
)
)]
pub state_bucket: String,
/// The amount of time in seconds to give for hosts to fail to heartbeat and be removed from the
/// store. By default, this is 70s because it is 2x the host heartbeat interval plus a little padding
#[cfg_attr(
feature = "cli",
arg(
long = "cleanup-interval",
env = "WADM_CLEANUP_INTERVAL",
default_value = "70"
)
)]
pub cleanup_interval: u64,
/// The API topic prefix to use. This is an advanced setting that should only be used if you
/// know what you are doing
#[cfg_attr(
feature = "cli", arg(
long = "api-prefix",
env = "WADM_API_PREFIX",
default_value = DEFAULT_WADM_TOPIC_PREFIX
))]
pub api_prefix: String,
/// This prefix is used for the internal streams. When running in a multitenant environment,
/// clients share the same JS domain (since messages need to come from lattices).
/// Setting a stream prefix makes it possible to have a separate stream for different wadms running in a multitenant environment.
/// This is an advanced setting that should only be used if you know what you are doing.
#[cfg_attr(
feature = "cli",
arg(long = "stream-prefix", env = "WADM_STREAM_PREFIX")
)]
pub stream_prefix: Option<String>,
/// Name of the bucket used for storage of manifests
#[cfg_attr(
feature = "cli",
arg(
long = "manifest-bucket-name",
env = "WADM_MANIFEST_BUCKET_NAME",
default_value = "wadm_manifests"
)
)]
pub manifest_bucket: String,
/// Run wadm in multitenant mode. This is for advanced multitenant use cases with segmented NATS
/// account traffic and not simple cases where all lattices use credentials from the same
/// account. See the deployment guide for more information
#[cfg_attr(
feature = "cli",
arg(long = "multitenant", env = "WADM_MULTITENANT", hide = true)
)]
pub multitenant: bool,
//
// Max bytes configuration for streams. Primarily configurable to enable deployment on NATS infra
// with limited resources.
//
/// Maximum bytes to keep for the state bucket
#[cfg_attr(
feature = "cli", arg(
long = "state-bucket-max-bytes",
env = "WADM_STATE_BUCKET_MAX_BYTES",
default_value_t = -1,
hide = true
))]
pub max_state_bucket_bytes: i64,
/// Maximum bytes to keep for the manifest bucket
#[cfg_attr(
feature = "cli", arg(
long = "manifest-bucket-max-bytes",
env = "WADM_MANIFEST_BUCKET_MAX_BYTES",
default_value_t = -1,
hide = true
))]
pub max_manifest_bucket_bytes: i64,
/// Nats streams storage type
#[cfg_attr(
feature = "cli", arg(
long = "stream-persistence",
env = "WADM_STREAM_PERSISTENCE",
default_value_t = StreamPersistence::File
))]
pub stream_persistence: StreamPersistence,
/// Maximum bytes to keep for the command stream
#[cfg_attr(
feature = "cli", arg(
long = "command-stream-max-bytes",
env = "WADM_COMMAND_STREAM_MAX_BYTES",
default_value_t = -1,
hide = true
))]
pub max_command_stream_bytes: i64,
/// Maximum bytes to keep for the event stream
#[cfg_attr(
feature = "cli", arg(
long = "event-stream-max-bytes",
env = "WADM_EVENT_STREAM_MAX_BYTES",
default_value_t = -1,
hide = true
))]
pub max_event_stream_bytes: i64,
/// Maximum bytes to keep for the event consumer stream
#[cfg_attr(
feature = "cli", arg(
long = "event-consumer-stream-max-bytes",
env = "WADM_EVENT_CONSUMER_STREAM_MAX_BYTES",
default_value_t = -1,
hide = true
))]
pub max_event_consumer_stream_bytes: i64,
/// Maximum bytes to keep for the status stream
#[cfg_attr(
feature = "cli", arg(
long = "status-stream-max-bytes",
env = "WADM_STATUS_STREAM_MAX_BYTES",
default_value_t = -1,
hide = true
))]
pub max_status_stream_bytes: i64,
/// Maximum bytes to keep for the notify stream
#[cfg_attr(
feature = "cli", arg(
long = "notify-stream-max-bytes",
env = "WADM_NOTIFY_STREAM_MAX_BYTES",
default_value_t = -1,
hide = true
))]
pub max_notify_stream_bytes: i64,
/// Maximum bytes to keep for the wasmbus event stream
#[cfg_attr(
feature = "cli", arg(
long = "wasmbus-event-stream-max-bytes",
env = "WADM_WASMBUS_EVENT_STREAM_MAX_BYTES",
default_value_t = -1,
hide = true
))]
pub max_wasmbus_event_stream_bytes: i64,
#[cfg(feature = "http_admin")]
#[cfg_attr(feature = "cli", clap(long = "http-admin", env = "WADM_HTTP_ADMIN"))]
/// HTTP administration endpoint address
pub http_admin: Option<SocketAddr>,
}
impl Default for WadmConfig {
fn default() -> Self {
Self {
host_id: None,
domain: None,
max_jobs: None,
nats_server: "127.0.0.1:4222".to_string(),
nats_seed: None,
nats_jwt: None,
nats_creds: None,
nats_tls_ca_file: None,
state_bucket: "wadm_state".to_string(),
cleanup_interval: 70,
api_prefix: DEFAULT_WADM_TOPIC_PREFIX.to_string(),
stream_prefix: None,
manifest_bucket: "wadm_manifests".to_string(),
multitenant: false,
max_state_bucket_bytes: -1,
max_manifest_bucket_bytes: -1,
stream_persistence: StreamPersistence::File,
max_command_stream_bytes: -1,
max_event_stream_bytes: -1,
max_event_consumer_stream_bytes: -1,
max_status_stream_bytes: -1,
max_notify_stream_bytes: -1,
max_wasmbus_event_stream_bytes: -1,
structured_logging: false,
tracing_enabled: false,
tracing_endpoint: None,
#[cfg(feature = "http_admin")]
http_admin: None,
}
}
}
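
As a reference for the defaults above, a minimal sketch of overriding a couple of fields when embedding wadm. The NATS address is a hypothetical example value, and the memory persistence choice simply illustrates the resource-constrained case mentioned in the field comments; everything else keeps the defaults shown in `Default::default()`.

```rust
use wadm::{config::WadmConfig, StreamPersistence};

fn example_config() -> WadmConfig {
    WadmConfig {
        // Hypothetical NATS address; all other fields keep the defaults shown above.
        nats_server: "nats.example.internal:4222".to_string(),
        // Memory-backed streams, e.g. for NATS infrastructure with limited resources.
        stream_persistence: StreamPersistence::Memory,
        ..Default::default()
    }
}
```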

View File

@ -1,6 +1,38 @@
use std::sync::Arc;
use std::time::Duration;
use anyhow::Result;
use async_nats::jetstream::{stream::Stream, Context};
use config::WadmConfig;
use tokio::{sync::Semaphore, task::JoinSet};
use tracing::log::debug;
#[cfg(feature = "http_admin")]
use anyhow::Context as _;
#[cfg(feature = "http_admin")]
use hyper::body::Bytes;
#[cfg(feature = "http_admin")]
use hyper_util::rt::{TokioExecutor, TokioIo};
#[cfg(feature = "http_admin")]
use tokio::net::TcpListener;
use crate::{
connections::ControlClientConstructor,
consumers::{
manager::{ConsumerManager, WorkerCreator},
*,
},
nats_utils::LatticeIdParser,
scaler::manager::{ScalerManager, WADM_NOTIFY_PREFIX},
server::{ManifestNotifier, Server},
storage::{nats_kv::NatsKvStore, reaper::Reaper},
workers::{CommandPublisher, CommandWorker, EventWorker, StatusPublisher},
};
pub use nats::StreamPersistence;
pub mod commands;
pub mod config;
pub mod consumers;
pub mod events;
pub mod nats_utils;
@ -10,7 +42,10 @@ pub mod server;
pub mod storage;
pub mod workers;
mod connections;
pub(crate) mod model;
mod nats;
mod observer;
#[cfg(test)]
pub mod test_util;
@ -39,3 +74,406 @@ pub const APP_SPEC_ANNOTATION: &str = "wasmcloud.dev/appspec";
pub const SCALER_KEY: &str = "wasmcloud.dev/scaler";
/// The default link name. In the future, this will likely be pulled in from another crate
pub const DEFAULT_LINK_NAME: &str = "default";
/// Default stream name for wadm events
pub const DEFAULT_WADM_EVENT_STREAM_NAME: &str = "wadm_events";
/// Default stream name for wadm event consumer
pub const DEFAULT_WADM_EVENT_CONSUMER_STREAM_NAME: &str = "wadm_event_consumer";
/// Default stream name for wadm commands
pub const DEFAULT_COMMAND_STREAM_NAME: &str = "wadm_commands";
/// Default stream name for wadm status
pub const DEFAULT_STATUS_STREAM_NAME: &str = "wadm_status";
/// Default stream name for wadm notifications
pub const DEFAULT_NOTIFY_STREAM_NAME: &str = "wadm_notify";
/// Default stream name for wasmbus events
pub const DEFAULT_WASMBUS_EVENT_STREAM_NAME: &str = "wasmbus_events";
/// Start wadm with the provided [WadmConfig], returning a [JoinSet] with two tasks:
/// 1. The server task that listens for API requests
/// 2. The observer task that listens for events and commands
///
/// When embedding wadm in another application, this function should be called to start the wadm
/// server and observer tasks.
///
/// # Usage
///
/// ```no_run
/// async {
/// let config = wadm::config::WadmConfig::default();
/// let mut wadm = wadm::start_wadm(config).await.expect("should start wadm");
/// tokio::select! {
/// res = wadm.join_next() => {
/// match res {
/// Some(Ok(_)) => {
/// tracing::info!("WADM has exited successfully");
/// std::process::exit(0);
/// }
/// Some(Err(e)) => {
/// tracing::error!("WADM has exited with an error: {:?}", e);
/// std::process::exit(1);
/// }
/// None => {
/// tracing::info!("WADM server did not start");
/// std::process::exit(0);
/// }
/// }
/// }
/// _ = tokio::signal::ctrl_c() => {
/// tracing::info!("Received Ctrl+C, shutting down");
/// std::process::exit(0);
/// }
/// }
/// };
/// ```
pub async fn start_wadm(config: WadmConfig) -> Result<JoinSet<Result<()>>> {
// Build storage adapter for lattice state (on by default)
let (client, context) = nats::get_client_and_context(
config.nats_server.clone(),
config.domain.clone(),
config.nats_seed.clone(),
config.nats_jwt.clone(),
config.nats_creds.clone(),
config.nats_tls_ca_file.clone(),
)
.await?;
// TODO: We will probably need to set up all the flags (like lattice prefix and topic prefix) down the line
let connection_pool = ControlClientConstructor::new(client.clone(), None);
let trimmer: &[_] = &['.', '>', '*'];
let store = nats::ensure_kv_bucket(
&context,
config.state_bucket,
1,
config.max_state_bucket_bytes,
config.stream_persistence.into(),
)
.await?;
let state_storage = NatsKvStore::new(store);
let manifest_storage = nats::ensure_kv_bucket(
&context,
config.manifest_bucket,
1,
config.max_manifest_bucket_bytes,
config.stream_persistence.into(),
)
.await?;
let internal_stream_name = |stream_name: &str| -> String {
match config.stream_prefix.clone() {
Some(stream_prefix) => {
format!(
"{}.{}",
stream_prefix.trim_end_matches(trimmer),
stream_name
)
}
None => stream_name.to_string(),
}
};
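// For example (hypothetical prefix): stream_prefix = Some("Iot.>".into()) yields
// "Iot.wadm_events" for the event stream, since trailing '.', '>' and '*' are trimmed.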
debug!("Ensuring wadm event stream");
let event_stream = nats::ensure_limits_stream(
&context,
internal_stream_name(DEFAULT_WADM_EVENT_STREAM_NAME),
vec![DEFAULT_WADM_EVENTS_TOPIC.to_owned()],
Some(
"A stream that stores all events coming in on the wadm.evt subject in a cluster"
.to_string(),
),
config.max_event_stream_bytes,
config.stream_persistence.into(),
)
.await?;
debug!("Ensuring command stream");
let command_stream = nats::ensure_stream(
&context,
internal_stream_name(DEFAULT_COMMAND_STREAM_NAME),
vec![DEFAULT_COMMANDS_TOPIC.to_owned()],
Some("A stream that stores all commands for wadm".to_string()),
config.max_command_stream_bytes,
config.stream_persistence.into(),
)
.await?;
let status_stream = nats::ensure_status_stream(
&context,
internal_stream_name(DEFAULT_STATUS_STREAM_NAME),
vec![DEFAULT_STATUS_TOPIC.to_owned()],
config.max_status_stream_bytes,
config.stream_persistence.into(),
)
.await?;
debug!("Ensuring wasmbus event stream");
// Remove the previous wadm_(multitenant)_mirror streams so that they don't
// prevent us from creating the new wasmbus_(multitenant)_events stream
// TODO(joonas): Remove this some time in the future once we're confident
// enough that there are no more wadm_(multitenant)_mirror streams around.
for mirror_stream_name in &["wadm_mirror", "wadm_multitenant_mirror"] {
if (context.get_stream(mirror_stream_name).await).is_ok() {
context.delete_stream(mirror_stream_name).await?;
}
}
let wasmbus_event_subjects = match config.multitenant {
true => vec![DEFAULT_MULTITENANT_EVENTS_TOPIC.to_owned()],
false => vec![DEFAULT_EVENTS_TOPIC.to_owned()],
};
let wasmbus_event_stream = nats::ensure_limits_stream(
&context,
DEFAULT_WASMBUS_EVENT_STREAM_NAME.to_string(),
wasmbus_event_subjects.clone(),
Some(
"A stream that stores all events coming in on the wasmbus.evt subject in a cluster"
.to_string(),
),
config.max_wasmbus_event_stream_bytes,
config.stream_persistence.into(),
)
.await?;
debug!("Ensuring notify stream");
let notify_stream = nats::ensure_notify_stream(
&context,
DEFAULT_NOTIFY_STREAM_NAME.to_owned(),
vec![format!("{WADM_NOTIFY_PREFIX}.*")],
config.max_notify_stream_bytes,
config.stream_persistence.into(),
)
.await?;
debug!("Ensuring event consumer stream");
let event_consumer_stream = nats::ensure_event_consumer_stream(
&context,
DEFAULT_WADM_EVENT_CONSUMER_STREAM_NAME.to_owned(),
DEFAULT_WADM_EVENT_CONSUMER_TOPIC.to_owned(),
vec![&wasmbus_event_stream, &event_stream],
Some(
"A stream that sources from wadm_events and wasmbus_events for wadm event consumer's use"
.to_string(),
),
config.max_event_consumer_stream_bytes,
config.stream_persistence.into(),
)
.await?;
debug!("Creating event consumer manager");
let permit_pool = Arc::new(Semaphore::new(
config.max_jobs.unwrap_or(Semaphore::MAX_PERMITS),
));
let event_worker_creator = EventWorkerCreator {
state_store: state_storage.clone(),
manifest_store: manifest_storage.clone(),
pool: connection_pool.clone(),
command_topic_prefix: DEFAULT_COMMANDS_TOPIC.trim_matches(trimmer).to_owned(),
publisher: context.clone(),
notify_stream,
status_stream: status_stream.clone(),
};
let events_manager: ConsumerManager<EventConsumer> = ConsumerManager::new(
permit_pool.clone(),
event_consumer_stream,
event_worker_creator.clone(),
config.multitenant,
)
.await;
debug!("Creating command consumer manager");
let command_worker_creator = CommandWorkerCreator {
pool: connection_pool,
};
let commands_manager: ConsumerManager<CommandConsumer> = ConsumerManager::new(
permit_pool.clone(),
command_stream,
command_worker_creator.clone(),
config.multitenant,
)
.await;
// TODO(thomastaylor312): We might want to figure out how not to run this globally. Sending a
// synthetic event to the stream could be nice, but all the wadm processes would still fire
// off that tick, resulting in multiple processes handling it. We could maybe get it to work with the
// right duplicate window, but we have no idea when each process could fire a tick. Worst case
// scenario right now is that multiple fire simultaneously and a few of them just delete nothing.
let reaper = Reaper::new(
state_storage.clone(),
Duration::from_secs(config.cleanup_interval / 2),
[],
);
let wadm_event_prefix = DEFAULT_WADM_EVENTS_TOPIC.trim_matches(trimmer);
debug!("Creating lattice observer");
let observer = observer::Observer {
parser: LatticeIdParser::new("wasmbus", config.multitenant),
command_manager: commands_manager,
event_manager: events_manager,
reaper,
client: client.clone(),
command_worker_creator,
event_worker_creator,
};
debug!("Subscribing to API topic");
let server = Server::new(
manifest_storage,
client,
Some(&config.api_prefix),
config.multitenant,
status_stream,
ManifestNotifier::new(wadm_event_prefix, context),
)
.await?;
let mut tasks = JoinSet::new();
#[cfg(feature = "http_admin")]
if let Some(addr) = config.http_admin {
debug!("Setting up HTTP administration endpoint");
let socket = TcpListener::bind(addr)
.await
.context("failed to bind on HTTP administation endpoint")?;
let svc = hyper::service::service_fn(move |req| {
const OK: &str = r#"{"status":"ok"}"#;
async move {
let (http::request::Parts { method, uri, .. }, _) = req.into_parts();
match (method.as_str(), uri.path()) {
("HEAD", "/livez") => Ok(http::Response::default()),
("GET", "/livez") => Ok(http::Response::new(http_body_util::Full::new(
Bytes::from(OK),
))),
(method, "/livez") => http::Response::builder()
.status(http::StatusCode::METHOD_NOT_ALLOWED)
.body(http_body_util::Full::new(Bytes::from(format!(
"method `{method}` not supported for path `/livez`"
)))),
("HEAD", "/readyz") => Ok(http::Response::default()),
("GET", "/readyz") => Ok(http::Response::new(http_body_util::Full::new(
Bytes::from(OK),
))),
(method, "/readyz") => http::Response::builder()
.status(http::StatusCode::METHOD_NOT_ALLOWED)
.body(http_body_util::Full::new(Bytes::from(format!(
"method `{method}` not supported for path `/readyz`"
)))),
(.., path) => http::Response::builder()
.status(http::StatusCode::NOT_FOUND)
.body(http_body_util::Full::new(Bytes::from(format!(
"unknown endpoint `{path}`"
)))),
}
}
});
let srv = hyper_util::server::conn::auto::Builder::new(TokioExecutor::new());
tasks.spawn(async move {
loop {
let stream = match socket.accept().await {
Ok((stream, _)) => stream,
Err(err) => {
tracing::error!(?err, "failed to accept HTTP administration connection");
continue;
}
};
if let Err(err) = srv.serve_connection(TokioIo::new(stream), svc).await {
tracing::error!(?err, "failed to serve HTTP administration connection");
}
}
});
}
// Subscribe and handle API requests
tasks.spawn(server.serve());
// Observe and handle events
tasks.spawn(observer.observe(wasmbus_event_subjects));
Ok(tasks)
}
#[derive(Clone)]
struct CommandWorkerCreator {
pool: ControlClientConstructor,
}
#[async_trait::async_trait]
impl WorkerCreator for CommandWorkerCreator {
type Output = CommandWorker;
async fn create(
&self,
lattice_id: &str,
multitenant_prefix: Option<&str>,
) -> anyhow::Result<Self::Output> {
let client = self.pool.get_connection(lattice_id, multitenant_prefix);
Ok(CommandWorker::new(client))
}
}
#[derive(Clone)]
struct EventWorkerCreator<StateStore> {
state_store: StateStore,
manifest_store: async_nats::jetstream::kv::Store,
pool: ControlClientConstructor,
command_topic_prefix: String,
publisher: Context,
notify_stream: Stream,
status_stream: Stream,
}
#[async_trait::async_trait]
impl<StateStore> WorkerCreator for EventWorkerCreator<StateStore>
where
StateStore: crate::storage::Store + Send + Sync + Clone + 'static,
{
type Output = EventWorker<StateStore, wasmcloud_control_interface::Client, Context>;
async fn create(
&self,
lattice_id: &str,
multitenant_prefix: Option<&str>,
) -> anyhow::Result<Self::Output> {
let client = self.pool.get_connection(lattice_id, multitenant_prefix);
let command_publisher = CommandPublisher::new(
self.publisher.clone(),
&format!("{}.{lattice_id}", self.command_topic_prefix),
);
let status_publisher = StatusPublisher::new(
self.publisher.clone(),
Some(self.status_stream.clone()),
&format!("wadm.status.{lattice_id}"),
);
let manager = ScalerManager::new(
self.publisher.clone(),
self.notify_stream.clone(),
lattice_id,
multitenant_prefix,
self.state_store.clone(),
self.manifest_store.clone(),
command_publisher.clone(),
status_publisher.clone(),
client.clone(),
)
.await?;
Ok(EventWorker::new(
self.state_store.clone(),
client,
command_publisher,
status_publisher,
manager,
))
}
}

View File

@ -5,17 +5,52 @@ use async_nats::{
jetstream::{
self,
kv::{Config as KvConfig, Store},
stream::{Config as StreamConfig, Source, Stream, SubjectTransform},
stream::{Config as StreamConfig, Source, StorageType, Stream, SubjectTransform},
Context,
},
Client, ConnectOptions,
};
use crate::DEFAULT_EXPIRY_TIME;
use tracing::{debug, warn};
use wadm::DEFAULT_EXPIRY_TIME;
#[derive(Debug, Clone, Copy, Default)]
pub enum StreamPersistence {
#[default]
File,
Memory,
}
impl std::fmt::Display for StreamPersistence {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
match self {
StreamPersistence::File => write!(f, "file"),
StreamPersistence::Memory => write!(f, "memory"),
}
}
}
impl From<StreamPersistence> for StorageType {
fn from(persistence: StreamPersistence) -> Self {
match persistence {
StreamPersistence::File => StorageType::File,
StreamPersistence::Memory => StorageType::Memory,
}
}
}
impl From<&str> for StreamPersistence {
fn from(persistence: &str) -> Self {
match persistence {
"file" => StreamPersistence::File,
"memory" => StreamPersistence::Memory,
_ => StreamPersistence::File,
}
}
}
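
A small usage sketch of the conversions above, assuming `StreamPersistence` is in scope; the `"memory"` string stands in for a value such as the one supplied via `WADM_STREAM_PERSISTENCE`.

```rust
use async_nats::jetstream::stream::StorageType;

fn persistence_sketch() {
    // Unknown values fall back to file storage per the From<&str> impl above.
    let persistence = StreamPersistence::from("memory");
    let storage: StorageType = persistence.into();
    assert!(matches!(storage, StorageType::Memory));
}
```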
/// Creates a NATS client from the given options
pub async fn get_client_and_context(
pub(crate) async fn get_client_and_context(
url: String,
js_domain: Option<String>,
seed: Option<String>,
@ -121,6 +156,7 @@ pub async fn ensure_stream(
subjects: Vec<String>,
description: Option<String>,
max_bytes: i64,
storage: StorageType,
) -> Result<Stream> {
debug!("Ensuring stream {name} exists");
let stream_config = StreamConfig {
@ -130,9 +166,9 @@ pub async fn ensure_stream(
retention: async_nats::jetstream::stream::RetentionPolicy::WorkQueue,
subjects,
max_age: DEFAULT_EXPIRY_TIME,
storage: async_nats::jetstream::stream::StorageType::File,
allow_rollup: false,
max_bytes,
storage,
..Default::default()
};
@ -161,6 +197,7 @@ pub async fn ensure_limits_stream(
subjects: Vec<String>,
description: Option<String>,
max_bytes: i64,
storage: StorageType,
) -> Result<Stream> {
debug!("Ensuring stream {name} exists");
let stream_config = StreamConfig {
@ -170,9 +207,9 @@ pub async fn ensure_limits_stream(
retention: async_nats::jetstream::stream::RetentionPolicy::Limits,
subjects,
max_age: DEFAULT_EXPIRY_TIME,
storage: async_nats::jetstream::stream::StorageType::File,
allow_rollup: false,
max_bytes,
storage,
..Default::default()
};
@ -202,6 +239,7 @@ pub async fn ensure_event_consumer_stream(
streams: Vec<&Stream>,
description: Option<String>,
max_bytes: i64,
storage: StorageType,
) -> Result<Stream> {
debug!("Ensuring stream {name} exists");
// This maps the upstream (wasmbus.evt.*.> & wadm.evt.*.>) Streams into
@ -242,9 +280,9 @@ pub async fn ensure_event_consumer_stream(
subjects: vec![],
max_age: DEFAULT_EXPIRY_TIME,
sources: Some(sources),
storage: async_nats::jetstream::stream::StorageType::File,
allow_rollup: false,
max_bytes,
storage,
..Default::default()
};
@ -268,6 +306,7 @@ pub async fn ensure_status_stream(
name: String,
subjects: Vec<String>,
max_bytes: i64,
storage: StorageType,
) -> Result<Stream> {
debug!("Ensuring stream {name} exists");
context
@ -282,8 +321,8 @@ pub async fn ensure_status_stream(
max_messages_per_subject: 10,
subjects,
max_age: std::time::Duration::from_nanos(0),
storage: async_nats::jetstream::stream::StorageType::File,
max_bytes,
storage,
..Default::default()
})
.await
@ -296,6 +335,7 @@ pub async fn ensure_notify_stream(
name: String,
subjects: Vec<String>,
max_bytes: i64,
storage: StorageType,
) -> Result<Stream> {
debug!("Ensuring stream {name} exists");
context
@ -306,8 +346,8 @@ pub async fn ensure_notify_stream(
retention: async_nats::jetstream::stream::RetentionPolicy::Interest,
subjects,
max_age: DEFAULT_EXPIRY_TIME,
storage: async_nats::jetstream::stream::StorageType::File,
max_bytes,
storage,
..Default::default()
})
.await
@ -321,6 +361,7 @@ pub async fn ensure_kv_bucket(
name: String,
history_to_keep: i64,
max_bytes: i64,
storage: StorageType,
) -> Result<Store> {
debug!("Ensuring kv bucket {name} exists");
if let Ok(kv) = context.get_key_value(&name).await {
@ -331,7 +372,7 @@ pub async fn ensure_kv_bucket(
bucket: name,
history: history_to_keep,
num_replicas: 1,
storage: jetstream::stream::StorageType::File,
storage,
max_bytes,
..Default::default()
})
@ -348,7 +389,7 @@ mod test {
#[tokio::test]
async fn can_resolve_jwt_value_and_file() -> Result<()> {
let my_jwt = "eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJ2aWRlb0lkIjoiUWpVaUxYSnVjMjl0IiwiaWF0IjoxNjIwNjAzNDY5fQ.2PKx6y2ym6IWbeM6zFgHOkDnZEtGTR3YgYlQ2_Jki5g";
let jwt_path = "./tests/fixtures/nats.jwt";
let jwt_path = "../../tests/fixtures/nats.jwt";
let jwt_inside_file = "eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJzdHJpbmciOiAiQWNjb3JkIHRvIGFsbCBrbm93biBsb3dzIG9mIGF2aWF0aW9uLCB0aGVyZSBpcyBubyB3YXkgdGhhdCBhIGJlZSBhYmxlIHRvIGZseSJ9.GyU6pTRhflcOg6KBCU6wZedP8BQzLXbdgYIoU6KzzD8";
assert_eq!(

View File

@ -4,7 +4,7 @@ use async_nats::Subscriber;
use futures::{stream::SelectAll, StreamExt, TryFutureExt};
use tracing::{debug, error, instrument, trace, warn};
use wadm::{
use crate::{
consumers::{
manager::{ConsumerManager, WorkerCreator},
CommandConsumer, EventConsumer,

View File

@ -0,0 +1,780 @@
//! Contains code for converting the list of [`Component`]s in an application into a list of [`Scaler`]s
//! that are responsible for monitoring and enforcing the desired state of a lattice
use std::{collections::HashMap, time::Duration};
use anyhow::Result;
use tracing::{error, warn};
use wadm_types::{
api::StatusInfo, CapabilityProperties, Component, ComponentProperties, ConfigProperty,
LinkProperty, Policy, Properties, SecretProperty, SharedApplicationComponentProperties,
SpreadScalerProperty, Trait, TraitProperty, DAEMONSCALER_TRAIT, LINK_TRAIT, SPREADSCALER_TRAIT,
};
use wasmcloud_secrets_types::SECRET_PREFIX;
use crate::{
publisher::Publisher,
scaler::{
spreadscaler::{link::LINK_SCALER_KIND, ComponentSpreadScaler, SPREAD_SCALER_KIND},
statusscaler::StatusScaler,
Scaler,
},
storage::{snapshot::SnapshotStore, ReadStore},
workers::{ConfigSource, LinkSource, SecretSource},
DEFAULT_LINK_NAME,
};
use super::{
configscaler::ConfigScaler,
daemonscaler::{provider::ProviderDaemonScaler, ComponentDaemonScaler},
secretscaler::SecretScaler,
spreadscaler::{
link::{LinkScaler, LinkScalerConfig},
provider::{ProviderSpreadConfig, ProviderSpreadScaler},
},
BackoffWrapper,
};
pub(crate) type BoxedScaler = Box<dyn Scaler + Send + Sync + 'static>;
pub(crate) type ScalerList = Vec<BoxedScaler>;
const EMPTY_TRAIT_VEC: Vec<Trait> = Vec::new();
/// Converts a list of manifest [`Component`]s into a [`ScalerList`], resolving shared application
/// references, links, configuration and secrets as necessary.
///
/// # Arguments
/// * `components` - The list of components to convert
/// * `policies` - The policies to use when creating the scalers so they can access secrets
/// * `lattice_id` - The lattice id the scalers operate on
/// * `notifier` - The publisher to use when creating the scalers so they can report status
/// * `name` - The name of the manifest that the scalers are being created for
/// * `notifier_subject` - The subject to use when creating the scalers so they can report status
/// * `snapshot_data` - The store to use when creating the scalers so they can access lattice state
pub(crate) fn manifest_components_to_scalers<S, P, L>(
components: &[Component],
policies: &HashMap<&String, &Policy>,
lattice_id: &str,
manifest_name: &str,
notifier_subject: &str,
notifier: &P,
snapshot_data: &SnapshotStore<S, L>,
) -> ScalerList
where
S: ReadStore + Send + Sync + Clone + 'static,
P: Publisher + Clone + Send + Sync + 'static,
L: LinkSource + ConfigSource + SecretSource + Clone + Send + Sync + 'static,
{
let mut scalers: ScalerList = Vec::new();
components
.iter()
.for_each(|component| match &component.properties {
Properties::Component { properties } => {
// Determine if this component is contained in this manifest or a shared application
let (application_name, component_name) = match resolve_manifest_component(
manifest_name,
&component.name,
properties.image.as_ref(),
properties.application.as_ref(),
) {
Ok(names) => names,
Err(err) => {
error!(err);
scalers.push(Box::new(StatusScaler::new(
uuid::Uuid::new_v4().to_string(),
SPREAD_SCALER_KIND,
&component.name,
StatusInfo::failed(err),
)) as BoxedScaler);
return;
}
};
component_scalers(
&mut scalers,
components,
properties,
component.traits.as_ref(),
manifest_name,
application_name,
component_name,
lattice_id,
policies,
notifier_subject,
notifier,
snapshot_data,
)
}
Properties::Capability { properties } => {
// Determine if this component is contained in this manifest or a shared application
let (application_name, component_name) = match resolve_manifest_component(
manifest_name,
&component.name,
properties.image.as_ref(),
properties.application.as_ref(),
) {
Ok(names) => names,
Err(err) => {
error!(err);
scalers.push(Box::new(StatusScaler::new(
uuid::Uuid::new_v4().to_string(),
SPREAD_SCALER_KIND,
&component.name,
StatusInfo::failed(err),
)) as BoxedScaler);
return;
}
};
provider_scalers(
&mut scalers,
components,
properties,
component.traits.as_ref(),
manifest_name,
application_name,
component_name,
lattice_id,
policies,
notifier_subject,
notifier,
snapshot_data,
)
}
});
scalers
}
/// Helper function, primarily to remove nesting, that extends a [`ScalerList`] with all scalers
/// from a (Wasm) component [`Component`]
///
/// # Arguments
/// * `scalers` - The list of scalers to extend
/// * `components` - The list of components to convert
/// * `properties` - The properties of the component to convert
/// * `traits` - The traits of the component to convert
/// * `manifest_name` - The name of the manifest that the scalers are being created for
/// * `application_name` - The name of the application that the scalers are being created for
/// * `component_name` - The name of the component to convert
/// * The following arguments are required to create scalers and are passed directly through to the scalers:
/// * `lattice_id` - The lattice id the scalers operate on
/// * `policies` - The policies to use when creating the scalers so they can access secrets
/// * `notifier_subject` - The subject to use when creating the scalers so they can report status
/// * `notifier` - The publisher to use when creating the scalers so they can report status
/// * `snapshot_data` - The store to use when creating the scalers so they can access lattice state
#[allow(clippy::too_many_arguments)]
fn component_scalers<S, P, L>(
scalers: &mut ScalerList,
components: &[Component],
properties: &ComponentProperties,
traits: Option<&Vec<Trait>>,
manifest_name: &str,
application_name: &str,
component_name: &str,
lattice_id: &str,
policies: &HashMap<&String, &Policy>,
notifier_subject: &str,
notifier: &P,
snapshot_data: &SnapshotStore<S, L>,
) where
S: ReadStore + Send + Sync + Clone + 'static,
P: Publisher + Clone + Send + Sync + 'static,
L: LinkSource + ConfigSource + SecretSource + Clone + Send + Sync + 'static,
{
scalers.extend(traits.unwrap_or(&EMPTY_TRAIT_VEC).iter().filter_map(|trt| {
// If an image is specified, then it's a component in the same manifest. Otherwise, it's a shared component
let component_id = if properties.image.is_some() {
compute_component_id(manifest_name, properties.id.as_ref(), component_name)
} else {
compute_component_id(application_name, properties.id.as_ref(), component_name)
};
let (config_scalers, mut config_names) =
config_to_scalers(snapshot_data, manifest_name, &properties.config);
let (secret_scalers, secret_names) = secrets_to_scalers(
snapshot_data,
manifest_name,
&properties.secrets,
policies,
);
config_names.append(&mut secret_names.clone());
// TODO(#451): Consider a way to report on status of a shared component
match (trt.trait_type.as_str(), &trt.properties, &properties.image) {
// Shared application components already have their own spread/daemon scalers, you
// cannot modify them from another manifest
(SPREADSCALER_TRAIT, TraitProperty::SpreadScaler(_), None) => {
warn!(
"Unsupported SpreadScaler trait specified for a shared component {component_name}"
);
None
}
(DAEMONSCALER_TRAIT, TraitProperty::SpreadScaler(_), None) => {
warn!(
"Unsupported DaemonScaler trait specified for a shared component {component_name}"
);
None
}
(SPREADSCALER_TRAIT, TraitProperty::SpreadScaler(p), Some(image_ref)) => {
// The image is specified, so this component is defined in this manifest and
// gets its own spread scaler
Some(Box::new(BackoffWrapper::new(
ComponentSpreadScaler::new(
snapshot_data.clone(),
image_ref.clone(),
component_id,
lattice_id.to_owned(),
application_name.to_owned(),
p.to_owned(),
component_name,
config_names,
),
notifier.clone(),
config_scalers,
secret_scalers,
notifier_subject,
application_name,
Some(Duration::from_secs(5)),
)) as BoxedScaler)
}
(DAEMONSCALER_TRAIT, TraitProperty::SpreadScaler(p), Some(image_ref)) => {
Some(Box::new(BackoffWrapper::new(
ComponentDaemonScaler::new(
snapshot_data.clone(),
image_ref.to_owned(),
component_id,
lattice_id.to_owned(),
application_name.to_owned(),
p.to_owned(),
component_name,
config_names,
),
notifier.clone(),
config_scalers,
secret_scalers,
notifier_subject,
application_name,
Some(Duration::from_secs(5)),
)) as BoxedScaler)
}
(LINK_TRAIT, TraitProperty::Link(p), _) => {
// Find the target component of the link and create a scaler for it
components
.iter()
.find_map(|component| match &component.properties {
Properties::Capability {
properties:
CapabilityProperties {
id,
application,
image,
..
},
}
| Properties::Component {
properties:
ComponentProperties {
id,
application,
image,
..
},
} if component.name == p.target.name => Some(link_scaler(
p,
lattice_id,
manifest_name,
application_name,
&component.name,
component_id.to_string(),
id.as_ref(),
image.as_ref(),
application.as_ref(),
policies,
notifier_subject,
notifier,
snapshot_data,
)),
_ => None,
})
}
_ => None,
}
}));
}
/// Helper function, primarily to remove nesting, that extends a [`ScalerList`] with all scalers
/// from a capability provider [`Component`]
///
/// # Arguments
/// * `scalers` - The list of scalers to extend
/// * `components` - The list of components to convert
/// * `properties` - The properties of the capability provider to convert
/// * `traits` - The traits of the component to convert
/// * `manifest_name` - The name of the manifest that the scalers are being created for
/// * `application_name` - The name of the application that the scalers are being created for
/// * `component_name` - The name of the component to convert
/// * The following arguments are required to create scalers and are passed directly through to the scalers:
/// * `lattice_id` - The lattice id the scalers operate on
/// * `policies` - The policies to use when creating the scalers so they can access secrets
/// * `notifier_subject` - The subject to use when creating the scalers so they can report status
/// * `notifier` - The publisher to use when creating the scalers so they can report status
/// * `snapshot_data` - The store to use when creating the scalers so they can access lattice state
#[allow(clippy::too_many_arguments)]
fn provider_scalers<S, P, L>(
scalers: &mut ScalerList,
components: &[Component],
properties: &CapabilityProperties,
traits: Option<&Vec<Trait>>,
manifest_name: &str,
application_name: &str,
component_name: &str,
lattice_id: &str,
policies: &HashMap<&String, &Policy>,
notifier_subject: &str,
notifier: &P,
snapshot_data: &SnapshotStore<S, L>,
) where
S: ReadStore + Send + Sync + Clone + 'static,
P: Publisher + Clone + Send + Sync + 'static,
L: LinkSource + ConfigSource + SecretSource + Clone + Send + Sync + 'static,
{
// If an image is specified, then it's a provider in the same manifest. Otherwise, it's a shared provider
let provider_id = if properties.image.is_some() {
compute_component_id(manifest_name, properties.id.as_ref(), component_name)
} else {
compute_component_id(application_name, properties.id.as_ref(), component_name)
};
let mut scaler_specified = false;
scalers.extend(traits.unwrap_or(&EMPTY_TRAIT_VEC).iter().filter_map(|trt| {
match (trt.trait_type.as_str(), &trt.properties, &properties.image) {
// Shared application components already have their own spread/daemon scalers, you
// cannot modify them from another manifest
(SPREADSCALER_TRAIT, TraitProperty::SpreadScaler(_), None) => {
warn!(
"Unsupported SpreadScaler trait specified for a shared provider {component_name}"
);
None
}
(DAEMONSCALER_TRAIT, TraitProperty::SpreadScaler(_), None) => {
warn!(
"Unsupported DaemonScaler trait specified for a shared provider {component_name}"
);
None
}
(SPREADSCALER_TRAIT, TraitProperty::SpreadScaler(p), Some(image)) => {
scaler_specified = true;
let (config_scalers, mut config_names) =
config_to_scalers(snapshot_data, application_name, &properties.config);
let (secret_scalers, secret_names) = secrets_to_scalers(
snapshot_data,
application_name,
&properties.secrets,
policies,
);
config_names.append(&mut secret_names.clone());
Some(Box::new(BackoffWrapper::new(
ProviderSpreadScaler::new(
snapshot_data.clone(),
ProviderSpreadConfig {
lattice_id: lattice_id.to_owned(),
provider_id: provider_id.to_owned(),
provider_reference: image.to_owned(),
spread_config: p.to_owned(),
model_name: application_name.to_owned(),
provider_config: config_names,
},
component_name,
),
notifier.clone(),
config_scalers,
secret_scalers,
notifier_subject,
application_name,
// Providers are a bit longer because it can take a bit to download
Some(Duration::from_secs(60)),
)) as BoxedScaler)
}
(DAEMONSCALER_TRAIT, TraitProperty::SpreadScaler(p), Some(image)) => {
scaler_specified = true;
let (config_scalers, mut config_names) =
config_to_scalers(snapshot_data, application_name, &properties.config);
let (secret_scalers, secret_names) = secrets_to_scalers(
snapshot_data,
application_name,
&properties.secrets,
policies,
);
config_names.append(&mut secret_names.clone());
Some(Box::new(BackoffWrapper::new(
ProviderDaemonScaler::new(
snapshot_data.clone(),
ProviderSpreadConfig {
lattice_id: lattice_id.to_owned(),
provider_id: provider_id.to_owned(),
provider_reference: image.to_owned(),
spread_config: p.to_owned(),
model_name: application_name.to_owned(),
provider_config: config_names,
},
component_name,
),
notifier.clone(),
config_scalers,
secret_scalers,
notifier_subject,
application_name,
// Providers are a bit longer because it can take a bit to download
Some(Duration::from_secs(60)),
)) as BoxedScaler)
}
// Find the target component of the link and create a scaler for it.
(LINK_TRAIT, TraitProperty::Link(p), _) => {
components
.iter()
.find_map(|component| match &component.properties {
// Providers cannot link to other providers, only components
Properties::Capability { .. } if component.name == p.target.name => {
error!(
"Provider {} cannot link to provider {}, only components",
&component.name, p.target.name
);
None
}
Properties::Component {
properties:
ComponentProperties {
image,
application,
id,
..
},
} if component.name == p.target.name => Some(link_scaler(
p,
lattice_id,
manifest_name,
application_name,
&component.name,
provider_id.to_owned(),
id.as_ref(),
image.as_ref(),
application.as_ref(),
policies,
notifier_subject,
notifier,
snapshot_data,
)),
_ => None,
})
}
_ => None,
}
}));
// Allow providers to omit the spreadscaler entirely for simplicity
if !scaler_specified {
if let Some(image) = &properties.image {
let (config_scalers, mut config_names) =
config_to_scalers(snapshot_data, application_name, &properties.config);
let (secret_scalers, mut secret_names) = secrets_to_scalers(
snapshot_data,
application_name,
&properties.secrets,
policies,
);
config_names.append(&mut secret_names);
scalers.push(Box::new(BackoffWrapper::new(
ProviderSpreadScaler::new(
snapshot_data.clone(),
ProviderSpreadConfig {
lattice_id: lattice_id.to_owned(),
provider_id,
provider_reference: image.to_owned(),
spread_config: SpreadScalerProperty {
instances: 1,
spread: vec![],
},
model_name: application_name.to_owned(),
provider_config: config_names,
},
component_name,
),
notifier.clone(),
config_scalers,
secret_scalers,
notifier_subject,
application_name,
// Providers are a bit longer because it can take a bit to download
Some(Duration::from_secs(60)),
)) as BoxedScaler)
}
}
}
/// Resolves configuration, secrets, and the target of a link to create a boxed [`LinkScaler`]
///
/// # Arguments
/// * `link_property` - The properties of the link to convert
/// * `lattice_id` - The lattice id the scalers operate on
/// * `manifest_name` - The name of the manifest that the scalers are being created for
/// * `application_name` - The name of the application that the scalers are being created for
/// * `component_name` - The name of the component to convert
/// * `source_id` - The ID of the source component
/// * `target_id` - The optional ID of the target component
/// * `image` - The optional image reference of the target component
/// * `shared` - The optional shared application reference of the target component
/// * `policies` - The policies to use when creating the scalers so they can access secrets
/// * `notifier_subject` - The subject to use when creating the scalers so they can report status
/// * `notifier` - The publisher to use when creating the scalers so they can report status
/// * `snapshot_data` - The store to use when creating the scalers so they can access lattice state
#[allow(clippy::too_many_arguments)]
fn link_scaler<S, P, L>(
link_property: &LinkProperty,
lattice_id: &str,
manifest_name: &str,
application_name: &str,
component_name: &str,
source_id: String,
target_id: Option<&String>,
image: Option<&String>,
shared: Option<&SharedApplicationComponentProperties>,
policies: &HashMap<&String, &Policy>,
notifier_subject: &str,
notifier: &P,
snapshot_data: &SnapshotStore<S, L>,
) -> BoxedScaler
where
S: ReadStore + Send + Sync + Clone + 'static,
P: Publisher + Clone + Send + Sync + 'static,
L: LinkSource + ConfigSource + SecretSource + Clone + Send + Sync + 'static,
{
let (mut config_scalers, mut source_config) = config_to_scalers(
snapshot_data,
manifest_name,
&link_property
.source
.as_ref()
.unwrap_or(&Default::default())
.config,
);
let (target_config_scalers, mut target_config) =
config_to_scalers(snapshot_data, manifest_name, &link_property.target.config);
let (target_secret_scalers, target_secrets) = secrets_to_scalers(
snapshot_data,
manifest_name,
&link_property.target.secrets,
policies,
);
let (mut source_secret_scalers, source_secrets) = secrets_to_scalers(
snapshot_data,
manifest_name,
&link_property
.source
.as_ref()
.unwrap_or(&Default::default())
.secrets,
policies,
);
config_scalers.extend(target_config_scalers);
source_secret_scalers.extend(target_secret_scalers);
target_config.extend(target_secrets);
source_config.extend(source_secrets);
let (target_manifest_name, target_component_name) =
match resolve_manifest_component(manifest_name, component_name, image, shared) {
Ok(name) => name,
Err(err) => {
error!(err);
return Box::new(StatusScaler::new(
uuid::Uuid::new_v4().to_string(),
LINK_SCALER_KIND,
format!(
"{} -({}:{})-> {}",
component_name,
link_property.namespace,
link_property.package,
link_property.target.name
),
StatusInfo::failed(err),
)) as BoxedScaler;
}
};
let target = compute_component_id(target_manifest_name, target_id, target_component_name);
Box::new(BackoffWrapper::new(
LinkScaler::new(
snapshot_data.clone(),
LinkScalerConfig {
source_id,
target,
wit_namespace: link_property.namespace.to_owned(),
wit_package: link_property.package.to_owned(),
wit_interfaces: link_property.interfaces.to_owned(),
name: link_property
.name
.to_owned()
.unwrap_or_else(|| DEFAULT_LINK_NAME.to_string()),
lattice_id: lattice_id.to_owned(),
model_name: application_name.to_owned(),
source_config,
target_config,
},
snapshot_data.clone(),
),
notifier.clone(),
config_scalers,
source_secret_scalers,
notifier_subject,
application_name,
Some(Duration::from_secs(5)),
)) as BoxedScaler
}
/// Returns a tuple which is a list of scalers and a list of the names of the configs that the
/// scalers use.
///
/// Any input [ConfigProperty] that has a `properties` field will be converted into a [ConfigScaler], and
/// the name of the configuration will be modified to be unique to the model and component. If the properties
/// field is not present, the name will be used as-is and the configuration is assumed to be managed externally to wadm.
fn config_to_scalers<C: ConfigSource + Send + Sync + Clone>(
config_source: &C,
manifest_name: &str,
configs: &[ConfigProperty],
) -> (Vec<ConfigScaler<C>>, Vec<String>) {
configs
.iter()
.map(|config| {
let name = if config.properties.is_some() {
compute_component_id(manifest_name, None, &config.name)
} else {
config.name.clone()
};
(
ConfigScaler::new(config_source.clone(), &name, config.properties.as_ref()),
name,
)
})
.unzip()
}
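
A brief sketch of the naming rule described above, expressed against this module's `compute_component_id` with illustrative values: inline-defined configuration is renamed to a manifest-scoped identifier, while a `ConfigProperty` without `properties` keeps its given name.

```rust
fn config_naming_sketch() {
    // An inline config named "http-config" in the manifest "my app" gets a scoped, sanitized name.
    assert_eq!(
        compute_component_id("my app", None, "http-config"),
        "my_app-http_config"
    );
    // A ConfigProperty without `properties` (e.g. "shared-tls-config") is passed through as-is.
}
```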
fn secrets_to_scalers<S: SecretSource + Send + Sync + Clone>(
secret_source: &S,
manifest_name: &str,
secrets: &[SecretProperty],
policies: &HashMap<&String, &Policy>,
) -> (Vec<SecretScaler<S>>, Vec<String>) {
secrets
.iter()
.map(|s| {
let name = compute_secret_id(manifest_name, None, &s.name);
let policy = *policies.get(&s.properties.policy).unwrap();
(
SecretScaler::new(
name.clone(),
policy.clone(),
s.clone(),
secret_source.clone(),
),
name,
)
})
.unzip()
}
/// Based on the name of the model and the optionally provided ID, returns a unique ID for the
/// component. If no ID is given, the ID is a sanitized version of the model name and component
/// name, separated by a dash.
pub(crate) fn compute_component_id(
manifest_name: &str,
component_id: Option<&String>,
component_name: &str,
) -> String {
if let Some(id) = component_id {
id.to_owned()
} else {
format!(
"{}-{}",
manifest_name
.to_lowercase()
.replace(|c: char| !c.is_ascii_alphanumeric(), "_"),
component_name
.to_lowercase()
.replace(|c: char| !c.is_ascii_alphanumeric(), "_")
)
}
}
pub(crate) fn compute_secret_id(
manifest_name: &str,
component_id: Option<&String>,
component_name: &str,
) -> String {
let name = compute_component_id(manifest_name, component_id, component_name);
format!("{SECRET_PREFIX}_{name}")
}
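
A similar sketch for secret IDs, again with illustrative inputs: the component ID scheme above is reused and the crate-defined `SECRET_PREFIX` is prepended.

```rust
fn secret_id_sketch() {
    // "my app" / "api-key" sanitize to "my_app-api_key", then the secret prefix is prepended.
    assert_eq!(
        compute_secret_id("my app", None, "api-key"),
        format!("{SECRET_PREFIX}_my_app-api_key")
    );
}
```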
/// Helper function to resolve a link to a manifest component, returning the name of the manifest
/// and the name of the component where the target resides.
///
/// If the component resides in the same manifest, then the name of the manifest & the name of the
/// component as specified will be returned. In the case that the component resides in a shared
/// application, the name of the shared application & the name of the component in that application
/// will be returned.
///
/// # Arguments
/// * `application_name` - The name of the manifest that the scalers are being created for
/// * `component_name` - The name of the component in the source manifest to target
/// * `component_image_ref` - The image reference for the component
/// * `shared_app_info` - The optional shared application reference for the component
fn resolve_manifest_component<'a>(
application_name: &'a str,
component_name: &'a str,
component_image_ref: Option<&'a String>,
shared_app_info: Option<&'a SharedApplicationComponentProperties>,
) -> Result<(&'a str, &'a str), &'a str> {
match (component_image_ref, shared_app_info) {
(Some(_), None) => Ok((application_name, component_name)),
(None, Some(app)) => Ok((app.name.as_str(), app.component.as_str())),
// These two cases should both be unreachable, since they are caught during manifest
// validation before the manifest is stored. Just in case, we log an error and ensure the status is failed
(None, None) => Err("Application did not specify an image or shared application reference"),
(Some(_image), Some(_app)) => {
Err("Application specified both an image and a shared application reference")
}
}
}
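
A short sketch of the resolution rule documented above, using hypothetical names: an image reference resolves to the current manifest, while supplying neither an image nor a shared reference is an error.

```rust
fn resolve_sketch() {
    let image = "ghcr.io/example/echo:0.1.0".to_string(); // hypothetical image reference
    // Image present, no shared reference: the component lives in this manifest.
    assert_eq!(
        resolve_manifest_component("my-app", "echo", Some(&image), None),
        Ok(("my-app", "echo"))
    );
    // Neither an image nor a shared reference: validation should have caught this upstream.
    assert!(resolve_manifest_component("my-app", "echo", None, None).is_err());
}
```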
#[cfg(test)]
mod test {
use super::compute_component_id;
#[test]
fn compute_proper_component_id() {
// User supplied ID always takes precedence
assert_eq!(
compute_component_id("mymodel", Some(&"myid".to_string()), "echo"),
"myid"
);
assert_eq!(
compute_component_id(
"some model name with spaces cause yaml",
Some(&"myid".to_string()),
" echo "
),
"myid"
);
// Sanitize component reference
assert_eq!(
compute_component_id("mymodel", None, "echo-component"),
"mymodel-echo_component"
);
// Ensure we can support spaces in the model name, because YAML strings
assert_eq!(
compute_component_id("some model name with spaces cause yaml", None, "echo"),
"some_model_name_with_spaces_cause_yaml-echo"
);
// Ensure we lowercase and sanitize the reference as well, just in case
assert_eq!(
compute_component_id("My ThInG", None, "thing.wasm"),
"my_thing-thing_wasm"
);
}
}

View File

@ -7,6 +7,7 @@ use tokio::sync::RwLock;
use tracing::{instrument, trace};
use wadm_types::{api::StatusInfo, Spread, SpreadScalerProperty, TraitProperty};
use crate::events::ConfigSet;
use crate::scaler::spreadscaler::{
compute_ineligible_hosts, eligible_hosts, spreadscaler_annotations,
};
@ -119,6 +120,9 @@ impl<S: ReadStore + Send + Sync + Clone> Scaler for ComponentDaemonScaler<S> {
Ok(Vec::new())
}
}
Event::ConfigSet(ConfigSet { config_name }) if self.config.contains(config_name) => {
self.reconcile().await
}
// No other event impacts the job of this scaler so we can ignore it
_ => Ok(Vec::new()),
}

View File

@ -4,15 +4,20 @@ use anyhow::Result;
use async_trait::async_trait;
use tokio::sync::RwLock;
use tracing::{instrument, trace};
use wadm_types::api::StatusType;
use wadm_types::{api::StatusInfo, Spread, SpreadScalerProperty, TraitProperty};
use crate::commands::StopProvider;
use crate::events::{HostHeartbeat, ProviderInfo, ProviderStarted, ProviderStopped};
use crate::events::{
ConfigSet, HostHeartbeat, ProviderHealthCheckFailed, ProviderHealthCheckInfo,
ProviderHealthCheckPassed, ProviderInfo, ProviderStarted, ProviderStopped,
};
use crate::scaler::compute_id_sha256;
use crate::scaler::spreadscaler::{
compute_ineligible_hosts, eligible_hosts, provider::ProviderSpreadConfig,
spreadscaler_annotations,
};
use crate::storage::{Provider, ProviderStatus};
use crate::SCALER_KEY;
use crate::{
commands::{Command, StartProvider},
@ -97,6 +102,65 @@ impl<S: ReadStore + Send + Sync + Clone> Scaler for ProviderDaemonScaler<S> {
{
self.reconcile().await
}
// perform status updates for health check events
Event::ProviderHealthCheckFailed(ProviderHealthCheckFailed {
data: ProviderHealthCheckInfo { provider_id, .. },
})
| Event::ProviderHealthCheckPassed(ProviderHealthCheckPassed {
data: ProviderHealthCheckInfo { provider_id, .. },
}) if provider_id == &self.config.provider_id => {
let provider = self
.store
.get::<Provider>(&self.config.lattice_id, &self.config.provider_id)
.await?;
let unhealthy_providers = provider.map_or(0, |p| {
p.hosts
.values()
.filter(|s| *s == &ProviderStatus::Failed)
.count()
});
let status = self.status.read().await.to_owned();
// update health status of scaler
if let Some(status) = match (status, unhealthy_providers > 0) {
// scaler is deployed but contains unhealthy providers
(
StatusInfo {
status_type: StatusType::Deployed,
..
},
true,
) => Some(StatusInfo::failed(&format!(
"Unhealthy provider on {} host(s)",
unhealthy_providers
))),
// A scaler can become unhealthy only if it was previously deployed; once the
// scaler becomes healthy again, revert back to the deployed state. This is a
// workaround to detect unhealthy status until StatusType::Unhealthy can be used
(
StatusInfo {
status_type: StatusType::Failed,
message,
},
false,
) if message.starts_with("Unhealthy provider on") => {
Some(StatusInfo::deployed(""))
}
// don't update status if scaler is not deployed
_ => None,
} {
*self.status.write().await = status;
}
// only status needs update no new commands required
Ok(Vec::new())
}
Event::ConfigSet(ConfigSet { config_name })
if self.config.provider_config.contains(config_name) =>
{
self.reconcile().await
}
// No other event impacts the job of this scaler so we can ignore it
_ => Ok(Vec::new()),
}
@ -105,7 +169,6 @@ impl<S: ReadStore + Send + Sync + Clone> Scaler for ProviderDaemonScaler<S> {
#[instrument(level = "trace", skip_all, fields(name = %self.config.model_name, scaler_id = %self.id))]
async fn reconcile(&self) -> Result<Vec<Command>> {
let hosts = self.store.list::<Host>(&self.config.lattice_id).await?;
let provider_id = &self.config.provider_id;
let provider_ref = &self.config.provider_reference;
@ -505,4 +568,274 @@ mod test {
Ok(())
}
#[tokio::test]
async fn test_healthy_providers_return_healthy_status() -> Result<()> {
let lattice_id = "test_healthy_providers";
let provider_ref = "fakecloud.azurecr.io/provider:3.2.1".to_string();
let provider_id = "VASDASDIAMAREALPROVIDERPROVIDER";
let host_id_one = "NASDASDIMAREALHOSTONE";
let host_id_two = "NASDASDIMAREALHOSTTWO";
let store = Arc::new(TestStore::default());
store
.store(
lattice_id,
host_id_one.to_string(),
Host {
components: HashMap::new(),
friendly_name: "hey".to_string(),
labels: HashMap::from_iter([
("inda".to_string(), "cloud".to_string()),
("cloud".to_string(), "fake".to_string()),
("region".to_string(), "us-noneofyourbusiness-1".to_string()),
]),
providers: HashSet::from_iter([ProviderInfo {
provider_id: provider_id.to_string(),
provider_ref: provider_ref.to_string(),
annotations: BTreeMap::default(),
}]),
uptime_seconds: 123,
version: None,
id: host_id_one.to_string(),
last_seen: Utc::now(),
},
)
.await?;
store
.store(
lattice_id,
host_id_two.to_string(),
Host {
components: HashMap::new(),
friendly_name: "hey".to_string(),
labels: HashMap::from_iter([
("inda".to_string(), "cloud".to_string()),
("cloud".to_string(), "real".to_string()),
("region".to_string(), "us-yourhouse-1".to_string()),
]),
providers: HashSet::from_iter([ProviderInfo {
provider_id: provider_id.to_string(),
provider_ref: provider_ref.to_string(),
annotations: BTreeMap::default(),
}]),
uptime_seconds: 123,
version: None,
id: host_id_two.to_string(),
last_seen: Utc::now(),
},
)
.await?;
store
.store(
lattice_id,
provider_id.to_string(),
Provider {
id: provider_id.to_string(),
name: "provider".to_string(),
issuer: "issuer".to_string(),
reference: provider_ref.to_string(),
hosts: HashMap::from([
(host_id_one.to_string(), ProviderStatus::Failed),
(host_id_two.to_string(), ProviderStatus::Running),
]),
},
)
.await?;
// Ensure we spread evenly with equal weights, clean division
let multi_spread_even = SpreadScalerProperty {
// instances are ignored so putting an absurd number
instances: 2,
spread: vec![Spread {
name: "SimpleOne".to_string(),
requirements: BTreeMap::from_iter([("inda".to_string(), "cloud".to_string())]),
weight: Some(100),
}],
};
let spreadscaler = ProviderDaemonScaler::new(
store.clone(),
ProviderSpreadConfig {
lattice_id: lattice_id.to_string(),
provider_id: provider_id.to_string(),
provider_reference: provider_ref.to_string(),
spread_config: multi_spread_even,
model_name: MODEL_NAME.to_string(),
provider_config: vec!["foobar".to_string()],
},
"fake_component",
);
spreadscaler.reconcile().await?;
spreadscaler
.handle_event(&Event::ProviderHealthCheckFailed(
ProviderHealthCheckFailed {
data: ProviderHealthCheckInfo {
provider_id: provider_id.to_string(),
host_id: host_id_one.to_string(),
},
},
))
.await?;
store
.store(
lattice_id,
provider_id.to_string(),
Provider {
id: provider_id.to_string(),
name: "provider".to_string(),
issuer: "issuer".to_string(),
reference: provider_ref.to_string(),
hosts: HashMap::from([
(host_id_one.to_string(), ProviderStatus::Pending),
(host_id_two.to_string(), ProviderStatus::Running),
]),
},
)
.await?;
spreadscaler
.handle_event(&Event::ProviderHealthCheckPassed(
ProviderHealthCheckPassed {
data: ProviderHealthCheckInfo {
provider_id: provider_id.to_string(),
host_id: host_id_two.to_string(),
},
},
))
.await?;
assert_eq!(
spreadscaler.status.read().await.to_owned(),
StatusInfo::deployed("")
);
Ok(())
}
#[tokio::test]
async fn test_unhealthy_providers_return_unhealthy_status() -> Result<()> {
let lattice_id = "test_unhealthy_providers";
let provider_ref = "fakecloud.azurecr.io/provider:3.2.1".to_string();
let provider_id = "VASDASDIAMAREALPROVIDERPROVIDER";
let host_id_one = "NASDASDIMAREALHOSTONE";
let host_id_two = "NASDASDIMAREALHOSTTWO";
let store = Arc::new(TestStore::default());
store
.store(
lattice_id,
host_id_one.to_string(),
Host {
components: HashMap::new(),
friendly_name: "hey".to_string(),
labels: HashMap::from_iter([
("inda".to_string(), "cloud".to_string()),
("cloud".to_string(), "fake".to_string()),
("region".to_string(), "us-noneofyourbusiness-1".to_string()),
]),
providers: HashSet::from_iter([ProviderInfo {
provider_id: provider_id.to_string(),
provider_ref: provider_ref.to_string(),
annotations: BTreeMap::default(),
}]),
uptime_seconds: 123,
version: None,
id: host_id_one.to_string(),
last_seen: Utc::now(),
},
)
.await?;
store
.store(
lattice_id,
host_id_two.to_string(),
Host {
components: HashMap::new(),
friendly_name: "hey".to_string(),
labels: HashMap::from_iter([
("inda".to_string(), "cloud".to_string()),
("cloud".to_string(), "real".to_string()),
("region".to_string(), "us-yourhouse-1".to_string()),
]),
providers: HashSet::from_iter([ProviderInfo {
provider_id: provider_id.to_string(),
provider_ref: provider_ref.to_string(),
annotations: BTreeMap::default(),
}]),
uptime_seconds: 123,
version: None,
id: host_id_two.to_string(),
last_seen: Utc::now(),
},
)
.await?;
store
.store(
lattice_id,
provider_id.to_string(),
Provider {
id: provider_id.to_string(),
name: "provider".to_string(),
issuer: "issuer".to_string(),
reference: provider_ref.to_string(),
hosts: HashMap::from([
(host_id_one.to_string(), ProviderStatus::Failed),
(host_id_two.to_string(), ProviderStatus::Running),
]),
},
)
.await?;
// Ensure we spread evenly with equal weights, clean division
let multi_spread_even = SpreadScalerProperty {
// instances are ignored so putting an absurd number
instances: 2,
spread: vec![Spread {
name: "SimpleOne".to_string(),
requirements: BTreeMap::from_iter([("inda".to_string(), "cloud".to_string())]),
weight: Some(100),
}],
};
let spreadscaler = ProviderDaemonScaler::new(
store.clone(),
ProviderSpreadConfig {
lattice_id: lattice_id.to_string(),
provider_id: provider_id.to_string(),
provider_reference: provider_ref.to_string(),
spread_config: multi_spread_even,
model_name: MODEL_NAME.to_string(),
provider_config: vec!["foobar".to_string()],
},
"fake_component",
);
spreadscaler.reconcile().await?;
spreadscaler
.handle_event(&Event::ProviderHealthCheckFailed(
ProviderHealthCheckFailed {
data: ProviderHealthCheckInfo {
provider_id: provider_id.to_string(),
host_id: host_id_one.to_string(),
},
},
))
.await?;
assert_eq!(
spreadscaler.status.read().await.to_owned(),
StatusInfo::failed("Unhealthy provider on 1 host(s)")
);
Ok(())
}
}

View File

@ -1,6 +1,6 @@
//! A struct that manages creating and removing scalers for all manifests
use std::{collections::HashMap, ops::Deref, sync::Arc, time::Duration};
use std::{collections::HashMap, ops::Deref, sync::Arc};
use anyhow::Result;
use async_nats::jetstream::{
@ -19,31 +19,18 @@ use tokio::{
use tracing::{debug, error, instrument, trace, warn};
use wadm_types::{
api::{Status, StatusInfo},
CapabilityProperties, Component, ComponentProperties, ConfigProperty, Manifest, Policy,
Properties, SecretProperty, SpreadScalerProperty, Trait, TraitProperty, DAEMONSCALER_TRAIT,
LINK_TRAIT, SPREADSCALER_TRAIT,
Manifest,
};
use wasmcloud_secrets_types::SECRET_PREFIX;
use crate::{
events::Event,
publisher::Publisher,
scaler::{spreadscaler::ComponentSpreadScaler, Command, Scaler},
scaler::{Command, Scaler},
storage::{snapshot::SnapshotStore, ReadStore},
workers::{CommandPublisher, ConfigSource, LinkSource, SecretSource, StatusPublisher},
DEFAULT_LINK_NAME,
};
use super::{
configscaler::ConfigScaler,
daemonscaler::{provider::ProviderDaemonScaler, ComponentDaemonScaler},
secretscaler::SecretScaler,
spreadscaler::{
link::{LinkScaler, LinkScalerConfig},
provider::{ProviderSpreadConfig, ProviderSpreadScaler},
},
BackoffWrapper,
};
use super::convert::manifest_components_to_scalers;
pub type BoxedScaler = Box<dyn Scaler + Send + Sync + 'static>;
pub type ScalerList = Vec<BoxedScaler>;
@ -202,13 +189,13 @@ where
.filter_map(|manifest| {
let data = manifest.get_deployed()?;
let name = manifest.name().to_owned();
let scalers = components_to_scalers(
let scalers = manifest_components_to_scalers(
&data.spec.components,
&data.policy_lookup(),
lattice_id,
&client,
&name,
&subject,
&client,
&snapshot_data,
);
Some((name, scalers))
@ -293,13 +280,13 @@ where
}
pub fn scalers_for_manifest<'a>(&'a self, manifest: &'a Manifest) -> ScalerList {
components_to_scalers(
manifest_components_to_scalers(
&manifest.spec.components,
&manifest.policy_lookup(),
&self.lattice_id,
&self.client,
&manifest.metadata.name,
&self.subject,
&self.client,
&self.snapshot_data,
)
}
@ -443,13 +430,13 @@ where
match notification {
Notifications::CreateScalers(manifest) => {
// We don't want to trigger the notification, so just create the scalers and then insert
let scalers = components_to_scalers(
let scalers = manifest_components_to_scalers(
&manifest.spec.components,
&manifest.policy_lookup(),
&self.lattice_id,
&self.client,
&manifest.metadata.name,
&self.subject,
&self.client,
&self.snapshot_data,
);
let num_scalers = scalers.len();
@ -566,471 +553,3 @@ where
}
}
}
const EMPTY_TRAIT_VEC: Vec<Trait> = Vec::new();
/// Converts a list of components into a list of scalers
///
/// # Arguments
/// * `components` - The list of components to convert
/// * `store` - The store to use when creating the scalers so they can access lattice state
/// * `lattice_id` - The lattice id the scalers operate on
/// * `name` - The name of the manifest that the scalers are being created for
pub(crate) fn components_to_scalers<S, P, L>(
components: &[Component],
policies: &HashMap<&String, &Policy>,
lattice_id: &str,
notifier: &P,
name: &str,
notifier_subject: &str,
snapshot_data: &SnapshotStore<S, L>,
) -> ScalerList
where
S: ReadStore + Send + Sync + Clone + 'static,
P: Publisher + Clone + Send + Sync + 'static,
L: LinkSource + ConfigSource + SecretSource + Clone + Send + Sync + 'static,
{
let mut scalers: ScalerList = Vec::new();
for component in components.iter() {
let traits = component.traits.as_ref();
match &component.properties {
Properties::Component { properties: props } => {
scalers.extend(traits.unwrap_or(&EMPTY_TRAIT_VEC).iter().filter_map(|trt| {
let component_id =
compute_component_id(name, props.id.as_ref(), &component.name);
let (config_scalers, mut config_names) =
config_to_scalers(snapshot_data.clone(), name, &props.config);
let (secret_scalers, secret_names) =
secrets_to_scalers(snapshot_data.clone(), name, &props.secrets, policies);
config_names.append(&mut secret_names.clone());
match (trt.trait_type.as_str(), &trt.properties) {
(SPREADSCALER_TRAIT, TraitProperty::SpreadScaler(p)) => {
Some(Box::new(BackoffWrapper::new(
ComponentSpreadScaler::new(
snapshot_data.clone(),
props.image.to_owned(),
component_id,
lattice_id.to_owned(),
name.to_owned(),
p.to_owned(),
&component.name,
config_names,
),
notifier.to_owned(),
config_scalers,
secret_scalers,
notifier_subject,
name,
Some(Duration::from_secs(5)),
)) as BoxedScaler)
}
(DAEMONSCALER_TRAIT, TraitProperty::SpreadScaler(p)) => {
Some(Box::new(BackoffWrapper::new(
ComponentDaemonScaler::new(
snapshot_data.clone(),
props.image.to_owned(),
component_id,
lattice_id.to_owned(),
name.to_owned(),
p.to_owned(),
&component.name,
config_names,
),
notifier.to_owned(),
config_scalers,
secret_scalers,
notifier_subject,
name,
Some(Duration::from_secs(5)),
)) as BoxedScaler)
}
(LINK_TRAIT, TraitProperty::Link(p)) => {
components.iter().find_map(|component| {
let (mut config_scalers, mut source_config) = config_to_scalers(
snapshot_data.clone(),
name,
&p.source.as_ref().unwrap_or(&Default::default()).config,
);
let (target_config_scalers, mut target_config) = config_to_scalers(
snapshot_data.clone(),
name,
&p.target.config,
);
let (target_secret_scalers, target_secrets) = secrets_to_scalers(
snapshot_data.clone(),
name,
&p.target.secrets,
policies,
);
let (mut source_secret_scalers, source_secrets) =
secrets_to_scalers(
snapshot_data.clone(),
name,
&p.source.as_ref().unwrap_or(&Default::default()).secrets,
policies,
);
config_scalers.extend(target_config_scalers);
source_secret_scalers.extend(target_secret_scalers);
target_config.extend(target_secrets);
source_config.extend(source_secrets);
match &component.properties {
Properties::Capability {
properties: CapabilityProperties { id, .. },
}
| Properties::Component {
properties: ComponentProperties { id, .. },
} if component.name == p.target.name => {
Some(Box::new(BackoffWrapper::new(
LinkScaler::new(
snapshot_data.clone(),
LinkScalerConfig {
source_id: component_id.to_string(),
target: compute_component_id(
name,
id.as_ref(),
&component.name,
),
wit_namespace: p.namespace.to_owned(),
wit_package: p.package.to_owned(),
wit_interfaces: p.interfaces.to_owned(),
name: p.name.to_owned().unwrap_or_else(|| {
DEFAULT_LINK_NAME.to_string()
}),
lattice_id: lattice_id.to_owned(),
model_name: name.to_owned(),
source_config,
target_config,
},
snapshot_data.clone(),
),
notifier.to_owned(),
config_scalers,
source_secret_scalers,
notifier_subject,
name,
Some(Duration::from_secs(5)),
))
as BoxedScaler)
}
_ => None,
}
})
}
_ => None,
}
}))
}
Properties::Capability { properties: props } => {
let provider_id = compute_component_id(name, props.id.as_ref(), &component.name);
let mut scaler_specified = false;
if let Some(traits) = traits {
scalers.extend(traits.iter().filter_map(|trt| {
match (trt.trait_type.as_str(), &trt.properties) {
(SPREADSCALER_TRAIT, TraitProperty::SpreadScaler(p)) => {
scaler_specified = true;
let (config_scalers, mut config_names) =
config_to_scalers(snapshot_data.clone(), name, &props.config);
let (secret_scalers, secret_names) = secrets_to_scalers(
snapshot_data.clone(),
name,
&props.secrets,
policies,
);
config_names.append(&mut secret_names.clone());
Some(Box::new(BackoffWrapper::new(
ProviderSpreadScaler::new(
snapshot_data.clone(),
ProviderSpreadConfig {
lattice_id: lattice_id.to_owned(),
provider_id: provider_id.to_owned(),
provider_reference: props.image.to_owned(),
spread_config: p.to_owned(),
model_name: name.to_owned(),
provider_config: config_names,
},
&component.name,
),
notifier.to_owned(),
config_scalers,
secret_scalers,
notifier_subject,
name,
// Providers use a longer backoff because downloading the provider image can take a while
Some(Duration::from_secs(60)),
)) as BoxedScaler)
}
(DAEMONSCALER_TRAIT, TraitProperty::SpreadScaler(p)) => {
scaler_specified = true;
let (config_scalers, mut config_names) =
config_to_scalers(snapshot_data.clone(), name, &props.config);
let (secret_scalers, secret_names) = secrets_to_scalers(
snapshot_data.clone(),
name,
&props.secrets,
policies,
);
config_names.append(&mut secret_names.clone());
Some(Box::new(BackoffWrapper::new(
ProviderDaemonScaler::new(
snapshot_data.clone(),
ProviderSpreadConfig {
lattice_id: lattice_id.to_owned(),
provider_id: provider_id.to_owned(),
provider_reference: props.image.to_owned(),
spread_config: p.to_owned(),
model_name: name.to_owned(),
provider_config: config_names,
},
&component.name,
),
notifier.to_owned(),
config_scalers,
secret_scalers,
notifier_subject,
name,
// Providers use a longer backoff because downloading the provider image can take a while
Some(Duration::from_secs(60)),
)) as BoxedScaler)
}
(LINK_TRAIT, TraitProperty::Link(p)) => {
components.iter().find_map(|component| {
let (mut config_scalers, mut source_config) = config_to_scalers(
snapshot_data.clone(),
name,
&p.source.as_ref().unwrap_or(&Default::default()).config,
);
let (target_config_scalers, mut target_config) =
config_to_scalers(
snapshot_data.clone(),
name,
&p.target.config,
);
let (target_secret_scalers, target_secrets) =
secrets_to_scalers(
snapshot_data.clone(),
name,
&p.target.secrets,
policies,
);
let (mut source_secret_scalers, source_secrets) =
secrets_to_scalers(
snapshot_data.clone(),
name,
&p.source
.as_ref()
.unwrap_or(&Default::default())
.secrets,
policies,
);
config_scalers.extend(target_config_scalers);
source_secret_scalers.extend(target_secret_scalers);
target_config.extend(target_secrets);
source_config.extend(source_secrets);
match &component.properties {
Properties::Component { properties: cappy }
if component.name == p.target.name =>
{
Some(Box::new(BackoffWrapper::new(
LinkScaler::new(
snapshot_data.clone(),
LinkScalerConfig {
source_id: provider_id.to_string(),
target: compute_component_id(
name,
cappy.id.as_ref(),
&component.name,
),
wit_namespace: p.namespace.to_owned(),
wit_package: p.package.to_owned(),
wit_interfaces: p.interfaces.to_owned(),
name: p.name.to_owned().unwrap_or_else(
|| DEFAULT_LINK_NAME.to_string(),
),
lattice_id: lattice_id.to_owned(),
model_name: name.to_owned(),
source_config,
target_config,
},
snapshot_data.clone(),
),
notifier.to_owned(),
config_scalers,
source_secret_scalers,
notifier_subject,
name,
Some(Duration::from_secs(5)),
))
as BoxedScaler)
}
_ => None,
}
})
}
_ => None,
}
}))
}
// Allow providers to omit the scaler entirely for simplicity
if !scaler_specified {
let (config_scalers, mut config_names) =
config_to_scalers(snapshot_data.clone(), name, &props.config);
let (secret_scalers, secret_names) =
secrets_to_scalers(snapshot_data.clone(), name, &props.secrets, policies);
config_names.append(&mut secret_names.clone());
scalers.push(Box::new(BackoffWrapper::new(
ProviderSpreadScaler::new(
snapshot_data.clone(),
ProviderSpreadConfig {
lattice_id: lattice_id.to_owned(),
provider_id,
provider_reference: props.image.to_owned(),
spread_config: SpreadScalerProperty {
instances: 1,
spread: vec![],
},
model_name: name.to_owned(),
provider_config: config_names,
},
&component.name,
),
notifier.to_owned(),
config_scalers,
secret_scalers,
notifier_subject,
name,
// Providers use a longer backoff because downloading the provider image can take a while
Some(Duration::from_secs(60)),
)) as BoxedScaler)
}
}
}
}
scalers
}
/// Returns a tuple which is a list of scalers and a list of the names of the configs that the
/// scalers use.
///
/// Any input [ConfigProperty] that has a `properties` field will be converted into a [ConfigScaler], and
/// the name of the configuration will be modified to be unique to the model and component. If the `properties`
/// field is not present, the name is used as-is and is assumed to be managed externally to wadm. A short
/// illustration of this naming rule follows the function below.
fn config_to_scalers<C: ConfigSource + Send + Sync + Clone>(
config_source: C,
model_name: &str,
configs: &[ConfigProperty],
) -> (Vec<ConfigScaler<C>>, Vec<String>) {
configs
.iter()
.map(|config| {
let name = if config.properties.is_some() {
compute_component_id(model_name, None, &config.name)
} else {
config.name.clone()
};
(
ConfigScaler::new(config_source.clone(), &name, config.properties.as_ref()),
name,
)
})
.unzip()
}
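For reference, the naming rule described above can be shown with a small standalone sketch. The `sanitized_config_name` helper below is a hypothetical mirror of the `compute_component_id` sanitization defined later in this file, not part of wadm's API:

/// Hypothetical standalone mirror of the naming rule above (not wadm's API):
/// configs with inline `properties` get a model-scoped, sanitized name, while
/// externally managed configs keep their name unchanged.
fn sanitized_config_name(model_name: &str, config_name: &str, has_properties: bool) -> String {
    if has_properties {
        let sanitize = |s: &str| {
            s.to_lowercase()
                .replace(|c: char| !c.is_ascii_alphanumeric(), "_")
        };
        format!("{}-{}", sanitize(model_name), sanitize(config_name))
    } else {
        config_name.to_string()
    }
}

fn main() {
    // Inline properties: the name becomes unique to the model
    assert_eq!(
        sanitized_config_name("my app", "http-settings", true),
        "my_app-http_settings"
    );
    // No properties: the name is assumed to be managed outside wadm
    assert_eq!(
        sanitized_config_name("my app", "http-settings", false),
        "http-settings"
    );
}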
fn secrets_to_scalers<S: SecretSource + Send + Sync + Clone>(
secret_source: S,
model_name: &str,
secrets: &[SecretProperty],
policies: &HashMap<&String, &Policy>,
) -> (Vec<SecretScaler<S>>, Vec<String>) {
secrets
.iter()
.map(|s| {
let name = compute_secret_id(model_name, None, &s.name);
let policy = *policies.get(&s.properties.policy).unwrap();
(
SecretScaler::new(
name.clone(),
policy.clone(),
s.clone(),
secret_source.clone(),
),
name,
)
})
.unzip()
}
/// Based on the name of the model and the optionally provided ID, returns a unique ID for the
/// component that is a sanitized version of the model name and component name (or reference),
/// separated by a dash.
pub(crate) fn compute_component_id(
model_name: &str,
component_id: Option<&String>,
component_name: &str,
) -> String {
if let Some(id) = component_id {
id.to_owned()
} else {
format!(
"{}-{}",
model_name
.to_lowercase()
.replace(|c: char| !c.is_ascii_alphanumeric(), "_"),
component_name
.to_lowercase()
.replace(|c: char| !c.is_ascii_alphanumeric(), "_")
)
}
}
pub(crate) fn compute_secret_id(
model_name: &str,
component_id: Option<&String>,
component_name: &str,
) -> String {
let name = compute_component_id(model_name, component_id, component_name);
format!("{SECRET_PREFIX}_{name}")
}
#[cfg(test)]
mod test {
use crate::scaler::manager::compute_component_id;
#[test]
fn compute_proper_component_id() {
// User supplied ID always takes precedence
assert_eq!(
compute_component_id("mymodel", Some(&"myid".to_string()), "echo"),
"myid"
);
assert_eq!(
compute_component_id(
"some model name with spaces cause yaml",
Some(&"myid".to_string()),
" echo "
),
"myid"
);
// Sanitize component reference
assert_eq!(
compute_component_id("mymodel", None, "echo-component"),
"mymodel-echo_component"
);
// Ensure we can support spaces in the model name, because YAML strings
assert_eq!(
compute_component_id("some model name with spaces cause yaml", None, "echo"),
"some_model_name_with_spaces_cause_yaml-echo"
);
// Ensure we can support spaces in the model name, because YAML strings
// Ensure we can support lowercasing the reference as well, just in case
assert_eq!(
compute_component_id("My ThInG", None, "thing.wasm"),
"my_thing-thing_wasm"
);
}
}

View File

@ -18,10 +18,12 @@ use crate::{
};
pub mod configscaler;
mod convert;
pub mod daemonscaler;
pub mod manager;
pub mod secretscaler;
pub mod spreadscaler;
pub mod statusscaler;
use manager::Notifications;

View File

@ -1,5 +1,6 @@
use std::collections::{BTreeMap, HashSet};
use std::{cmp::Ordering, cmp::Reverse, collections::HashMap};
use std::{
cmp::Ordering, cmp::Reverse, collections::BTreeMap, collections::HashMap, collections::HashSet,
};
use anyhow::Result;
use async_trait::async_trait;
@ -9,7 +10,7 @@ use wadm_types::{
api::StatusInfo, Spread, SpreadScalerProperty, TraitProperty, DEFAULT_SPREAD_WEIGHT,
};
use crate::events::HostHeartbeat;
use crate::events::{ConfigSet, HostHeartbeat};
use crate::{
commands::{Command, ScaleComponent},
events::{Event, HostStarted, HostStopped},
@ -113,6 +114,9 @@ impl<S: ReadStore + Send + Sync + Clone> Scaler for ComponentSpreadScaler<S> {
Ok(Vec::new())
}
}
Event::ConfigSet(ConfigSet { config_name }) if self.config.contains(config_name) => {
self.reconcile().await
}
// No other event impacts the job of this scaler so we can ignore it
_ => Ok(Vec::new()),
}
@ -171,6 +175,7 @@ impl<S: ReadStore + Send + Sync + Clone> Scaler for ComponentSpreadScaler<S> {
let mut spread_status = vec![];
trace!(spread_requirements = ?self.spread_requirements, ?component_id, "Computing commands");
let mut component_instances_per_eligible_host: HashMap<&String, usize> = HashMap::new();
let commands = self
.spread_requirements
.iter()
@ -207,6 +212,14 @@ impl<S: ReadStore + Send + Sync + Clone> Scaler for ComponentSpreadScaler<S> {
}).collect()
})
.unwrap_or_default();
running_components_per_host.iter().for_each(|(host_id, count)| {
component_instances_per_eligible_host
.entry(host_id)
.and_modify(|e| *e += count)
.or_insert(*count);
});
let current_count: usize = running_components_per_host.values().sum();
trace!(current = %current_count, expected = %count, "Calculated running components, reconciling with expected count");
// Here we'll generate commands for the proper host depending on where they are running
@ -266,6 +279,19 @@ impl<S: ReadStore + Send + Sync + Clone> Scaler for ComponentSpreadScaler<S> {
.collect::<Vec<Command>>();
trace!(?commands, "Calculated commands for component scaler");
// Detect spread requirement conflicts
if let Some(message) = detect_spread_requirement_conflicts(
&self.spread_requirements,
&hosts,
&component_instances_per_eligible_host,
&commands,
) {
let status = StatusInfo::failed(&message);
trace!(?status, "Updating scaler status");
*self.status.write().await = status;
return Ok(vec![]);
}
let status = match (spread_status.is_empty(), commands.is_empty()) {
// No failures, no commands, scaler satisfied
(true, true) => StatusInfo::deployed(""),
@ -470,6 +496,94 @@ fn compute_spread(spread_config: &SpreadScalerProperty) -> Vec<(Spread, usize)>
computed_spreads
}
fn detect_spread_requirement_conflicts(
spread_requirements: &[(Spread, usize)],
hosts: &HashMap<String, Host>,
running_instances_per_host: &HashMap<&String, usize>,
commands: &[Command],
) -> Option<String> {
// Step 1: Determine the union of all eligible hosts for the configured spreads
// and collect the current instance count for each eligible host
let mut eligible_hosts_instances: HashMap<String, usize> = HashMap::new();
for (spread, _) in spread_requirements {
for (host_id, host) in hosts {
if spread.requirements.iter().all(|(key, value)| {
host.labels
.get(key)
.map(|val| val == value)
.unwrap_or(false)
}) {
let count = running_instances_per_host
.get(host_id)
.cloned()
.unwrap_or(0);
eligible_hosts_instances.insert(host_id.clone(), count);
}
}
}
// Step 2: derive changeset from commands (for commands that share the same host_id, select the command with the highest instance count & idx is used as a tiebreaker)
let mut changeset: HashMap<String, (usize, usize)> = HashMap::new();
for (idx, command) in commands.iter().enumerate() {
if let Command::ScaleComponent(ScaleComponent { host_id, count, .. }) = command {
let entry = changeset.entry(host_id.clone()).or_insert((0, usize::MAX));
if *count as usize > entry.0 || (*count as usize == entry.0 && idx < entry.1) {
*entry = (*count as usize, idx);
}
}
}
// Apply changeset to the eligible_hosts_instances
for (host_id, (count, _)) in changeset {
if let Some(current_count) = eligible_hosts_instances.get_mut(&host_id) {
*current_count = count;
}
}
// Step 3: Create a structure that maps a Spread to a tuple
// (spread_eligible_hosts_total_instance_count_if_all_commands_are_applied, target_instance_count_based_on_spread_weight)
let mut spread_instances: HashMap<String, (usize, usize)> = HashMap::new();
for (spread, target_count) in spread_requirements {
let projected_count: usize = eligible_hosts_instances
.iter()
.filter_map(|(host_id, count)| {
if spread.requirements.iter().all(|(key, value)| {
hosts
.get(host_id)
.unwrap()
.labels
.get(key)
.map(|val| val == value)
.unwrap_or(false)
}) {
Some(count)
} else {
None
}
})
.sum();
spread_instances.insert(spread.name.clone(), (projected_count, *target_count));
}
// Step 4: Compare the tuples' values to detect conflicts
let mut conflicts = Vec::new();
for (spread_name, (projected_count, target_count)) in spread_instances {
if projected_count != target_count {
conflicts.push(format!(
"Spread requirement conflict: {} spread requires {} instances vs {} computed from reconciliation commands",
spread_name, target_count, projected_count
));
}
}
if conflicts.is_empty() {
return None;
}
Some(conflicts.join(", "))
}
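As a worked illustration of the conflict the steps above detect, consider the numbers used in the tests below: 12 instances split across weights 25/25/50 yield per-spread targets of 3, 3, and 6, but when the third spread's label selector also matches every host eligible for the first two spreads, its projected count after reconciliation is the full 12, which can never equal its target of 6. The following standalone sketch (a hypothetical helper with illustrative numbers, not wadm code) walks through that arithmetic:

// Standalone sketch of why overlapping spread requirements produce a conflict:
// per-spread targets come from weights, but a spread whose label selector also
// matches the hosts of the other spreads "sees" all of their instances as well.
fn targets_from_weights(instances: usize, weights: &[usize]) -> Vec<usize> {
    let total: usize = weights.iter().sum();
    let mut targets: Vec<usize> = weights.iter().map(|w| instances * w / total).collect();
    // Hand out any remainder one instance at a time, mirroring the idea of the
    // remainder distribution performed by the real compute_spread.
    let mut remainder = instances - targets.iter().sum::<usize>();
    for t in targets.iter_mut() {
        if remainder == 0 {
            break;
        }
        *t += 1;
        remainder -= 1;
    }
    targets
}

fn main() {
    // eastcoast / westcoast / realcloud weights used in the conflict tests below
    let targets = targets_from_weights(12, &[25, 25, 50]);
    assert_eq!(targets, vec![3, 3, 6]);

    // If every host also carries the realcloud label (cloud=real), the realcloud
    // spread's eligible hosts include the eastcoast and westcoast hosts, so its
    // projected count after reconciliation is 3 + 3 + 6 = 12, never its target of 6.
    let projected_realcloud: usize = targets.iter().sum();
    assert_ne!(projected_realcloud, targets[2]);
}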
#[cfg(test)]
mod test {
use super::*;
@ -481,7 +595,7 @@ mod test {
use anyhow::Result;
use chrono::Utc;
use wadm_types::{Spread, SpreadScalerProperty};
use wadm_types::{api::StatusType, Spread, SpreadScalerProperty};
use wasmcloud_control_interface::Link;
use crate::{
@ -654,59 +768,78 @@ mod test {
#[tokio::test]
async fn can_compute_spread_commands() -> Result<()> {
let lattice_id = "hoohah_multi_stop_component";
let lattice_id = "can_compute_spread_commands";
let component_reference = "fakecloud.azurecr.io/echo:0.3.4".to_string();
let component_id = "fakecloud_azurecr_io_echo_0_3_4".to_string();
let host_id = "NASDASDIMAREALHOST";
let host_id1 = "HOST_ONE";
let host_id2 = "HOST_TWO";
let host_id3 = "HOST_THREE";
let store = Arc::new(TestStore::default());
// STATE SETUP BEGIN, ONE HOST
store
.store(
lattice_id,
host_id.to_string(),
Host {
components: HashMap::new(),
friendly_name: "hey".to_string(),
labels: HashMap::new(),
providers: HashSet::new(),
uptime_seconds: 123,
version: None,
id: host_id.to_string(),
last_seen: Utc::now(),
},
)
.await?;
// Create three hosts with different labels
let mut host1_labels = HashMap::new();
host1_labels.insert("zone".to_string(), "east".to_string());
// Ensure we compute correctly even if weights aren't specified
let complex_spread = SpreadScalerProperty {
let mut host2_labels = HashMap::new();
host2_labels.insert("zone".to_string(), "west".to_string());
let mut host3_labels = HashMap::new();
host3_labels.insert("zone".to_string(), "central".to_string());
// Store the hosts
for (host_id, labels) in [
(host_id1, host1_labels),
(host_id2, host2_labels),
(host_id3, host3_labels),
] {
store
.store(
lattice_id,
host_id.to_string(),
Host {
components: HashMap::new(),
friendly_name: format!("host_{}", host_id),
labels,
providers: HashSet::new(),
uptime_seconds: 123,
version: None,
id: host_id.to_string(),
last_seen: Utc::now(),
},
)
.await?;
}
// Create spread requirements that map to specific hosts
let mut east_requirement = BTreeMap::new();
east_requirement.insert("zone".to_string(), "east".to_string());
let mut west_requirement = BTreeMap::new();
west_requirement.insert("zone".to_string(), "west".to_string());
let mut central_requirement = BTreeMap::new();
central_requirement.insert("zone".to_string(), "central".to_string());
let spread_config = SpreadScalerProperty {
instances: 103,
spread: vec![
Spread {
// 9 + 1 (remainder trip)
name: "ComplexOne".to_string(),
requirements: BTreeMap::new(),
name: "EastZone".to_string(),
requirements: east_requirement, // Maps to host1
weight: Some(42),
},
Spread {
// 0
name: "ComplexTwo".to_string(),
requirements: BTreeMap::new(),
name: "WestZone".to_string(),
requirements: west_requirement, // Maps to host2
weight: Some(3),
},
Spread {
// 8
name: "ComplexThree".to_string(),
requirements: BTreeMap::new(),
name: "CentralZone".to_string(),
requirements: central_requirement, // Maps to host3
weight: Some(37),
},
Spread {
// 84 + 1 (remainder trip)
name: "ComplexFour".to_string(),
requirements: BTreeMap::new(),
weight: Some(384),
},
],
};
@ -716,38 +849,46 @@ mod test {
component_id.to_string(),
lattice_id.to_string(),
MODEL_NAME.to_string(),
complex_spread,
spread_config,
"fake_component",
vec![],
);
let cmds = spreadscaler.reconcile().await?;
assert_eq!(cmds.len(), 3);
// With weights 42:3:37 and total instances of 103
// EastZone (east) should get (52 + 1) instances
// WestZone (west) should get 3 instances
// CentralZone (central) should get (46 + 1) instances
assert!(cmds.contains(&Command::ScaleComponent(ScaleComponent {
component_id: component_id.to_string(),
reference: component_reference.to_string(),
host_id: host_id.to_string(),
count: 10,
host_id: host_id1.to_string(), // east zone
count: 53,
model_name: MODEL_NAME.to_string(),
annotations: spreadscaler_annotations("ComplexOne", spreadscaler.id()),
annotations: spreadscaler_annotations("EastZone", spreadscaler.id()),
config: vec![]
})));
assert!(cmds.contains(&Command::ScaleComponent(ScaleComponent {
component_id: component_id.to_string(),
reference: component_reference.to_string(),
host_id: host_id.to_string(),
count: 8,
host_id: host_id2.to_string(), // west zone
count: 3,
model_name: MODEL_NAME.to_string(),
annotations: spreadscaler_annotations("ComplexThree", spreadscaler.id()),
annotations: spreadscaler_annotations("WestZone", spreadscaler.id()),
config: vec![]
})));
assert!(cmds.contains(&Command::ScaleComponent(ScaleComponent {
component_id: component_id.to_string(),
reference: component_reference.to_string(),
host_id: host_id.to_string(),
count: 85,
host_id: host_id3.to_string(), // central zone
count: 47,
model_name: MODEL_NAME.to_string(),
annotations: spreadscaler_annotations("ComplexFour", spreadscaler.id()),
annotations: spreadscaler_annotations("CentralZone", spreadscaler.id()),
config: vec![]
})));
@ -1052,114 +1193,6 @@ mod test {
Ok(())
}
#[tokio::test]
async fn can_handle_multiple_spread_matches() -> Result<()> {
let lattice_id = "multiple_spread_matches";
let component_reference = "fakecloud.azurecr.io/echo:0.3.4".to_string();
let component_id = "fakecloud_azurecr_io_echo_0_3_4".to_string();
let host_id = "NASDASDIMAREALHOST";
let store = Arc::new(TestStore::default());
// Run 75% in east, 25% on resilient hosts
let real_spread = SpreadScalerProperty {
instances: 20,
spread: vec![
Spread {
name: "SimpleOne".to_string(),
requirements: BTreeMap::from_iter([("region".to_string(), "east".to_string())]),
weight: Some(75),
},
Spread {
name: "SimpleTwo".to_string(),
requirements: BTreeMap::from_iter([(
"resilient".to_string(),
"true".to_string(),
)]),
weight: Some(25),
},
],
};
let spreadscaler = ComponentSpreadScaler::new(
store.clone(),
component_reference.to_string(),
component_id.to_string(),
lattice_id.to_string(),
MODEL_NAME.to_string(),
real_spread,
"fake_component",
vec![],
);
// STATE SETUP BEGIN, ONE HOST
store
.store(
lattice_id,
host_id.to_string(),
Host {
components: HashMap::from_iter([(component_id.to_string(), 10)]),
friendly_name: "hey".to_string(),
labels: HashMap::from_iter([
("region".to_string(), "east".to_string()),
("resilient".to_string(), "true".to_string()),
]),
providers: HashSet::new(),
uptime_seconds: 123,
version: None,
id: host_id.to_string(),
last_seen: Utc::now(),
},
)
.await?;
store
.store(
lattice_id,
component_id.to_string(),
Component {
id: component_id.to_string(),
name: "Faketor".to_string(),
issuer: "AASDASDASDASD".to_string(),
instances: HashMap::from_iter([(
host_id.to_string(),
// 10 instances on this host under the first spread
HashSet::from_iter([WadmComponentInfo {
count: 10,
annotations: spreadscaler_annotations("SimpleOne", spreadscaler.id()),
}]),
)]),
reference: component_reference.to_string(),
},
)
.await?;
let cmds = spreadscaler.reconcile().await?;
assert_eq!(cmds.len(), 2);
// Should be enforcing 10 instances per spread
assert!(cmds.contains(&Command::ScaleComponent(ScaleComponent {
component_id: "fakecloud_azurecr_io_echo_0_3_4".to_string(),
reference: component_reference.to_string(),
host_id: host_id.to_string(),
count: 15,
model_name: MODEL_NAME.to_string(),
annotations: spreadscaler_annotations("SimpleOne", spreadscaler.id()),
config: vec![]
})));
assert!(cmds.contains(&Command::ScaleComponent(ScaleComponent {
component_id: "fakecloud_azurecr_io_echo_0_3_4".to_string(),
reference: component_reference.to_string(),
host_id: host_id.to_string(),
count: 5,
model_name: MODEL_NAME.to_string(),
annotations: spreadscaler_annotations("SimpleTwo", spreadscaler.id()),
config: vec![]
})));
Ok(())
}
#[tokio::test]
async fn calculates_proper_scale_commands() -> Result<()> {
let lattice_id = "calculates_proper_scale_commands";
@ -1676,4 +1709,358 @@ mod test {
.iter()
.any(|(id, _host)| *id == "NASDASDIMAREALHOST4"));
}
#[tokio::test]
async fn can_detect_spread_requirement_conflicts_1() -> Result<()> {
let lattice_id = "spread_requirement_conflicts";
let component_reference = "fakecloud.azurecr.io/echo:0.1.0".to_string();
let component_id = "fakecloud_azurecr_io_echo_0_1_0".to_string();
let component_name = "fakecomponent".to_string();
let store = Arc::new(TestStore::default());
let host_id_1 = "NASDASDIMAREALHOST1";
let host_id_2 = "NASDASDIMAREALHOST2";
let host_id_3 = "NASDASDIMAREALHOST3";
let host_id_4 = "NASDASDIMAREALHOST4";
// Create hosts with the specified labels and add them to the store
store
.store(
lattice_id,
host_id_1.to_string(),
Host {
components: HashMap::new(),
friendly_name: "node1".to_string(),
labels: HashMap::from_iter([
("region".to_string(), "us-east-1".to_string()),
("cloud".to_string(), "real".to_string()),
]),
providers: HashSet::new(),
uptime_seconds: 123,
version: None,
id: host_id_1.to_string(),
last_seen: Utc::now(),
},
)
.await?;
store
.store(
lattice_id,
host_id_2.to_string(),
Host {
components: HashMap::new(),
friendly_name: "node2".to_string(),
labels: HashMap::from_iter([
("region".to_string(), "us-east-2".to_string()),
("cloud".to_string(), "real".to_string()),
]),
providers: HashSet::new(),
uptime_seconds: 123,
version: None,
id: host_id_2.to_string(),
last_seen: Utc::now(),
},
)
.await?;
store
.store(
lattice_id,
host_id_3.to_string(),
Host {
components: HashMap::new(),
friendly_name: "node3".to_string(),
labels: HashMap::from_iter([
("region".to_string(), "us-west-1".to_string()),
("cloud".to_string(), "real".to_string()),
]),
providers: HashSet::new(),
uptime_seconds: 123,
version: None,
id: host_id_3.to_string(),
last_seen: Utc::now(),
},
)
.await?;
store
.store(
lattice_id,
host_id_4.to_string(),
Host {
components: HashMap::new(),
friendly_name: "node4".to_string(),
labels: HashMap::from_iter([
("region".to_string(), "us-west-2".to_string()),
("cloud".to_string(), "real".to_string()),
]),
providers: HashSet::new(),
uptime_seconds: 123,
version: None,
id: host_id_4.to_string(),
last_seen: Utc::now(),
},
)
.await?;
let spread_property = SpreadScalerProperty {
instances: 12,
spread: vec![
Spread {
name: "eastcoast".to_string(),
requirements: BTreeMap::from([("region".to_string(), "us-east-1".to_string())]),
weight: Some(25),
},
Spread {
name: "westcoast".to_string(),
requirements: BTreeMap::from([("region".to_string(), "us-west-1".to_string())]),
weight: Some(25),
},
Spread {
name: "realcloud".to_string(),
requirements: BTreeMap::from([("cloud".to_string(), "real".to_string())]),
weight: Some(50),
},
],
};
let spreadscaler = ComponentSpreadScaler::new(
store.clone(),
component_reference.to_string(),
component_id.to_string(),
lattice_id.to_string(),
MODEL_NAME.to_string(),
spread_property,
&component_name,
vec![],
);
spreadscaler.reconcile().await?;
// Check the status after reconciliation
let status = spreadscaler.status().await;
assert_eq!(status.status_type, StatusType::Failed,);
println!("{:?}", status);
assert!(status.message.contains(&format!(
"Spread requirement conflict: {} spread requires {} instances",
"realcloud", 6
)));
Ok(())
}
#[tokio::test]
async fn can_detect_spread_requirement_conflicts_2() -> Result<()> {
let lattice_id = "spread_requirement_conflicts";
let component_reference = "fakecloud.azurecr.io/echo:0.1.0";
let component_id = "fakecloud_azurecr_io_echo_0_1_0";
let component_name = "fakecomponent";
let store = Arc::new(TestStore::default());
let host_id_1 = "NASDASDIMAREALHOST1";
let host_id_2 = "NASDASDIMAREALHOST2";
let host_id_3 = "NASDASDIMAREALHOST3";
let host_id_4 = "NASDASDIMAREALHOST4";
let spread_property = SpreadScalerProperty {
instances: 12,
spread: vec![
Spread {
name: "eastcoast".to_string(),
requirements: BTreeMap::from([("region".to_string(), "us-east-1".to_string())]),
weight: Some(25),
},
Spread {
name: "westcoast".to_string(),
requirements: BTreeMap::from([("region".to_string(), "us-west-1".to_string())]),
weight: Some(25),
},
Spread {
name: "realcloud".to_string(),
requirements: BTreeMap::from([("cloud".to_string(), "real".to_string())]),
weight: Some(50),
},
],
};
let spreadscaler = ComponentSpreadScaler::new(
store.clone(),
component_reference.to_string(),
component_id.to_string(),
lattice_id.to_string(),
MODEL_NAME.to_string(),
spread_property,
component_name,
vec![],
);
// Create components with the specified labels and add them to the store
store
.store(
lattice_id,
component_id.to_string(),
Component {
id: component_id.to_string(),
name: component_name.to_string(),
issuer: "AASDASDASDASD".to_string(),
instances: HashMap::from_iter([
(
host_id_1.to_string(),
// 1 (realcloud) + 11 (eastcoast) = 12 instances on this host
HashSet::from_iter([
WadmComponentInfo {
count: 1,
annotations: spreadscaler_annotations(
"realcloud",
spreadscaler.id(),
),
},
WadmComponentInfo {
count: 11,
annotations: spreadscaler_annotations(
"eastcoast",
spreadscaler.id(),
),
},
]),
),
(
host_id_2.to_string(),
// 0 instances on this host
HashSet::from_iter([]),
),
(
host_id_3.to_string(),
// 2 (realcloud) + 33 (westcoast) = 35 instances on this host
HashSet::from_iter([
WadmComponentInfo {
count: 2,
annotations: spreadscaler_annotations(
"realcloud",
spreadscaler.id(),
),
},
WadmComponentInfo {
count: 33,
annotations: spreadscaler_annotations(
"westcoast",
spreadscaler.id(),
),
},
]),
),
(
host_id_4.to_string(),
// 44 (realcloud) instances on this host
HashSet::from_iter([WadmComponentInfo {
count: 44,
annotations: spreadscaler_annotations(
"realcloud",
spreadscaler.id(),
),
}]),
),
]),
reference: component_reference.to_string(),
},
)
.await?;
// Create hosts with the specified labels and add them to the store
store
.store(
lattice_id,
host_id_1.to_string(),
Host {
components: HashMap::from_iter([(component_id.to_string(), 12)]),
friendly_name: "node1".to_string(),
labels: HashMap::from_iter([
("region".to_string(), "us-east-1".to_string()),
("cloud".to_string(), "real".to_string()),
]),
providers: HashSet::new(),
uptime_seconds: 123,
version: None,
id: host_id_1.to_string(),
last_seen: Utc::now(),
},
)
.await?;
store
.store(
lattice_id,
host_id_2.to_string(),
Host {
components: HashMap::from_iter([(component_id.to_string(), 0)]),
friendly_name: "node2".to_string(),
labels: HashMap::from_iter([
("region".to_string(), "us-east-2".to_string()),
("cloud".to_string(), "real".to_string()),
]),
providers: HashSet::new(),
uptime_seconds: 123,
version: None,
id: host_id_2.to_string(),
last_seen: Utc::now(),
},
)
.await?;
store
.store(
lattice_id,
host_id_3.to_string(),
Host {
components: HashMap::from_iter([(component_id.to_string(), 35)]),
friendly_name: "node3".to_string(),
labels: HashMap::from_iter([
("region".to_string(), "us-west-1".to_string()),
("cloud".to_string(), "real".to_string()),
]),
providers: HashSet::new(),
uptime_seconds: 123,
version: None,
id: host_id_3.to_string(),
last_seen: Utc::now(),
},
)
.await?;
store
.store(
lattice_id,
host_id_4.to_string(),
Host {
components: HashMap::from_iter([(component_id.to_string(), 44)]),
friendly_name: "node4".to_string(),
labels: HashMap::from_iter([
("region".to_string(), "us-west-2".to_string()),
("cloud".to_string(), "real".to_string()),
]),
providers: HashSet::new(),
uptime_seconds: 123,
version: None,
id: host_id_4.to_string(),
last_seen: Utc::now(),
},
)
.await?;
spreadscaler.reconcile().await?;
// Check the status after reconciliation
let status = spreadscaler.status().await;
assert_eq!(status.status_type, StatusType::Failed,);
println!("{:?}", status);
assert!(status.message.contains(&format!(
"Spread requirement conflict: {} spread requires {} instances",
"realcloud", 6
)));
Ok(())
}
}

View File

@ -7,12 +7,16 @@ use anyhow::Result;
use async_trait::async_trait;
use tokio::sync::{OnceCell, RwLock};
use tracing::{instrument, trace};
use wadm_types::{api::StatusInfo, Spread, SpreadScalerProperty, TraitProperty};
use wadm_types::{
api::{StatusInfo, StatusType},
Spread, SpreadScalerProperty, TraitProperty,
};
use crate::{
commands::{Command, StartProvider, StopProvider},
events::{
Event, HostHeartbeat, HostStarted, HostStopped, ProviderInfo, ProviderStarted,
ConfigSet, Event, HostHeartbeat, HostStarted, HostStopped, ProviderHealthCheckFailed,
ProviderHealthCheckInfo, ProviderHealthCheckPassed, ProviderInfo, ProviderStarted,
ProviderStopped,
},
scaler::{
@ -22,7 +26,7 @@ use crate::{
},
Scaler,
},
storage::{Host, ReadStore},
storage::{Host, Provider, ProviderStatus, ReadStore},
SCALER_KEY,
};
@ -115,6 +119,65 @@ impl<S: ReadStore + Send + Sync + Clone> Scaler for ProviderSpreadScaler<S> {
{
self.reconcile().await
}
// perform status updates for health check events
Event::ProviderHealthCheckFailed(ProviderHealthCheckFailed {
data: ProviderHealthCheckInfo { provider_id, .. },
})
| Event::ProviderHealthCheckPassed(ProviderHealthCheckPassed {
data: ProviderHealthCheckInfo { provider_id, .. },
}) if provider_id == &self.config.provider_id => {
let provider = self
.store
.get::<Provider>(&self.config.lattice_id, &self.config.provider_id)
.await?;
let unhealthy_providers = provider.map_or(0, |p| {
p.hosts
.values()
.filter(|s| *s == &ProviderStatus::Failed)
.count()
});
let status = self.status.read().await.to_owned();
// update health status of scaler
if let Some(status) = match (status, unhealthy_providers > 0) {
// scaler is deployed but contains unhealthy providers
(
StatusInfo {
status_type: StatusType::Deployed,
..
},
true,
) => Some(StatusInfo::failed(&format!(
"Unhealthy provider on {} host(s)",
unhealthy_providers
))),
// The scaler can become unhealthy only if it was previously deployed;
// once it becomes healthy again, revert back to the deployed state.
// This is a workaround for detecting unhealthy status until
// StatusType::Unhealthy can be used
(
StatusInfo {
status_type: StatusType::Failed,
message,
},
false,
) if message.starts_with("Unhealthy provider on") => {
Some(StatusInfo::deployed(""))
}
// don't update status if scaler is not deployed
_ => None,
} {
*self.status.write().await = status;
}
// only the status needs updating; no new commands are required
Ok(Vec::new())
}
Event::ConfigSet(ConfigSet { config_name })
if self.config.provider_config.contains(config_name) =>
{
self.reconcile().await
}
// No other event impacts the job of this scaler so we can ignore it
_ => Ok(Vec::new()),
}
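The health-check handling above boils down to a small two-way status transition. The following is a minimal standalone sketch of that rule, using a hypothetical `ScalerStatus` enum rather than wadm's `StatusInfo` type:

// Minimal standalone sketch (hypothetical types, not wadm's API) of the rule above:
// a Deployed scaler with unhealthy providers is marked Failed with an
// "Unhealthy provider on N host(s)" message, and a scaler that was failed for
// exactly that reason flips back to Deployed once no unhealthy providers remain.
#[derive(Debug, PartialEq, Clone)]
enum ScalerStatus {
    Deployed,
    Failed(String),
}

fn next_status(current: &ScalerStatus, unhealthy_hosts: usize) -> Option<ScalerStatus> {
    match (current, unhealthy_hosts > 0) {
        (ScalerStatus::Deployed, true) => Some(ScalerStatus::Failed(format!(
            "Unhealthy provider on {unhealthy_hosts} host(s)"
        ))),
        (ScalerStatus::Failed(msg), false) if msg.starts_with("Unhealthy provider on") => {
            Some(ScalerStatus::Deployed)
        }
        // Any other combination leaves the status untouched.
        _ => None,
    }
}

fn main() {
    let failed = next_status(&ScalerStatus::Deployed, 1).unwrap();
    assert_eq!(
        failed,
        ScalerStatus::Failed("Unhealthy provider on 1 host(s)".into())
    );
    assert_eq!(next_status(&failed, 0), Some(ScalerStatus::Deployed));
    assert_eq!(next_status(&ScalerStatus::Deployed, 0), None);
}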
@ -1206,4 +1269,274 @@ mod test {
Ok(())
}
#[tokio::test]
async fn test_healthy_providers_return_healthy_status() -> Result<()> {
let lattice_id = "test_healthy_providers";
let provider_ref = "fakecloud.azurecr.io/provider:3.2.1".to_string();
let provider_id = "VASDASDIAMAREALPROVIDERPROVIDER";
let host_id_one = "NASDASDIMAREALHOSTONE";
let host_id_two = "NASDASDIMAREALHOSTTWO";
let store = Arc::new(TestStore::default());
store
.store(
lattice_id,
host_id_one.to_string(),
Host {
components: HashMap::new(),
friendly_name: "hey".to_string(),
labels: HashMap::from_iter([
("cloud".to_string(), "fake".to_string()),
("region".to_string(), "us-noneofyourbusiness-1".to_string()),
]),
providers: HashSet::from_iter([ProviderInfo {
provider_id: provider_id.to_string(),
provider_ref: provider_ref.to_string(),
annotations: BTreeMap::default(),
}]),
uptime_seconds: 123,
version: None,
id: host_id_one.to_string(),
last_seen: Utc::now(),
},
)
.await?;
// A single spread matching any host labeled cloud=fake
let multi_spread_even = SpreadScalerProperty {
instances: 2,
spread: vec![Spread {
name: "SimpleOne".to_string(),
requirements: BTreeMap::from_iter([("cloud".to_string(), "fake".to_string())]),
weight: Some(100),
}],
};
let spreadscaler = ProviderSpreadScaler::new(
store.clone(),
ProviderSpreadConfig {
lattice_id: lattice_id.to_string(),
provider_reference: provider_ref.to_string(),
provider_id: provider_id.to_string(),
spread_config: multi_spread_even,
model_name: MODEL_NAME.to_string(),
provider_config: vec!["foobar".to_string()],
},
"fake_component",
);
store
.store(
lattice_id,
host_id_two.to_string(),
Host {
components: HashMap::new(),
friendly_name: "hey".to_string(),
labels: HashMap::from_iter([
("cloud".to_string(), "fake".to_string()),
("region".to_string(), "us-yourhouse-1".to_string()),
]),
providers: HashSet::from_iter([ProviderInfo {
provider_id: provider_id.to_string(),
provider_ref: provider_ref.to_string(),
annotations: spreadscaler_annotations("SimpleOne", spreadscaler.id()),
}]),
uptime_seconds: 123,
version: None,
id: host_id_two.to_string(),
last_seen: Utc::now(),
},
)
.await?;
store
.store(
lattice_id,
provider_id.to_string(),
Provider {
id: provider_id.to_string(),
name: "provider".to_string(),
issuer: "issuer".to_string(),
reference: provider_ref.to_string(),
hosts: HashMap::from([
(host_id_one.to_string(), ProviderStatus::Failed),
(host_id_two.to_string(), ProviderStatus::Running),
]),
},
)
.await?;
spreadscaler.reconcile().await?;
spreadscaler
.handle_event(&Event::ProviderHealthCheckFailed(
ProviderHealthCheckFailed {
data: ProviderHealthCheckInfo {
provider_id: provider_id.to_string(),
host_id: host_id_one.to_string(),
},
},
))
.await?;
store
.store(
lattice_id,
provider_id.to_string(),
Provider {
id: provider_id.to_string(),
name: "provider".to_string(),
issuer: "issuer".to_string(),
reference: provider_ref.to_string(),
hosts: HashMap::from([
(host_id_one.to_string(), ProviderStatus::Pending),
(host_id_two.to_string(), ProviderStatus::Running),
]),
},
)
.await?;
spreadscaler
.handle_event(&Event::ProviderHealthCheckPassed(
ProviderHealthCheckPassed {
data: ProviderHealthCheckInfo {
provider_id: provider_id.to_string(),
host_id: host_id_two.to_string(),
},
},
))
.await?;
assert_eq!(
spreadscaler.status.read().await.to_owned(),
StatusInfo::deployed("")
);
Ok(())
}
#[tokio::test]
async fn test_unhealthy_providers_return_unhealthy_status() -> Result<()> {
let lattice_id = "test_unhealthy_providers";
let provider_ref = "fakecloud.azurecr.io/provider:3.2.1".to_string();
let provider_id = "VASDASDIAMAREALPROVIDERPROVIDER";
let host_id_one = "NASDASDIMAREALHOSTONE";
let host_id_two = "NASDASDIMAREALHOSTTWO";
let store = Arc::new(TestStore::default());
store
.store(
lattice_id,
host_id_one.to_string(),
Host {
components: HashMap::new(),
friendly_name: "hey".to_string(),
labels: HashMap::from_iter([
("cloud".to_string(), "fake".to_string()),
("region".to_string(), "us-noneofyourbusiness-1".to_string()),
]),
providers: HashSet::from_iter([ProviderInfo {
provider_id: provider_id.to_string(),
provider_ref: provider_ref.to_string(),
annotations: BTreeMap::default(),
}]),
uptime_seconds: 123,
version: None,
id: host_id_one.to_string(),
last_seen: Utc::now(),
},
)
.await?;
// A single spread matching any host labeled cloud=fake
let multi_spread_even = SpreadScalerProperty {
instances: 2,
spread: vec![Spread {
name: "SimpleOne".to_string(),
requirements: BTreeMap::from_iter([("cloud".to_string(), "fake".to_string())]),
weight: Some(100),
}],
};
let spreadscaler = ProviderSpreadScaler::new(
store.clone(),
ProviderSpreadConfig {
lattice_id: lattice_id.to_string(),
provider_reference: provider_ref.to_string(),
provider_id: provider_id.to_string(),
spread_config: multi_spread_even,
model_name: MODEL_NAME.to_string(),
provider_config: vec!["foobar".to_string()],
},
"fake_component",
);
store
.store(
lattice_id,
host_id_two.to_string(),
Host {
components: HashMap::new(),
friendly_name: "hey".to_string(),
labels: HashMap::from_iter([
("cloud".to_string(), "fake".to_string()),
("region".to_string(), "us-yourhouse-1".to_string()),
]),
providers: HashSet::from_iter([ProviderInfo {
provider_id: provider_id.to_string(),
provider_ref: provider_ref.to_string(),
annotations: spreadscaler_annotations("SimpleOne", spreadscaler.id()),
}]),
uptime_seconds: 123,
version: None,
id: host_id_two.to_string(),
last_seen: Utc::now(),
},
)
.await?;
store
.store(
lattice_id,
provider_id.to_string(),
Provider {
id: provider_id.to_string(),
name: "provider".to_string(),
issuer: "issuer".to_string(),
reference: provider_ref.to_string(),
hosts: HashMap::from([
(host_id_one.to_string(), ProviderStatus::Failed),
(host_id_two.to_string(), ProviderStatus::Running),
]),
},
)
.await?;
spreadscaler.reconcile().await?;
spreadscaler
.handle_event(&Event::ProviderHealthCheckFailed(
ProviderHealthCheckFailed {
data: ProviderHealthCheckInfo {
provider_id: provider_id.to_string(),
host_id: host_id_one.to_string(),
},
},
))
.await?;
assert_eq!(
spreadscaler.status.read().await.to_owned(),
StatusInfo::failed("Unhealthy provider on 1 host(s)")
);
Ok(())
}
}

View File

@ -0,0 +1,66 @@
use anyhow::Result;
use async_trait::async_trait;
use wadm_types::{api::StatusInfo, TraitProperty};
use crate::{commands::Command, events::Event, scaler::Scaler};
/// The StatusScaler is a scaler that only reports a predefined status and does not perform any actions.
/// It's primarily used as a placeholder for a scaler that wadm failed to initialize for reasons that
/// couldn't be caught during deployment, and will not be fixed until a new version of the app is deployed.
pub struct StatusScaler {
id: String,
kind: String,
name: String,
status: StatusInfo,
}
#[async_trait]
impl Scaler for StatusScaler {
fn id(&self) -> &str {
&self.id
}
fn kind(&self) -> &str {
&self.kind
}
fn name(&self) -> String {
self.name.to_string()
}
async fn status(&self) -> StatusInfo {
self.status.clone()
}
async fn update_config(&mut self, _config: TraitProperty) -> Result<Vec<Command>> {
Ok(vec![])
}
async fn handle_event(&self, _event: &Event) -> Result<Vec<Command>> {
Ok(Vec::with_capacity(0))
}
async fn reconcile(&self) -> Result<Vec<Command>> {
Ok(Vec::with_capacity(0))
}
async fn cleanup(&self) -> Result<Vec<Command>> {
Ok(Vec::with_capacity(0))
}
}
impl StatusScaler {
pub fn new(
id: impl AsRef<str>,
kind: impl AsRef<str>,
name: impl AsRef<str>,
status: StatusInfo,
) -> Self {
StatusScaler {
id: id.as_ref().to_string(),
kind: kind.as_ref().to_string(),
name: name.as_ref().to_string(),
status,
}
}
}
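For context, the manager can fall back to a `StatusScaler` when a real scaler cannot be constructed for a manifest component. A minimal usage sketch, assuming the types already imported in this file (the id, kind, name, and message values below are hypothetical), could look like:

// Hypothetical placeholder for a link scaler that failed to initialize; it only
// ever reports this fixed Failed status and never produces commands.
fn placeholder_scaler() -> StatusScaler {
    StatusScaler::new(
        "myapp-linkscaler-http", // hypothetical scaler id
        "linkscaler",            // kind of the scaler that could not be built
        "myapp-link-http",       // human-readable scaler name
        StatusInfo::failed("failed to build link scaler: missing target component"),
    )
}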

View File

@ -69,8 +69,7 @@ impl<P: Publisher> Handler<P> {
self.send_error(
msg.reply,
format!(
"Manifest name {} contains invalid characters. Manifest names can only contain alphanumeric characters, dashes, and underscores.",
manifest_name
"Manifest name {manifest_name} contains invalid characters. Manifest names can only contain alphanumeric characters, dashes, and underscores.",
),
)
.await;
@ -89,11 +88,47 @@ impl<P: Publisher> Handler<P> {
}
};
if let Some(error_message) = validate_manifest(manifest.clone()).await.err() {
if let Some(error_message) = validate_manifest(&manifest).await.err() {
self.send_error(msg.reply, error_message.to_string()).await;
return;
}
let all_stored_manifests = self
.store
.list(account_id, lattice_id)
.await
.unwrap_or_default();
let deployed_shared_apps: Vec<&Manifest> = all_stored_manifests
.iter()
// Only keep deployed, shared applications
.filter(|manifest| {
manifest.deployed_version().is_some() && manifest.get_current().shared()
})
.map(|manifest| manifest.get_current())
.collect();
// NOTE(brooksmtownsend): You can put an application with missing shared components, because
// the time when you truly need them is when you deploy the application. This can cause a bit
// of friction at deploy time, but it avoids the frustrating race condition where you
// - Put the application looking for a deployed shared component
// - Undeploy the application with the shared component
// - Deploy the new application looking for the shared component (error)
let missing_shared_components = manifest.missing_shared_components(&deployed_shared_apps);
let message = if missing_shared_components.is_empty() {
format!(
"Successfully put manifest {} {}",
manifest_name,
current_manifests.current_version().to_owned()
)
} else {
format!(
"Successfully put manifest {} {}, but some shared components are not deployed: {:?}",
manifest_name,
current_manifests.current_version().to_owned(),
missing_shared_components
)
};
let incoming_version = manifest.version().to_owned();
if !current_manifests.add_version(manifest) {
self.send_error(
@ -114,11 +149,7 @@ impl<P: Publisher> Handler<P> {
},
name: manifest_name.clone(),
total_versions: current_manifests.count(),
message: format!(
"Successfully put manifest {} {}",
manifest_name,
current_manifests.current_version()
),
message,
};
trace!(total_manifests = %resp.total_versions, "Storing manifests");
@ -381,97 +412,110 @@ impl<P: Publisher> Handler<P> {
}
}
};
let reply_data = if let Some(version) = req.version {
// TODO(#451): if shared and deployed, make sure that no other shared apps are using it
let reply_data = {
match self.store.get(account_id, lattice_id, name).await {
Ok(Some((mut current, current_revision))) => {
let deleted = current.delete_version(&version);
if deleted && !current.is_empty() {
// If the version we deleted was the deployed one, undeploy it
let deployed_version = current.deployed_version();
let undeploy = if deployed_version.map(|v| v == version).unwrap_or(false) {
trace!(?deployed_version, deleted_version = %version, "Deployed version matches deleted. Will undeploy");
current.undeploy();
true
if let Some(version) = req.version {
let deleted = current.delete_version(&version);
if deleted && !current.is_empty() {
// If the version we deleted was the deployed one, undeploy it
let deployed_version = current.deployed_version();
let undeploy = if deployed_version
.map(|v| v == version)
.unwrap_or(false)
{
trace!(?deployed_version, deleted_version = %version, "Deployed version matches deleted. Will undeploy");
current.undeploy();
true
} else {
trace!(?deployed_version, deleted_version = %version, "Deployed version does not match deleted version. Will not undeploy");
false
};
self.store
.set(account_id, lattice_id, current, Some(current_revision))
.await
.map(|_| DeleteModelResponse {
result: DeleteResult::Deleted,
message: format!(
"Successfully deleted version {version} of application {name}"
),
undeploy,
})
.unwrap_or_else(|e| {
error!(error = %e, "Unable to delete data");
DeleteModelResponse {
result: DeleteResult::Error,
message: format!(
"Internal storage error when deleting {version} of application {name}"
),
undeploy: false,
}
})
} else if deleted && current.is_empty() {
// If we deleted the last one, delete the model from the store
self.store
.delete(account_id, lattice_id, name)
.await
.map(|_| DeleteModelResponse {
result: DeleteResult::Deleted,
message: format!(
"Successfully deleted last version of application {name}"
),
// By default if it is all gone, we definitely undeployed things
undeploy: true,
})
.unwrap_or_else(|e| {
error!(error = %e, "Unable to delete data");
DeleteModelResponse {
result: DeleteResult::Deleted,
message: format!(
"Internal storage error when deleting {version} of application {name}"
),
undeploy: false,
}
})
} else {
trace!(?deployed_version, deleted_version = %version, "Deployed version does not match deleted version. Will not undeploy");
false
};
self.store
.set(account_id, lattice_id, current, Some(current_revision))
.await
.map(|_| DeleteModelResponse {
result: DeleteResult::Deleted,
message: format!(
"Successfully deleted version {version} of application {name}"
),
undeploy,
})
.unwrap_or_else(|e| {
DeleteModelResponse {
result: DeleteResult::Noop,
message: format!("Application version {version} doesn't exist"),
undeploy: false,
}
}
} else {
match self.store.delete(account_id, lattice_id, name).await {
Ok(_) => {
DeleteModelResponse {
result: DeleteResult::Deleted,
message: format!("Successfully deleted application {name}"),
// By default if it is all gone, we definitely undeployed things
undeploy: true,
}
}
Err(e) => {
error!(error = %e, "Unable to delete data");
DeleteModelResponse {
result: DeleteResult::Error,
message: "Internal storage error".to_string(),
message: format!(
"Internal storage error when deleting application {name}"
),
undeploy: false,
}
})
} else if deleted && current.is_empty() {
// If we deleted the last one, delete the model from the store
self.store
.delete(account_id, lattice_id, name)
.await
.map(|_| DeleteModelResponse {
result: DeleteResult::Deleted,
message: format!(
"Successfully deleted last version of application {name}"
),
// By default if it is all gone, we definitely undeployed things
undeploy: true,
})
.unwrap_or_else(|e| {
error!(error = %e, "Unable to delete data");
DeleteModelResponse {
result: DeleteResult::Deleted,
message: "Internal storage error".to_string(),
undeploy: false,
}
})
} else {
DeleteModelResponse {
result: DeleteResult::Noop,
message: format!("Application version {version} doesn't exist"),
undeploy: false,
}
}
}
}
Ok(None) => DeleteModelResponse {
result: DeleteResult::Noop,
message: format!("Application {name} doesn't exist"),
message: format!("Application {name} doesn't exist or was already deleted"),
undeploy: false,
},
Err(e) => {
error!(error = %e, "Unable to fetch current data data");
error!(error = %e, "Unable to fetch current manifest data for application {name}");
DeleteModelResponse {
result: DeleteResult::Error,
message: "Internal storage error".to_string(),
undeploy: false,
}
}
}
} else {
match self.store.delete(account_id, lattice_id, name).await {
Ok(_) => {
DeleteModelResponse {
result: DeleteResult::Deleted,
message: format!("Successfully deleted application {}", name),
// By default if it is all gone, we definitely undeployed things
undeploy: true,
}
}
Err(e) => {
error!(error = %e, "Unable to delete data");
DeleteModelResponse {
result: DeleteResult::Error,
message: "Internal storage error".to_string(),
message: format!("Internal storage error while fetching manifest data for application {name}"),
undeploy: false,
}
}
@ -673,6 +717,25 @@ impl<P: Publisher> Handler<P> {
}
}
// TODO(#451): If this app is shared, or the previous version was, make sure that shared
// components that have dependent applications are still present
let deployed_apps: Vec<&Manifest> = stored_models
.iter()
.filter(|a| a.deployed_version().is_some() && a.get_current().shared())
.map(|a| a.get_current())
.collect();
let missing_shared_components = staged_model.missing_shared_components(&deployed_apps);
// Ensure all shared components point to a valid component that is deployed in another application
if !missing_shared_components.is_empty() {
self.send_error(
msg.reply,
format!("Application contains shared components that are not deployed in other applications: {:?}", missing_shared_components.iter().map(|c| &c.name).collect::<Vec<_>>())
).await;
return;
}
if !manifests.deploy(req.version.clone()) {
trace!("Requested version does not exist");
self.send_reply(
@ -801,6 +864,7 @@ impl<P: Publisher> Handler<P> {
return;
}
};
// TODO(#451): if shared, make sure that no other shared apps are using it
let reply = if manifests.undeploy() {
trace!("Manifest undeployed. Storing updated manifest");
@ -963,8 +1027,8 @@ fn summary_from_manifest_status(manifest: StoredManifest, status: Status) -> Mod
}
// Manifest validation
pub(crate) async fn validate_manifest(manifest: Manifest) -> anyhow::Result<()> {
let failures = wadm_types::validation::validate_manifest(&manifest).await?;
pub(crate) async fn validate_manifest(manifest: &Manifest) -> anyhow::Result<()> {
let failures = wadm_types::validation::validate_manifest(manifest).await?;
for failure in failures {
if matches!(
failure.level,
@ -999,12 +1063,12 @@ mod test {
let correct_manifest = deserialize_yaml("../../tests/fixtures/manifests/simple.yaml")
.expect("Should be able to parse");
assert!(validate_manifest(correct_manifest).await.is_ok());
assert!(validate_manifest(&correct_manifest).await.is_ok());
let manifest = deserialize_yaml("../../tests/fixtures/manifests/incorrect_component.yaml")
.expect("Should be able to parse");
match validate_manifest(manifest).await {
match validate_manifest(&manifest).await {
Ok(()) => panic!("Should have detected incorrect component"),
Err(e) => {
assert!(e
@ -1016,7 +1080,7 @@ mod test {
let manifest = deserialize_yaml("../../tests/fixtures/manifests/duplicate_component.yaml")
.expect("Should be able to parse");
match validate_manifest(manifest).await {
match validate_manifest(&manifest).await {
Ok(()) => panic!("Should have detected duplicate component"),
Err(e) => assert!(e
.to_string()
@ -1026,7 +1090,7 @@ mod test {
let manifest = deserialize_yaml("../../tests/fixtures/manifests/duplicate_id1.yaml")
.expect("Should be able to parse");
match validate_manifest(manifest).await {
match validate_manifest(&manifest).await {
Ok(()) => {
panic!("Should have detected duplicate component ID in provider properties")
}
@ -1038,7 +1102,7 @@ mod test {
let manifest = deserialize_yaml("../../tests/fixtures/manifests/duplicate_id2.yaml")
.expect("Should be able to parse");
match validate_manifest(manifest).await {
match validate_manifest(&manifest).await {
Ok(()) => panic!("Should have detected duplicate component ID in component properties"),
Err(e) => assert!(e
.to_string()
@ -1049,12 +1113,41 @@ mod test {
deserialize_yaml("../../tests/fixtures/manifests/missing_capability_component.yaml")
.expect("Should be able to parse");
match validate_manifest(manifest).await {
match validate_manifest(&manifest).await {
Ok(()) => panic!("Should have detected missing capability component"),
Err(e) => assert!(e
.to_string()
.contains("The following capability component(s) are missing from the manifest: ")),
}
let manifest = deserialize_yaml("../../tests/fixtures/manifests/duplicate_links.yaml")
.expect("Should be able to parse");
match validate_manifest(&manifest).await {
Ok(()) => panic!("Should have detected duplicate links"),
Err(e) => assert!(e
.to_string()
.contains("Duplicate link found inside component")),
}
let manifest =
deserialize_yaml("../../tests/fixtures/manifests/correct_unique_interface_links.yaml")
.expect("Should be able to parse");
assert!(validate_manifest(&manifest).await.is_ok());
let manifest = deserialize_yaml(
"../../tests/fixtures/manifests/incorrect_unique_interface_links.yaml",
)
.expect("Should be able to parse");
match validate_manifest(&manifest).await {
Ok(()) => panic!("Should have detected duplicate interface links"),
Err(e) => assert!(
e.to_string()
.contains("Duplicate link found inside component")
&& e.to_string().contains("atomics"),
"Error should mention duplicate interface"
),
}
}
/// Ensure that a long image ref in a manifest works,
@ -1062,7 +1155,7 @@ mod test {
#[tokio::test]
async fn manifest_name_long_image_ref() -> Result<()> {
validate_manifest(
deserialize_yaml("../../tests/fixtures/manifests/long_image_refs.yaml")
&deserialize_yaml("../../tests/fixtures/manifests/long_image_refs.yaml")
.context("failed to deserialize YAML")?,
)
.await

View File

@ -31,7 +31,7 @@ pub struct Provider {
pub hosts: HashMap<String, ProviderStatus>,
}
#[derive(Debug, Serialize, Deserialize, Clone)]
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
pub enum ProviderStatus {
/// The provider is starting and hasn't returned a heartbeat yet
Pending,
@ -40,6 +40,7 @@ pub enum ProviderStatus {
/// The provider failed to start
// TODO(thomastaylor312): In the future, we'll probably want to decay out a provider from state
// if it hasn't had a heartbeat
// if it fails a recent health check
Failed,
}

View File

@ -1507,16 +1507,8 @@ mod test {
2,
"Should still have 2 components in state"
);
assert_component(
&components,
&component_1_id,
&[(host1_id, 2), (host2_id, 2)],
);
assert_component(
&components,
&component_2_id,
&[(host1_id, 2), (host2_id, 2)],
);
assert_component(&components, component_1_id, &[(host1_id, 2), (host2_id, 2)]);
assert_component(&components, component_2_id, &[(host1_id, 2), (host2_id, 2)]);
/***********************************************************/
/************** Component Scale Down Tests *****************/
@ -1543,12 +1535,8 @@ mod test {
2,
"Should still have 2 components in state"
);
assert_component(&components, &component_1_id, &[(host2_id, 2)]);
assert_component(
&components,
&component_2_id,
&[(host1_id, 2), (host2_id, 2)],
);
assert_component(&components, component_1_id, &[(host2_id, 2)]);
assert_component(&components, component_2_id, &[(host1_id, 2), (host2_id, 2)]);
let host = store
.get::<Host>(lattice_id, host2_id)
@ -1572,11 +1560,7 @@ mod test {
let components = store.list::<Component>(lattice_id).await.unwrap();
assert_eq!(components.len(), 1, "Should only have 1 component in state");
// Double check that the old one is still ok
assert_component(
&components,
&component_2_id,
&[(host1_id, 2), (host2_id, 2)],
);
assert_component(&components, component_2_id, &[(host1_id, 2), (host2_id, 2)]);
/***********************************************************/
/******************* Provider Stop Tests *******************/
@ -1735,11 +1719,7 @@ mod test {
// Double check providers and components are the same
let components = store.list::<Component>(lattice_id).await.unwrap();
assert_eq!(components.len(), 1, "Should only have 1 component in state");
assert_component(
&components,
&component_2_id,
&[(host1_id, 2), (host2_id, 2)],
);
assert_component(&components, component_2_id, &[(host1_id, 2), (host2_id, 2)]);
let providers = store.list::<Provider>(lattice_id).await.unwrap();
assert_eq!(providers.len(), 2, "Should still have 2 providers in state");
@ -1774,7 +1754,7 @@ mod test {
// Double check providers and components are the same
let components = store.list::<Component>(lattice_id).await.unwrap();
assert_eq!(components.len(), 1, "Should only have 1 component in state");
assert_component(&components, &component_2_id, &[(host2_id, 2)]);
assert_component(&components, component_2_id, &[(host2_id, 2)]);
let providers = store.list::<Provider>(lattice_id).await.unwrap();
assert_eq!(providers.len(), 1, "Should now have 1 provider in state");
@ -1857,7 +1837,7 @@ mod test {
])
.host_id(host_id.into())
.providers(vec![ProviderDescription::builder()
.id(&provider_id)
.id(provider_id)
.revision(0)
.build()
.expect("failed to build provider description")])
@ -1893,7 +1873,7 @@ mod test {
labels: HashMap::default(),
issuer: "".to_string(),
providers: vec![ProviderDescription::builder()
.id(&provider_id)
.id(provider_id)
.revision(0)
.build()
.expect("failed to build provider description")],

flake.lock (new file, 704 lines)
View File

@ -0,0 +1,704 @@
{
"nodes": {
"advisory-db": {
"flake": false,
"locked": {
"lastModified": 1737565911,
"narHash": "sha256-WxIWw1mSPJVU1JfIcTdIubU5UoIwwR8h7UcXop/6htg=",
"owner": "rustsec",
"repo": "advisory-db",
"rev": "ffa26704690a3dc403edcd94baef103ee48f66eb",
"type": "github"
},
"original": {
"owner": "rustsec",
"repo": "advisory-db",
"type": "github"
}
},
"advisory-db_2": {
"flake": false,
"locked": {
"lastModified": 1730464311,
"narHash": "sha256-9xJoP1766XJSO1Qr0Lxg2P6dwPncTr3BJYlFMSXBd/E=",
"owner": "rustsec",
"repo": "advisory-db",
"rev": "f3460e5ed91658ab94fa41908cfa44991f9f4f02",
"type": "github"
},
"original": {
"owner": "rustsec",
"repo": "advisory-db",
"type": "github"
}
},
"crane": {
"locked": {
"lastModified": 1737689766,
"narHash": "sha256-ivVXYaYlShxYoKfSo5+y5930qMKKJ8CLcAoIBPQfJ6s=",
"owner": "ipetkov",
"repo": "crane",
"rev": "6fe74265bbb6d016d663b1091f015e2976c4a527",
"type": "github"
},
"original": {
"owner": "ipetkov",
"repo": "crane",
"type": "github"
}
},
"crane_2": {
"locked": {
"lastModified": 1730652660,
"narHash": "sha256-+XVYfmVXAiYA0FZT7ijHf555dxCe+AoAT5A6RU+6vSo=",
"owner": "ipetkov",
"repo": "crane",
"rev": "a4ca93905455c07cb7e3aca95d4faf7601cba458",
"type": "github"
},
"original": {
"owner": "ipetkov",
"repo": "crane",
"type": "github"
}
},
"crane_3": {
"inputs": {
"flake-compat": "flake-compat",
"flake-utils": [
"wasmcloud",
"nixify",
"nix-log",
"nixify",
"flake-utils"
],
"nixpkgs": [
"wasmcloud",
"nixify",
"nix-log",
"nixify",
"nixpkgs"
],
"rust-overlay": [
"wasmcloud",
"nixify",
"nix-log",
"nixify",
"rust-overlay"
]
},
"locked": {
"lastModified": 1679255352,
"narHash": "sha256-nkGwGuNkhNrnN33S4HIDV5NzkzMLU5mNStRn9sZwq8c=",
"owner": "rvolosatovs",
"repo": "crane",
"rev": "cec65880599a4ec6426186e24342e663464f5933",
"type": "github"
},
"original": {
"owner": "rvolosatovs",
"ref": "feat/wit",
"repo": "crane",
"type": "github"
}
},
"fenix": {
"inputs": {
"nixpkgs": [
"nixpkgs"
],
"rust-analyzer-src": []
},
"locked": {
"lastModified": 1738132439,
"narHash": "sha256-7q5vsyPQf6/aQEKAOgZ4ggv++Z2ppPSuPCGKlbPcM88=",
"owner": "nix-community",
"repo": "fenix",
"rev": "f94e521c1922784c377a2cace90aa89a6b8a1011",
"type": "github"
},
"original": {
"owner": "nix-community",
"repo": "fenix",
"type": "github"
}
},
"fenix_2": {
"inputs": {
"nixpkgs": [
"wasmcloud",
"nixify",
"nixpkgs-nixos"
],
"rust-analyzer-src": "rust-analyzer-src"
},
"locked": {
"lastModified": 1731047492,
"narHash": "sha256-F4h8YtTzPWv0/1Z6fc8fMSqKpn7YhOjlgp66cr15tEo=",
"owner": "nix-community",
"repo": "fenix",
"rev": "da6332e801fbb0418f80f20cefa947c5fe5c18c9",
"type": "github"
},
"original": {
"owner": "nix-community",
"repo": "fenix",
"type": "github"
}
},
"fenix_3": {
"inputs": {
"nixpkgs": [
"wasmcloud",
"nixify",
"nix-log",
"nixify",
"nixpkgs"
],
"rust-analyzer-src": "rust-analyzer-src_2"
},
"locked": {
"lastModified": 1679552560,
"narHash": "sha256-L9Se/F1iLQBZFGrnQJO8c9wE5z0Mf8OiycPGP9Y96hA=",
"owner": "nix-community",
"repo": "fenix",
"rev": "fb49a9f5605ec512da947a21cc7e4551a3950397",
"type": "github"
},
"original": {
"owner": "nix-community",
"repo": "fenix",
"type": "github"
}
},
"flake-compat": {
"flake": false,
"locked": {
"lastModified": 1673956053,
"narHash": "sha256-4gtG9iQuiKITOjNQQeQIpoIB6b16fm+504Ch3sNKLd8=",
"owner": "edolstra",
"repo": "flake-compat",
"rev": "35bb57c0c8d8b62bbfd284272c928ceb64ddbde9",
"type": "github"
},
"original": {
"owner": "edolstra",
"repo": "flake-compat",
"type": "github"
}
},
"flake-utils": {
"inputs": {
"systems": "systems"
},
"locked": {
"lastModified": 1731533236,
"narHash": "sha256-l0KFg5HjrsfsO/JpG+r7fRrqm12kzFHyUHqHCVpMMbI=",
"owner": "numtide",
"repo": "flake-utils",
"rev": "11707dc2f618dd54ca8739b309ec4fc024de578b",
"type": "github"
},
"original": {
"owner": "numtide",
"repo": "flake-utils",
"type": "github"
}
},
"flake-utils_2": {
"inputs": {
"systems": "systems_2"
},
"locked": {
"lastModified": 1726560853,
"narHash": "sha256-X6rJYSESBVr3hBoH0WbKE5KvhPU5bloyZ2L4K60/fPQ=",
"owner": "numtide",
"repo": "flake-utils",
"rev": "c1dfcf08411b08f6b8615f7d8971a2bfa81d5e8a",
"type": "github"
},
"original": {
"owner": "numtide",
"repo": "flake-utils",
"type": "github"
}
},
"flake-utils_3": {
"locked": {
"lastModified": 1678901627,
"narHash": "sha256-U02riOqrKKzwjsxc/400XnElV+UtPUQWpANPlyazjH0=",
"owner": "numtide",
"repo": "flake-utils",
"rev": "93a2b84fc4b70d9e089d029deacc3583435c2ed6",
"type": "github"
},
"original": {
"owner": "numtide",
"repo": "flake-utils",
"type": "github"
}
},
"macos-sdk": {
"flake": false,
"locked": {
"lastModified": 1694769349,
"narHash": "sha256-TEvVJy+NMPyzgWSk/6S29ZMQR+ICFxSdS3tw247uhFc=",
"type": "tarball",
"url": "https://github.com/roblabla/MacOSX-SDKs/releases/download/macosx14.0/MacOSX14.0.sdk.tar.xz"
},
"original": {
"type": "tarball",
"url": "https://github.com/roblabla/MacOSX-SDKs/releases/download/macosx14.0/MacOSX14.0.sdk.tar.xz"
}
},
"nix-filter": {
"locked": {
"lastModified": 1730207686,
"narHash": "sha256-SCHiL+1f7q9TAnxpasriP6fMarWE5H43t25F5/9e28I=",
"owner": "numtide",
"repo": "nix-filter",
"rev": "776e68c1d014c3adde193a18db9d738458cd2ba4",
"type": "github"
},
"original": {
"owner": "numtide",
"repo": "nix-filter",
"type": "github"
}
},
"nix-filter_2": {
"locked": {
"lastModified": 1678109515,
"narHash": "sha256-C2X+qC80K2C1TOYZT8nabgo05Dw2HST/pSn6s+n6BO8=",
"owner": "numtide",
"repo": "nix-filter",
"rev": "aa9ff6ce4a7f19af6415fb3721eaa513ea6c763c",
"type": "github"
},
"original": {
"owner": "numtide",
"repo": "nix-filter",
"type": "github"
}
},
"nix-flake-tests": {
"locked": {
"lastModified": 1677844186,
"narHash": "sha256-ErJZ/Gs1rxh561CJeWP5bohA2IcTq1rDneu1WT6CVII=",
"owner": "antifuchs",
"repo": "nix-flake-tests",
"rev": "bbd9216bd0f6495bb961a8eb8392b7ef55c67afb",
"type": "github"
},
"original": {
"owner": "antifuchs",
"repo": "nix-flake-tests",
"type": "github"
}
},
"nix-flake-tests_2": {
"locked": {
"lastModified": 1677844186,
"narHash": "sha256-ErJZ/Gs1rxh561CJeWP5bohA2IcTq1rDneu1WT6CVII=",
"owner": "antifuchs",
"repo": "nix-flake-tests",
"rev": "bbd9216bd0f6495bb961a8eb8392b7ef55c67afb",
"type": "github"
},
"original": {
"owner": "antifuchs",
"repo": "nix-flake-tests",
"type": "github"
}
},
"nix-log": {
"inputs": {
"nix-flake-tests": "nix-flake-tests",
"nixify": "nixify_2",
"nixlib": "nixlib_2"
},
"locked": {
"lastModified": 1681933283,
"narHash": "sha256-phDsQdaoUEI4DUTErR6Tz7lS0y3kXvDwwbqtxpzd0eo=",
"owner": "rvolosatovs",
"repo": "nix-log",
"rev": "833d31e3c1a677eac81ba87e777afa5076071d66",
"type": "github"
},
"original": {
"owner": "rvolosatovs",
"repo": "nix-log",
"type": "github"
}
},
"nix-log_2": {
"inputs": {
"nix-flake-tests": "nix-flake-tests_2",
"nixify": [
"wasmcloud",
"wit-deps",
"nixify"
],
"nixlib": [
"wasmcloud",
"wit-deps",
"nixlib"
]
},
"locked": {
"lastModified": 1681933283,
"narHash": "sha256-phDsQdaoUEI4DUTErR6Tz7lS0y3kXvDwwbqtxpzd0eo=",
"owner": "rvolosatovs",
"repo": "nix-log",
"rev": "833d31e3c1a677eac81ba87e777afa5076071d66",
"type": "github"
},
"original": {
"owner": "rvolosatovs",
"repo": "nix-log",
"type": "github"
}
},
"nixify": {
"inputs": {
"advisory-db": "advisory-db_2",
"crane": "crane_2",
"fenix": "fenix_2",
"flake-utils": "flake-utils_2",
"macos-sdk": "macos-sdk",
"nix-filter": "nix-filter",
"nix-log": "nix-log",
"nixlib": [
"wasmcloud",
"nixlib"
],
"nixpkgs-darwin": "nixpkgs-darwin",
"nixpkgs-nixos": "nixpkgs-nixos",
"rust-overlay": "rust-overlay_2"
},
"locked": {
"lastModified": 1731068753,
"narHash": "sha256-6H+vYAYl/koFsiBEM4WHZhOoOQ2Hfzd+MtcxFfAOOtw=",
"owner": "rvolosatovs",
"repo": "nixify",
"rev": "7b83953ebfb22ba1f623ac06312aebee81f2182e",
"type": "github"
},
"original": {
"owner": "rvolosatovs",
"repo": "nixify",
"type": "github"
}
},
"nixify_2": {
"inputs": {
"crane": "crane_3",
"fenix": "fenix_3",
"flake-utils": "flake-utils_3",
"nix-filter": "nix-filter_2",
"nixlib": "nixlib",
"nixpkgs": "nixpkgs_2",
"rust-overlay": "rust-overlay"
},
"locked": {
"lastModified": 1679748566,
"narHash": "sha256-yA4yIJjNCOLoUh0py9S3SywwbPnd/6NPYbXad+JeOl0=",
"owner": "rvolosatovs",
"repo": "nixify",
"rev": "80e823959511a42dfec4409fef406a14ae8240f3",
"type": "github"
},
"original": {
"owner": "rvolosatovs",
"repo": "nixify",
"type": "github"
}
},
"nixlib": {
"locked": {
"lastModified": 1679187309,
"narHash": "sha256-H8udmkg5wppL11d/05MMzOMryiYvc403axjDNZy1/TQ=",
"owner": "nix-community",
"repo": "nixpkgs.lib",
"rev": "44214417fe4595438b31bdb9469be92536a61455",
"type": "github"
},
"original": {
"owner": "nix-community",
"repo": "nixpkgs.lib",
"type": "github"
}
},
"nixlib_2": {
"locked": {
"lastModified": 1679791877,
"narHash": "sha256-tTV1Mf0hPWIMtqyU16Kd2JUBDWvfHlDC9pF57vcbgpQ=",
"owner": "nix-community",
"repo": "nixpkgs.lib",
"rev": "cc060ddbf652a532b54057081d5abd6144d01971",
"type": "github"
},
"original": {
"owner": "nix-community",
"repo": "nixpkgs.lib",
"type": "github"
}
},
"nixlib_3": {
"locked": {
"lastModified": 1731200463,
"narHash": "sha256-qDaAweJjdFbVExqs8aG27urUgcgKufkIngHW3Rzustg=",
"owner": "nix-community",
"repo": "nixpkgs.lib",
"rev": "e04234d263750db01c78a412690363dc2226e68a",
"type": "github"
},
"original": {
"owner": "nix-community",
"repo": "nixpkgs.lib",
"type": "github"
}
},
"nixpkgs": {
"locked": {
"lastModified": 1738163270,
"narHash": "sha256-B/7Y1v4y+msFFBW1JAdFjNvVthvNdJKiN6EGRPnqfno=",
"owner": "NixOS",
"repo": "nixpkgs",
"rev": "59e618d90c065f55ae48446f307e8c09565d5ab0",
"type": "github"
},
"original": {
"owner": "NixOS",
"ref": "release-24.11",
"repo": "nixpkgs",
"type": "github"
}
},
"nixpkgs-darwin": {
"locked": {
"lastModified": 1730891215,
"narHash": "sha256-i85DPrhDuvzgvIWCpJlbfM2UFtNYbapo20MtQXsvay4=",
"owner": "nixos",
"repo": "nixpkgs",
"rev": "c128e44a249d6180740d0a979b6480d5b795c013",
"type": "github"
},
"original": {
"owner": "nixos",
"ref": "nixpkgs-24.05-darwin",
"repo": "nixpkgs",
"type": "github"
}
},
"nixpkgs-nixos": {
"locked": {
"lastModified": 1730883749,
"narHash": "sha256-mwrFF0vElHJP8X3pFCByJR365Q2463ATp2qGIrDUdlE=",
"owner": "nixos",
"repo": "nixpkgs",
"rev": "dba414932936fde69f0606b4f1d87c5bc0003ede",
"type": "github"
},
"original": {
"owner": "nixos",
"ref": "nixos-24.05",
"repo": "nixpkgs",
"type": "github"
}
},
"nixpkgs_2": {
"locked": {
"lastModified": 1679577639,
"narHash": "sha256-7u7bsNP0ApBnLgsHVROQ5ytoMqustmMVMgtaFS/P7EU=",
"owner": "nixos",
"repo": "nixpkgs",
"rev": "8f1bcd72727c5d4cd775545595d068be410f2a7e",
"type": "github"
},
"original": {
"owner": "nixos",
"ref": "nixpkgs-22.11-darwin",
"repo": "nixpkgs",
"type": "github"
}
},
"root": {
"inputs": {
"advisory-db": "advisory-db",
"crane": "crane",
"fenix": "fenix",
"flake-utils": "flake-utils",
"nixpkgs": "nixpkgs",
"wasmcloud": "wasmcloud"
}
},
"rust-analyzer-src": {
"flake": false,
"locked": {
"lastModified": 1730989300,
"narHash": "sha256-ZWSta9893f/uF5PoRFn/BSUAxF4dKW+TIbdA6rZoGBg=",
"owner": "rust-lang",
"repo": "rust-analyzer",
"rev": "1042a8c22c348491a4bade4f664430b03d6f5b5c",
"type": "github"
},
"original": {
"owner": "rust-lang",
"ref": "nightly",
"repo": "rust-analyzer",
"type": "github"
}
},
"rust-analyzer-src_2": {
"flake": false,
"locked": {
"lastModified": 1679520343,
"narHash": "sha256-AJGSGWRfoKWD5IVTu1wEsR990wHbX0kIaolPqNMEh0c=",
"owner": "rust-lang",
"repo": "rust-analyzer",
"rev": "eb791f31e688ae00908eb75d4c704ef60c430a92",
"type": "github"
},
"original": {
"owner": "rust-lang",
"ref": "nightly",
"repo": "rust-analyzer",
"type": "github"
}
},
"rust-overlay": {
"inputs": {
"flake-utils": [
"wasmcloud",
"nixify",
"nix-log",
"nixify",
"flake-utils"
],
"nixpkgs": [
"wasmcloud",
"nixify",
"nix-log",
"nixify",
"nixpkgs"
]
},
"locked": {
"lastModified": 1679537973,
"narHash": "sha256-R6borgcKeyMIjjPeeYsfo+mT8UdS+OwwbhhStdCfEjg=",
"owner": "oxalica",
"repo": "rust-overlay",
"rev": "fbc7ae3f14d32e78c0e8d7865f865cc28a46b232",
"type": "github"
},
"original": {
"owner": "oxalica",
"repo": "rust-overlay",
"type": "github"
}
},
"rust-overlay_2": {
"inputs": {
"nixpkgs": [
"wasmcloud",
"nixify",
"nixpkgs-nixos"
]
},
"locked": {
"lastModified": 1731032894,
"narHash": "sha256-dQSyYPmrQiPr+PGEd+K8038rubFGz7G/dNXVeaGWE0w=",
"owner": "oxalica",
"repo": "rust-overlay",
"rev": "d52f2a4c103a0acf09ded857b9e2519ae2360e59",
"type": "github"
},
"original": {
"owner": "oxalica",
"repo": "rust-overlay",
"type": "github"
}
},
"systems": {
"locked": {
"lastModified": 1681028828,
"narHash": "sha256-Vy1rq5AaRuLzOxct8nz4T6wlgyUR7zLU309k9mBC768=",
"owner": "nix-systems",
"repo": "default",
"rev": "da67096a3b9bf56a91d16901293e51ba5b49a27e",
"type": "github"
},
"original": {
"owner": "nix-systems",
"repo": "default",
"type": "github"
}
},
"systems_2": {
"locked": {
"lastModified": 1681028828,
"narHash": "sha256-Vy1rq5AaRuLzOxct8nz4T6wlgyUR7zLU309k9mBC768=",
"owner": "nix-systems",
"repo": "default",
"rev": "da67096a3b9bf56a91d16901293e51ba5b49a27e",
"type": "github"
},
"original": {
"owner": "nix-systems",
"repo": "default",
"type": "github"
}
},
"wasmcloud": {
"inputs": {
"nixify": "nixify",
"nixlib": "nixlib_3",
"wit-deps": "wit-deps"
},
"locked": {
"lastModified": 1731409523,
"narHash": "sha256-Q/BnuJaMyJfY+p9VpdyBWtRjEo4TdRvFMMhfdDFj6cU=",
"owner": "wasmCloud",
"repo": "wasmCloud",
"rev": "579455058513b907c7df4a4ec13728f83c6b782b",
"type": "github"
},
"original": {
"owner": "wasmCloud",
"ref": "wash-cli-v0.37.0",
"repo": "wasmCloud",
"type": "github"
}
},
"wit-deps": {
"inputs": {
"nix-log": "nix-log_2",
"nixify": [
"wasmcloud",
"nixify"
],
"nixlib": [
"wasmcloud",
"nixlib"
]
},
"locked": {
"lastModified": 1727963723,
"narHash": "sha256-urAGMGMH5ousEeVTZ5AaLPfowXaYQoISNXiutV00iQo=",
"owner": "bytecodealliance",
"repo": "wit-deps",
"rev": "eb7c84564acfe13a4197bb15052fd2e2b3d29775",
"type": "github"
},
"original": {
"owner": "bytecodealliance",
"ref": "v0.4.0",
"repo": "wit-deps",
"type": "github"
}
}
},
"root": "root",
"version": 7
}

flake.nix (new file)

@ -0,0 +1,264 @@
{
nixConfig.extra-substituters =
[ "https://wasmcloud.cachix.org" "https://crane.cachix.org" ];
nixConfig.extra-trusted-public-keys = [
"wasmcloud.cachix.org-1:9gRBzsKh+x2HbVVspreFg/6iFRiD4aOcUQfXVDl3hiM="
"crane.cachix.org-1:8Scfpmn9w+hGdXH/Q9tTLiYAE/2dnJYRJP7kl80GuRk="
];
description = "A flake for building and running wadm";
inputs = {
nixpkgs.url = "github:NixOS/nixpkgs/release-24.11";
crane.url = "github:ipetkov/crane";
fenix = {
url = "github:nix-community/fenix";
inputs.nixpkgs.follows = "nixpkgs";
inputs.rust-analyzer-src.follows = "";
};
flake-utils.url = "github:numtide/flake-utils";
advisory-db = {
url = "github:rustsec/advisory-db";
flake = false;
};
# The wash CLI tag is always after the latest host release tag we want
wasmcloud.url = "github:wasmCloud/wasmCloud/wash-cli-v0.37.0";
};
outputs =
{ self, nixpkgs, crane, fenix, flake-utils, advisory-db, wasmcloud, ... }:
flake-utils.lib.eachDefaultSystem (system:
let
pkgs = nixpkgs.legacyPackages.${system};
inherit (pkgs) lib;
craneLib = crane.mkLib pkgs;
src = craneLib.cleanCargoSource ./.;
# Common arguments can be set here to avoid repeating them later
commonArgs = {
inherit src;
strictDeps = true;
buildInputs = [
# Add additional build inputs here
] ++ lib.optionals pkgs.stdenv.isDarwin [
# Additional darwin specific inputs can be set here if needed
];
# Additional environment variables can be set directly here if needed
# MY_CUSTOM_VAR = "some value";
};
craneLibLLvmTools = craneLib.overrideToolchain
(fenix.packages.${system}.complete.withComponents [
"cargo"
"llvm-tools"
"rustc"
]);
# Get the lock file for filtering
rawLockFile = builtins.fromTOML (builtins.readFile ./Cargo.lock);
# Filter out the workspace members
filteredLockFile = rawLockFile // {
package = builtins.filter (x: !lib.strings.hasPrefix "wadm" x.name)
rawLockFile.package;
};
cargoVendorDir =
craneLib.vendorCargoDeps { cargoLockParsed = filteredLockFile; };
cargoLock = craneLib.writeTOML "Cargo.lock" filteredLockFile;
# Build *just* the cargo dependencies (of the entire workspace), but we don't want to build
# any of the other things in the crate to avoid rebuilding things in the dependencies when
# we change workspace crate dependencies
cargoArtifacts = let
commonArgs' = removeAttrs commonArgs [ "src" ];
# Get the manifest file for filtering
rawManifestFile = builtins.fromTOML (builtins.readFile ./Cargo.toml);
# Filter out the workspace members from manifest
filteredManifestFile = with lib;
let
filterWadmAttrs =
filterAttrs (name: _: !strings.hasPrefix "wadm" name);
workspace = removeAttrs rawManifestFile.workspace [ "members" ];
in rawManifestFile // {
workspace = workspace // {
dependencies = filterWadmAttrs workspace.dependencies;
package = workspace.package // {
# pin version to avoid rebuilds on bumps
version = "0.0.0";
};
};
dependencies = filterWadmAttrs rawManifestFile.dependencies;
dev-dependencies =
filterWadmAttrs rawManifestFile.dev-dependencies;
build-dependencies =
filterWadmAttrs rawManifestFile.build-dependencies;
};
cargoToml = craneLib.writeTOML "Cargo.toml" filteredManifestFile;
dummySrc = craneLib.mkDummySrc {
src = pkgs.runCommand "wadm-dummy-src" { } ''
mkdir -p $out
cp --recursive --no-preserve=mode,ownership ${src}/. -t $out
cp ${cargoToml} $out/Cargo.toml
'';
};
args = commonArgs' // {
inherit cargoLock cargoToml cargoVendorDir dummySrc;
cargoExtraArgs = ""; # disable `--locked` passed by default by crane
};
in craneLib.buildDepsOnly args;
individualCrateArgs = commonArgs // {
inherit (craneLib.crateNameFromCargoToml { inherit src; }) version;
# TODO(thomastaylor312) We run unit tests here and e2e tests externally. The nextest step
# wasn't letting me pass in the fileset
doCheck = true;
};
fileSetForCrate = lib.fileset.toSource {
root = ./.;
fileset = lib.fileset.unions [
./Cargo.toml
./Cargo.lock
./tests
./oam
(craneLib.fileset.commonCargoSources ./crates/wadm)
(craneLib.fileset.commonCargoSources ./crates/wadm-client)
(craneLib.fileset.commonCargoSources ./crates/wadm-types)
];
};
# Build the top-level crates of the workspace as individual derivations.
# This allows consumers to depend on (and build) only what they need.
# It is also possible to build the entire workspace as a single derivation,
# so how you organize things is left up to you.
#
# Note that the cargo workspace must define `workspace.members` using wildcards,
# otherwise, omitting a crate (like we do below) will result in errors since
# cargo won't be able to find the sources for all members.
# TODO(thomastaylor312) I tried using `doInstallCargoArtifacts` and passing in things to the
# next derivations as the `cargoArtifacts`, but that ended up always building things twice
# rather than caching. We should look into it more and see if there's a way to make it work.
wadm-lib = craneLib.cargoBuild (individualCrateArgs // {
inherit cargoArtifacts;
pname = "wadm";
cargoExtraArgs = "-p wadm";
src = fileSetForCrate;
});
wadm = craneLib.buildPackage (individualCrateArgs // {
inherit cargoArtifacts;
pname = "wadm-cli";
cargoExtraArgs = "--bin wadm";
src = fileSetForCrate;
});
wadm-client = craneLib.cargoBuild (individualCrateArgs // {
inherit cargoArtifacts;
pname = "wadm-client";
cargoExtraArgs = "-p wadm-client";
src = fileSetForCrate;
});
wadm-types = craneLib.cargoBuild (individualCrateArgs // {
inherit cargoArtifacts;
pname = "wadm-types";
cargoExtraArgs = "-p wadm-types";
src = fileSetForCrate;
});
in {
checks = {
# Build the crates as part of `nix flake check` for convenience
inherit wadm wadm-client wadm-types;
# Run clippy (and deny all warnings) on the workspace source,
# again, reusing the dependency artifacts from above.
#
# Note that this is done as a separate derivation so that
# we can block the CI if there are issues here, but not
# prevent downstream consumers from building our crate by itself.
workspace-clippy = craneLib.cargoClippy (commonArgs // {
inherit cargoArtifacts;
cargoClippyExtraArgs = "--all-targets -- --deny warnings";
});
workspace-doc =
craneLib.cargoDoc (commonArgs // { inherit cargoArtifacts; });
# Check formatting
workspace-fmt = craneLib.cargoFmt { inherit src; };
# Audit dependencies
workspace-audit = craneLib.cargoAudit { inherit src advisory-db; };
# Audit licenses
# my-workspace-deny = craneLib.cargoDeny {
# inherit src;
# };
# TODO: the wadm e2e tests use docker compose and things like `wash up` to test things
# (which accesses network currently). We would need to fix those tests to do something
# else to work properly. The low hanging fruit here would be to use the built artifact
# in the e2e tests so we can output those binaries from the nix build and then just
# run the tests from a separate repo. We could also do something like outputting the
# prebuilt artifacts out into the current directory to save on build time. But that is
# for later us to figure out
runE2ETests = pkgs.runCommand "e2e-tests" {
nativeBuildInputs = with pkgs;
[
nats-server
# wasmcloud.wasmcloud
];
} ''
touch $out
'';
};
packages = {
inherit wadm wadm-client wadm-types wadm-lib;
default = wadm;
} // lib.optionalAttrs (!pkgs.stdenv.isDarwin) {
workspace-llvm-coverage = craneLibLLvmTools.cargoLlvmCov
(commonArgs // { inherit cargoArtifacts; });
};
apps = {
wadm = flake-utils.lib.mkApp { drv = wadm; };
default = flake-utils.lib.mkApp { drv = wadm; };
};
devShells.default = craneLib.devShell {
# Inherit inputs from checks.
checks = self.checks.${system};
RUST_SRC_PATH =
"${pkgs.rust.packages.stable.rustPlatform.rustLibSrc}";
# Extra inputs can be added here; cargo and rustc are provided by default.
packages = [
pkgs.nats-server
pkgs.natscli
pkgs.docker
pkgs.git
wasmcloud.outputs.packages.${system}.default
];
};
});
}


@ -1,9 +1,14 @@
{
"$schema": "http://json-schema.org/draft-07/schema#",
"title": "wasmCloud Application Deployment Manager (wadm) Manifest",
"title": "Manifest",
"description": "Manifest file based on the Open Application Model (OAM) specification for declaratively managing wasmCloud applications",
"type": "object",
"required": ["apiVersion", "kind", "metadata", "spec"],
"required": [
"apiVersion",
"kind",
"metadata",
"spec"
],
"properties": {
"apiVersion": {
"description": "The OAM version of the manifest",
@ -34,8 +39,18 @@
"definitions": {
"CapabilityProperties": {
"type": "object",
"required": ["image"],
"properties": {
"application": {
"description": "Information to locate a component within a shared application. Cannot be specified if the image is specified.",
"anyOf": [
{
"$ref": "#/definitions/SharedApplicationComponentProperties"
},
{
"type": "null"
}
]
},
"config": {
"description": "Named configuration to pass to the provider. The merged set of configuration will be passed to the provider at runtime using the provider SDK's `init()` function.",
"type": "array",
@ -45,11 +60,17 @@
},
"id": {
"description": "The component ID to use for this provider. If not supplied, it will be generated as a combination of the [Metadata::name] and the image reference.",
"type": ["string", "null"]
"type": [
"string",
"null"
]
},
"image": {
"description": "The image reference to use",
"type": "string"
"description": "The image reference to use. Required unless the component is a shared component that is defined in another shared application.",
"type": [
"string",
"null"
]
},
"secrets": {
"description": "Named secret references to pass to the t. The provider will be able to retrieve these values at runtime using `wasmcloud:secrets/store`.",
@ -67,32 +88,44 @@
"oneOf": [
{
"type": "object",
"required": ["properties", "type"],
"required": [
"properties",
"type"
],
"properties": {
"properties": {
"$ref": "#/definitions/ComponentProperties"
},
"type": {
"type": "string",
"enum": ["component"]
"enum": [
"component"
]
}
}
},
{
"type": "object",
"required": ["properties", "type"],
"required": [
"properties",
"type"
],
"properties": {
"properties": {
"$ref": "#/definitions/CapabilityProperties"
},
"type": {
"type": "string",
"enum": ["capability"]
"enum": [
"capability"
]
}
}
}
],
"required": ["name"],
"required": [
"name"
],
"properties": {
"name": {
"description": "The name of this component",
@ -100,7 +133,10 @@
},
"traits": {
"description": "A list of various traits assigned to this component",
"type": ["array", "null"],
"type": [
"array",
"null"
],
"items": {
"$ref": "#/definitions/Trait"
}
@ -109,8 +145,18 @@
},
"ComponentProperties": {
"type": "object",
"required": ["image"],
"properties": {
"application": {
"description": "Information to locate a component within a shared application. Cannot be specified if the image is specified.",
"anyOf": [
{
"$ref": "#/definitions/SharedApplicationComponentProperties"
},
{
"type": "null"
}
]
},
"config": {
"description": "Named configuration to pass to the component. The component will be able to retrieve these values at runtime using `wasi:runtime/config.`",
"type": "array",
@ -120,11 +166,17 @@
},
"id": {
"description": "The component ID to use for this component. If not supplied, it will be generated as a combination of the [Metadata::name] and the image reference.",
"type": ["string", "null"]
"type": [
"string",
"null"
]
},
"image": {
"description": "The image reference to use",
"type": "string"
"description": "The image reference to use. Required unless the component is a shared component that is defined in another shared application.",
"type": [
"string",
"null"
]
},
"secrets": {
"description": "Named secret references to pass to the component. The component will be able to retrieve these values at runtime using `wasmcloud:secrets/store`.",
@ -156,7 +208,9 @@
"ConfigProperty": {
"description": "Properties for the config list associated with components, providers, and links\n\n## Usage Defining a config block, like so: ```yaml source_config: - name: \"external-secret-kv\" - name: \"default-port\" properties: port: \"8080\" ```\n\nWill result in two config scalers being created, one with the name `basic-kv` and one with the name `default-port`. Wadm will not resolve collisions with configuration names between manifests.",
"type": "object",
"required": ["name"],
"required": [
"name"
],
"properties": {
"name": {
"description": "Name of the config to ensure exists",
@ -164,7 +218,10 @@
},
"properties": {
"description": "Optional properties to put with the configuration. If the properties are omitted in the manifest, wadm will assume that the configuration is externally managed and will not attempt to create it, only reporting the status as failed if not found.",
"type": ["object", "null"],
"type": [
"object",
"null"
],
"additionalProperties": {
"type": "string"
}
@ -175,7 +232,12 @@
"LinkProperty": {
"description": "Properties for links",
"type": "object",
"required": ["interfaces", "namespace", "package", "target"],
"required": [
"interfaces",
"namespace",
"package",
"target"
],
"properties": {
"interfaces": {
"description": "WIT interfaces for the link",
@ -186,7 +248,10 @@
},
"name": {
"description": "The name of this link",
"type": ["string", "null"]
"type": [
"string",
"null"
]
},
"namespace": {
"description": "WIT namespace for the link",
@ -210,7 +275,10 @@
"source_config": {
"deprecated": true,
"writeOnly": true,
"type": ["array", "null"],
"type": [
"array",
"null"
],
"items": {
"$ref": "#/definitions/ConfigProperty"
}
@ -226,17 +294,24 @@
"target_config": {
"deprecated": true,
"writeOnly": true,
"type": ["array", "null"],
"type": [
"array",
"null"
],
"items": {
"$ref": "#/definitions/ConfigProperty"
}
}
}
},
"additionalProperties": false
},
"Metadata": {
"description": "The metadata describing the manifest",
"type": "object",
"required": ["annotations", "name"],
"required": [
"annotations",
"name"
],
"properties": {
"annotations": {
"description": "Optional data for annotating this manifest see <https://github.com/oam-dev/spec/blob/master/metadata.md#annotations-format>",
@ -261,7 +336,11 @@
"Policy": {
"description": "A policy definition",
"type": "object",
"required": ["name", "properties", "type"],
"required": [
"name",
"properties",
"type"
],
"properties": {
"name": {
"description": "The name of this policy",
@ -282,14 +361,17 @@
},
"SecretProperty": {
"type": "object",
"required": ["name", "properties"],
"required": [
"name",
"properties"
],
"properties": {
"name": {
"description": "The name of the secret. This is used by a reference by the component or capability to get the secret value as a resource.",
"type": "string"
},
"properties": {
"description": "The of the secret. This indicates how to retrieve the secret value from a secrets backend and which backend to actually query.",
"description": "The properties of the secret that indicate how to retrieve the secret value from a secrets backend and which backend to actually query.",
"allOf": [
{
"$ref": "#/definitions/SecretSourceProperty"
@ -300,8 +382,18 @@
},
"SecretSourceProperty": {
"type": "object",
"required": ["key", "policy"],
"required": [
"key",
"policy"
],
"properties": {
"field": {
"description": "The field to use for retrieving the secret from the backend. This is optional and can be used to retrieve a specific field from a secret.",
"type": [
"string",
"null"
]
},
"key": {
"description": "The key to use for retrieving the secret from the backend.",
"type": "string"
@ -312,14 +404,36 @@
},
"version": {
"description": "The version of the secret to retrieve. If not supplied, the latest version will be used.",
"type": ["string", "null"]
"type": [
"string",
"null"
]
}
}
},
"SharedApplicationComponentProperties": {
"type": "object",
"required": [
"component",
"name"
],
"properties": {
"component": {
"description": "The name of the component in the shared application",
"type": "string"
},
"name": {
"description": "The name of the shared application",
"type": "string"
}
}
},
"Specification": {
"description": "A representation of an OAM specification",
"type": "object",
"required": ["components"],
"required": [
"components"
],
"properties": {
"components": {
"description": "The list of components for describing an application",
@ -340,7 +454,10 @@
"Spread": {
"description": "Configuration for various spreading requirements",
"type": "object",
"required": ["name", "requirements"],
"required": [
"name",
"requirements"
],
"properties": {
"name": {
"description": "The name of this spread requirement",
@ -355,7 +472,10 @@
},
"weight": {
"description": "An optional weight for this spread. Higher weights are given more precedence",
"type": ["integer", "null"],
"type": [
"integer",
"null"
],
"format": "uint",
"minimum": 0.0
}
@ -365,7 +485,9 @@
"SpreadScalerProperty": {
"description": "Properties for spread scalers",
"type": "object",
"required": ["instances"],
"required": [
"instances"
],
"properties": {
"instances": {
"description": "Number of instances to spread across matching requirements",
@ -385,7 +507,9 @@
},
"TargetConfig": {
"type": "object",
"required": ["name"],
"required": [
"name"
],
"properties": {
"config": {
"type": "array",
@ -407,7 +531,10 @@
},
"Trait": {
"type": "object",
"required": ["properties", "type"],
"required": [
"properties",
"type"
],
"properties": {
"properties": {
"description": "The properties of this trait",
@ -437,4 +564,4 @@
]
}
}
}
}


@ -18,7 +18,7 @@ The following is a list of the `traits` wasmCloud has added via customization to
## JSON Schema
A JSON schema is automatically generated from our Rust structures and is at the root of the repository: [oam.schema.json](../oam.schema.json).
A JSON schema is automatically generated from our Rust structures and is at the root of the repository: [oam.schema.json](../oam.schema.json). You can regenerate the `oam.schema.json` file by running `cargo run --bin wadm-schema`.
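For context, the sketch below is a minimal illustration of that generation mechanism, assuming the manifest types derive `schemars::JsonSchema`; the `ExampleManifest` type and its fields are placeholders, not the actual wadm-types definitions:

use schemars::{schema_for, JsonSchema};

// Placeholder standing in for the real wadm-types manifest structures.
#[derive(JsonSchema)]
#[allow(dead_code)]
struct ExampleManifest {
    api_version: String,
    kind: String,
}

fn main() {
    // schema_for! walks the type and produces a draft-07 JSON Schema document,
    // which can then be written out to a file such as oam.schema.json.
    let schema = schema_for!(ExampleManifest);
    println!(
        "{}",
        serde_json::to_string_pretty(&schema).expect("schema should serialize")
    );
}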
## Example Application YAML


@ -19,7 +19,7 @@ spec:
- name: webcap
type: capability
properties:
image: ghcr.io/wasmcloud/http-server:0.21.0
image: ghcr.io/wasmcloud/http-server:0.23.0
# You can pass any config data you'd like sent to your provider as a string->string map
config:
- name: provider_config


@ -21,7 +21,7 @@ spec:
- name: httpserver
type: capability
properties:
image: ghcr.io/wasmcloud/http-server:0.21.0
image: ghcr.io/wasmcloud/http-server:0.23.0
traits:
# Link the HTTP server and set it to listen on the local machine's port 8080
- type: link


@ -37,7 +37,7 @@ spec:
- name: httpserver
type: capability
properties:
image: ghcr.io/wasmcloud/http-server:0.21.0
image: ghcr.io/wasmcloud/http-server:0.23.0
traits:
# Compose with component to handle wasi:http calls
- type: link

rust-toolchain.toml (new file)

@ -0,0 +1,3 @@
[toolchain]
channel = "stable"
components = ["clippy", "rust-src", "rustfmt"]


@ -1,9 +1,10 @@
use std::io::IsTerminal;
use opentelemetry::sdk::{
trace::{IdGenerator, Sampler},
Resource,
};
use opentelemetry_otlp::{Protocol, WithExportConfig};
use std::io::IsTerminal;
use tracing::{Event as TracingEvent, Subscriber};
use tracing_subscriber::fmt::{
format::{Format, Full, Json, JsonFields, Writer},


@ -1,530 +1,40 @@
use std::path::PathBuf;
use std::sync::Arc;
use std::time::Duration;
use async_nats::jetstream::{stream::Stream, Context};
use anyhow::Context as _;
use clap::Parser;
use tokio::sync::Semaphore;
use tracing::log::debug;
use wadm_types::api::DEFAULT_WADM_TOPIC_PREFIX;
use wadm::{config::WadmConfig, start_wadm};
use wadm::{
consumers::{
manager::{ConsumerManager, WorkerCreator},
*,
},
nats_utils::LatticeIdParser,
scaler::manager::{ScalerManager, WADM_NOTIFY_PREFIX},
server::{ManifestNotifier, Server},
storage::{nats_kv::NatsKvStore, reaper::Reaper},
workers::{CommandPublisher, CommandWorker, EventWorker, StatusPublisher},
DEFAULT_COMMANDS_TOPIC, DEFAULT_EVENTS_TOPIC, DEFAULT_MULTITENANT_EVENTS_TOPIC,
DEFAULT_STATUS_TOPIC, DEFAULT_WADM_EVENTS_TOPIC, DEFAULT_WADM_EVENT_CONSUMER_TOPIC,
};
mod connections;
mod logging;
mod nats;
mod observer;
use connections::ControlClientConstructor;
const WADM_EVENT_STREAM_NAME: &str = "wadm_events";
const WADM_EVENT_CONSUMER_STREAM_NAME: &str = "wadm_event_consumer";
const COMMAND_STREAM_NAME: &str = "wadm_commands";
const STATUS_STREAM_NAME: &str = "wadm_status";
const NOTIFY_STREAM_NAME: &str = "wadm_notify";
const WASMBUS_EVENT_STREAM_NAME: &str = "wasmbus_events";
#[derive(Parser, Debug)]
#[command(name = clap::crate_name!(), version = clap::crate_version!(), about = "wasmCloud Application Deployment Manager", long_about = None)]
struct Args {
/// The ID for this wadm process. Defaults to a random UUIDv4 if none is provided. This is used
/// to help with debugging when identifying which process is doing the work
#[arg(short = 'i', long = "host-id", env = "WADM_HOST_ID")]
host_id: Option<String>,
/// Whether or not to use structured log output (as JSON)
#[arg(
short = 'l',
long = "structured-logging",
default_value = "false",
env = "WADM_STRUCTURED_LOGGING"
)]
structured_logging: bool,
/// Whether or not to enable opentelemetry tracing
#[arg(
short = 't',
long = "tracing",
default_value = "false",
env = "WADM_TRACING_ENABLED"
)]
tracing_enabled: bool,
/// The endpoint to use for tracing. Setting this flag enables tracing, even if --tracing is set
/// to false. Defaults to http://localhost:4318/v1/traces if not set and tracing is enabled
#[arg(short = 'e', long = "tracing-endpoint", env = "WADM_TRACING_ENDPOINT")]
tracing_endpoint: Option<String>,
/// The NATS JetStream domain to connect to
#[arg(short = 'd', env = "WADM_JETSTREAM_DOMAIN")]
domain: Option<String>,
/// (Advanced) Tweak the maximum number of jobs to run for handling events and commands. Be
/// careful how you use this as it can affect performance
#[arg(short = 'j', long = "max-jobs", env = "WADM_MAX_JOBS")]
max_jobs: Option<usize>,
/// The URL of the nats server you want to connect to
#[arg(
short = 's',
long = "nats-server",
env = "WADM_NATS_SERVER",
default_value = "127.0.0.1:4222"
)]
nats_server: String,
/// Use the specified nkey file or seed literal for authentication. Must be used in conjunction with --nats-jwt
#[arg(
long = "nats-seed",
env = "WADM_NATS_NKEY",
conflicts_with = "nats_creds",
requires = "nats_jwt"
)]
nats_seed: Option<String>,
/// Use the specified jwt file or literal for authentication. Must be used in conjunction with --nats-nkey
#[arg(
long = "nats-jwt",
env = "WADM_NATS_JWT",
conflicts_with = "nats_creds",
requires = "nats_seed"
)]
nats_jwt: Option<String>,
/// (Optional) NATS credential file to use when authenticating
#[arg(
long = "nats-creds-file",
env = "WADM_NATS_CREDS_FILE",
conflicts_with_all = ["nats_seed", "nats_jwt"],
)]
nats_creds: Option<PathBuf>,
/// (Optional) NATS TLS certificate file to use when authenticating
#[arg(long = "nats-tls-ca-file", env = "WADM_NATS_TLS_CA_FILE")]
nats_tls_ca_file: Option<PathBuf>,
/// Name of the bucket used for storage of lattice state
#[arg(
long = "state-bucket-name",
env = "WADM_STATE_BUCKET_NAME",
default_value = "wadm_state"
)]
state_bucket: String,
/// The amount of time in seconds to give for hosts to fail to heartbeat and be removed from the
/// store. By default, this is 70s because it is 2x the host heartbeat interval plus a little padding
#[arg(
long = "cleanup-interval",
env = "WADM_CLEANUP_INTERVAL",
default_value = "70"
)]
cleanup_interval: u64,
/// The API topic prefix to use. This is an advanced setting that should only be used if you
/// know what you are doing
#[arg(
long = "api-prefix",
env = "WADM_API_PREFIX",
default_value = DEFAULT_WADM_TOPIC_PREFIX
)]
api_prefix: String,
/// This prefix is used for the internal streams. When running in a multitenant environment,
/// clients share the same JS domain (since messages need to come from lattices).
/// Setting a stream prefix makes it possible to have a separate stream for different wadms running in a multitenant environment.
/// This is an advanced setting that should only be used if you know what you are doing.
#[arg(long = "stream-prefix", env = "WADM_STREAM_PREFIX")]
stream_prefix: Option<String>,
/// Name of the bucket used for storage of manifests
#[arg(
long = "manifest-bucket-name",
env = "WADM_MANIFEST_BUCKET_NAME",
default_value = "wadm_manifests"
)]
manifest_bucket: String,
/// Run wadm in multitenant mode. This is for advanced multitenant use cases with segmented NATS
/// account traffic and not simple cases where all lattices use credentials from the same
/// account. See the deployment guide for more information
#[arg(long = "multitenant", env = "WADM_MULTITENANT", hide = true)]
multitenant: bool,
//
// Max bytes configuration for streams. Primarily configurable to enable deployment on NATS infra
// with limited resources.
//
/// Maximum bytes to keep for the state bucket
#[arg(
long = "state-bucket-max-bytes",
env = "WADM_STATE_BUCKET_MAX_BYTES",
default_value_t = -1,
hide = true
)]
max_state_bucket_bytes: i64,
/// Maximum bytes to keep for the manifest bucket
#[arg(
long = "manifest-bucket-max-bytes",
env = "WADM_MANIFEST_BUCKET_MAX_BYTES",
default_value_t = -1,
hide = true
)]
max_manifest_bucket_bytes: i64,
/// Maximum bytes to keep for the command stream
#[arg(
long = "command-stream-max-bytes",
env = "WADM_COMMAND_STREAM_MAX_BYTES",
default_value_t = -1,
hide = true
)]
max_command_stream_bytes: i64,
/// Maximum bytes to keep for the event stream
#[arg(
long = "event-stream-max-bytes",
env = "WADM_EVENT_STREAM_MAX_BYTES",
default_value_t = -1,
hide = true
)]
max_event_stream_bytes: i64,
/// Maximum bytes to keep for the event consumer stream
#[arg(
long = "event-consumer-stream-max-bytes",
env = "WADM_EVENT_CONSUMER_STREAM_MAX_BYTES",
default_value_t = -1,
hide = true
)]
max_event_consumer_stream_bytes: i64,
/// Maximum bytes to keep for the status stream
#[arg(
long = "status-stream-max-bytes",
env = "WADM_STATUS_STREAM_MAX_BYTES",
default_value_t = -1,
hide = true
)]
max_status_stream_bytes: i64,
/// Maximum bytes to keep for the notify stream
#[arg(
long = "notify-stream-max-bytes",
env = "WADM_NOTIFY_STREAM_MAX_BYTES",
default_value_t = -1,
hide = true
)]
max_notify_stream_bytes: i64,
/// Maximum bytes to keep for the wasmbus event stream
#[arg(
long = "wasmbus-event-stream-max-bytes",
env = "WADM_WASMBUS_EVENT_STREAM_MAX_BYTES",
default_value_t = -1,
hide = true
)]
max_wasmbus_event_stream_bytes: i64,
}
#[tokio::main]
async fn main() -> anyhow::Result<()> {
let args = Args::parse();
let args = WadmConfig::parse();
logging::configure_tracing(
args.structured_logging,
args.tracing_enabled,
args.tracing_endpoint,
args.tracing_endpoint.clone(),
);
// Build storage adapter for lattice state (on by default)
let (client, context) = nats::get_client_and_context(
args.nats_server.clone(),
args.domain.clone(),
args.nats_seed.clone(),
args.nats_jwt.clone(),
args.nats_creds.clone(),
args.nats_tls_ca_file.clone(),
)
.await?;
// TODO: We will probably need to set up all the flags (like lattice prefix and topic prefix) down the line
let connection_pool = ControlClientConstructor::new(client.clone(), None);
let trimmer: &[_] = &['.', '>', '*'];
let store =
nats::ensure_kv_bucket(&context, args.state_bucket, 1, args.max_state_bucket_bytes).await?;
let state_storage = NatsKvStore::new(store);
let manifest_storage = nats::ensure_kv_bucket(
&context,
args.manifest_bucket,
1,
args.max_manifest_bucket_bytes,
)
.await?;
let internal_stream_name = |stream_name: &str| -> String {
match args.stream_prefix.clone() {
Some(stream_prefix) => {
format!(
"{}.{}",
stream_prefix.trim_end_matches(trimmer),
stream_name
)
}
None => stream_name.to_string(),
}
};
debug!("Ensuring wadm event stream");
let event_stream = nats::ensure_limits_stream(
&context,
internal_stream_name(WADM_EVENT_STREAM_NAME),
vec![DEFAULT_WADM_EVENTS_TOPIC.to_owned()],
Some(
"A stream that stores all events coming in on the wadm.evt subject in a cluster"
.to_string(),
),
args.max_event_stream_bytes,
)
.await?;
debug!("Ensuring command stream");
let command_stream = nats::ensure_stream(
&context,
internal_stream_name(COMMAND_STREAM_NAME),
vec![DEFAULT_COMMANDS_TOPIC.to_owned()],
Some("A stream that stores all commands for wadm".to_string()),
args.max_command_stream_bytes,
)
.await?;
let status_stream = nats::ensure_status_stream(
&context,
internal_stream_name(STATUS_STREAM_NAME),
vec![DEFAULT_STATUS_TOPIC.to_owned()],
args.max_status_stream_bytes,
)
.await?;
debug!("Ensuring wasmbus event stream");
// Remove the previous wadm_(multitenant)_mirror streams so that they don't
// prevent us from creating the new wasmbus_(multitenant)_events stream
// TODO(joonas): Remove this some time in the future once we're confident
// enough that there are no more wadm_(multitenant)_mirror streams around.
for mirror_stream_name in &["wadm_mirror", "wadm_multitenant_mirror"] {
if (context.get_stream(mirror_stream_name).await).is_ok() {
context.delete_stream(mirror_stream_name).await?;
}
}
let wasmbus_event_subjects = match args.multitenant {
true => vec![DEFAULT_MULTITENANT_EVENTS_TOPIC.to_owned()],
false => vec![DEFAULT_EVENTS_TOPIC.to_owned()],
};
let wasmbus_event_stream = nats::ensure_limits_stream(
&context,
WASMBUS_EVENT_STREAM_NAME.to_string(),
wasmbus_event_subjects.clone(),
Some(
"A stream that stores all events coming in on the wasmbus.evt subject in a cluster"
.to_string(),
),
args.max_wasmbus_event_stream_bytes,
)
.await?;
debug!("Ensuring notify stream");
let notify_stream = nats::ensure_notify_stream(
&context,
NOTIFY_STREAM_NAME.to_owned(),
vec![format!("{WADM_NOTIFY_PREFIX}.*")],
args.max_notify_stream_bytes,
)
.await?;
debug!("Ensuring event consumer stream");
let event_consumer_stream = nats::ensure_event_consumer_stream(
&context,
WADM_EVENT_CONSUMER_STREAM_NAME.to_owned(),
DEFAULT_WADM_EVENT_CONSUMER_TOPIC.to_owned(),
vec![&wasmbus_event_stream, &event_stream],
Some(
"A stream that sources from wadm_events and wasmbus_events for wadm event consumer's use"
.to_string(),
),
args.max_event_consumer_stream_bytes,
)
.await?;
debug!("Creating event consumer manager");
let permit_pool = Arc::new(Semaphore::new(
args.max_jobs.unwrap_or(Semaphore::MAX_PERMITS),
));
let event_worker_creator = EventWorkerCreator {
state_store: state_storage.clone(),
manifest_store: manifest_storage.clone(),
pool: connection_pool.clone(),
command_topic_prefix: DEFAULT_COMMANDS_TOPIC.trim_matches(trimmer).to_owned(),
publisher: context.clone(),
notify_stream,
status_stream: status_stream.clone(),
};
let events_manager: ConsumerManager<EventConsumer> = ConsumerManager::new(
permit_pool.clone(),
event_consumer_stream,
event_worker_creator.clone(),
args.multitenant,
)
.await;
debug!("Creating command consumer manager");
let command_worker_creator = CommandWorkerCreator {
pool: connection_pool,
};
let commands_manager: ConsumerManager<CommandConsumer> = ConsumerManager::new(
permit_pool.clone(),
command_stream,
command_worker_creator.clone(),
args.multitenant,
)
.await;
// TODO(thomastaylor312): We might want to figure out how not to run this globally. Doing a
// synthetic event sent to the stream could be nice, but all the wadm processes would still fire
// off that tick, resulting in multiple people handling. We could maybe get it to work with the
// right duplicate window, but we have no idea when each process could fire a tick. Worst case
// scenario right now is that multiple fire simultaneously and a few of them just delete nothing
let reaper = Reaper::new(
state_storage.clone(),
Duration::from_secs(args.cleanup_interval / 2),
[],
);
let wadm_event_prefix = DEFAULT_WADM_EVENTS_TOPIC.trim_matches(trimmer);
debug!("Creating lattice observer");
let observer = observer::Observer {
parser: LatticeIdParser::new("wasmbus", args.multitenant),
command_manager: commands_manager,
event_manager: events_manager,
reaper,
client: client.clone(),
command_worker_creator,
event_worker_creator,
};
debug!("Subscribing to API topic");
let server = Server::new(
manifest_storage,
client,
Some(&args.api_prefix),
args.multitenant,
status_stream,
ManifestNotifier::new(wadm_event_prefix, context),
)
.await?;
let mut wadm = start_wadm(args).await.context("failed to run wadm")?;
tokio::select! {
res = server.serve() => {
res?
res = wadm.join_next() => {
match res {
Some(Ok(_)) => {
tracing::info!("WADM has exited successfully");
std::process::exit(0);
}
Some(Err(e)) => {
tracing::error!("WADM has exited with an error: {:?}", e);
std::process::exit(1);
}
None => {
tracing::info!("WADM server did not start");
std::process::exit(0);
}
}
}
res = observer.observe(wasmbus_event_subjects) => {
res?
_ = tokio::signal::ctrl_c() => {
tracing::info!("Received Ctrl+C, shutting down");
std::process::exit(0);
}
_ = tokio::signal::ctrl_c() => {}
}
Ok(())
}
#[derive(Clone)]
struct CommandWorkerCreator {
pool: ControlClientConstructor,
}
#[async_trait::async_trait]
impl WorkerCreator for CommandWorkerCreator {
type Output = CommandWorker;
async fn create(
&self,
lattice_id: &str,
multitenant_prefix: Option<&str>,
) -> anyhow::Result<Self::Output> {
let client = self.pool.get_connection(lattice_id, multitenant_prefix);
Ok(CommandWorker::new(client))
}
}
#[derive(Clone)]
struct EventWorkerCreator<StateStore> {
state_store: StateStore,
manifest_store: async_nats::jetstream::kv::Store,
pool: ControlClientConstructor,
command_topic_prefix: String,
publisher: Context,
notify_stream: Stream,
status_stream: Stream,
}
#[async_trait::async_trait]
impl<StateStore> WorkerCreator for EventWorkerCreator<StateStore>
where
StateStore: wadm::storage::Store + Send + Sync + Clone + 'static,
{
type Output = EventWorker<StateStore, wasmcloud_control_interface::Client, Context>;
async fn create(
&self,
lattice_id: &str,
multitenant_prefix: Option<&str>,
) -> anyhow::Result<Self::Output> {
let client = self.pool.get_connection(lattice_id, multitenant_prefix);
let command_publisher = CommandPublisher::new(
self.publisher.clone(),
&format!("{}.{lattice_id}", self.command_topic_prefix),
);
let status_publisher = StatusPublisher::new(
self.publisher.clone(),
Some(self.status_stream.clone()),
&format!("wadm.status.{lattice_id}"),
);
let manager = ScalerManager::new(
self.publisher.clone(),
self.notify_stream.clone(),
lattice_id,
multitenant_prefix,
self.state_store.clone(),
self.manifest_store.clone(),
command_publisher.clone(),
status_publisher.clone(),
client.clone(),
)
.await?;
Ok(EventWorker::new(
self.state_store.clone(),
client,
command_publisher,
status_publisher,
manager,
))
}
}


@ -452,7 +452,7 @@ async fn test_delete_noop() {
.expect("should have created a nats client");
let test_server = setup_server("delete_noop", nats_client).await;
// Delete something that doesn't exist
// Delete a model that doesn't exist
let resp: DeleteModelResponse = test_server
.get_response(
"default.model.del.my-example-app",
@ -469,7 +469,19 @@ async fn test_delete_noop() {
);
assert!(!resp.message.is_empty(), "Should have a message set");
// Delete a non-existent version
let resp: DeleteModelResponse = test_server
.get_response(
"default.model.del.my-example-app",
serde_json::to_vec(&DeleteModelRequest { version: None }).unwrap(),
None,
)
.await;
assert!(
matches!(resp.result, DeleteResult::Noop),
"Should have gotten noop response for already deleted model"
);
// Delete a non-existent version for an existing model
let raw = tokio::fs::read("./oam/sqldbpostgres.yaml")
.await
.expect("Unable to load file");


@ -353,14 +353,7 @@ async fn test_annotation_stop() {
// acts on _everything_. We could technically move this back down after the initial scale up of
// the managed components after https://github.com/wasmCloud/wasmCloud/issues/746 is resolved
ctl_client
.scale_component(
host_id,
HELLO_IMAGE_REF,
"unmanaged-hello",
1,
None,
vec![],
)
.scale_component(host_id, HELLO_IMAGE_REF, "unmanaged-hello", 1, None, vec![])
.await
.unwrap();


@ -6,7 +6,7 @@ services:
- 4222:4222
# Have hosts in 3 different "regions"
wasmcloud_east:
image: wasmcloud/wasmcloud:1.0.4
image: wasmcloud/wasmcloud:latest
depends_on:
- nats
deploy:
@ -18,7 +18,7 @@ services:
WASMCLOUD_CLUSTER_SEED: SCAOGJWX53TGI4233T6GAXWYWBIB5ZDGPTCO6ODJQYELS52YCQCBQSRPA4
WASMCLOUD_LABEL_region: us-brooks-east
wasmcloud_west:
image: wasmcloud/wasmcloud:1.0.4
image: wasmcloud/wasmcloud:latest
depends_on:
- nats
deploy:
@ -30,7 +30,7 @@ services:
WASMCLOUD_CLUSTER_SEED: SCAOGJWX53TGI4233T6GAXWYWBIB5ZDGPTCO6ODJQYELS52YCQCBQSRPA4
WASMCLOUD_LABEL_region: us-taylor-west
wasmcloud_moon:
image: wasmcloud/wasmcloud:1.0.4
image: wasmcloud/wasmcloud:latest
depends_on:
- nats
deploy:


@ -0,0 +1,28 @@
services:
nats:
image: nats:2.10-alpine
command: ['-js']
ports:
- 4222:4222
wasmcloud_test_host_one:
image: wasmcloud/wasmcloud:latest
depends_on:
- nats
deploy:
replicas: 2
environment:
LC_ALL: en_US.UTF-8
RUST_LOG: debug,hyper=info
WASMCLOUD_NATS_HOST: nats
WASMCLOUD_LATTICE: shared_providers
wasmcloud_test_host_two:
image: wasmcloud/wasmcloud:latest
depends_on:
- nats
deploy:
replicas: 2
environment:
LC_ALL: en_US.UTF-8
RUST_LOG: debug,hyper=info
WASMCLOUD_NATS_HOST: nats
WASMCLOUD_LATTICE: shared_components


@ -5,7 +5,7 @@ services:
ports:
- 4222:4222
wasmcloud:
image: wasmcloud/wasmcloud:1.0.4
image: wasmcloud/wasmcloud:latest
depends_on:
- nats
deploy:


@ -154,7 +154,7 @@ impl ClientInfo {
self.wadm_clients.insert(lattice_prefix.to_string(), client);
}
pub async fn launch_wadm(&mut self) {
pub async fn launch_wadm(&mut self, extra_envs: Option<HashMap<&str, &str>>) {
let repo_root =
PathBuf::from(std::env::var("CARGO_MANIFEST_DIR").expect("Unable to find repo root"));
// Create the logging directory
@ -172,6 +172,12 @@ impl ClientInfo {
wadm_binary_path.display()
)
}
let mut envs = HashMap::from([
("RUST_LOG","info,wadm=debug,wadm::scaler=trace,wadm::workers::event=trace,wasmcloud_control_interface=trace")
]);
if let Some(extra_envs) = extra_envs {
envs.extend(extra_envs);
}
for i in 0..3 {
let log_path = log_dir.join(format!("wadm-{i}"));
@ -184,10 +190,7 @@ impl ClientInfo {
.stderr(file)
.stdout(Stdio::null())
.kill_on_drop(true)
.env(
"RUST_LOG",
"info,wadm=debug,wadm::scaler=trace,wadm::workers::event=trace,wasmcloud_control_interface=trace",
)
.envs(&envs)
.spawn()
.expect("Unable to spawn wadm binary");
self.commands.push(child);
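For reference, a hedged sketch of how the new `launch_wadm` signature can be used from a test. This assumes the surrounding e2e harness and its `ClientInfo` type; the RUST_LOG value is illustrative, not something the harness requires:

use std::collections::HashMap;

async fn launch_with_overrides(client_info: &mut ClientInfo) {
    // Passing None keeps the default RUST_LOG set by the harness; supplying a
    // map merges extra variables over that default for all spawned wadm processes.
    let extra = HashMap::from([("RUST_LOG", "trace,async_nats=info")]);
    client_info.launch_wadm(Some(extra)).await;
}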


@ -41,7 +41,7 @@ async fn run_multiple_host_tests() {
let mut client_info = ClientInfo::new(manifest_dir, compose_file).await;
client_info.add_ctl_client(DEFAULT_LATTICE_ID, None).await;
client_info.add_wadm_client(DEFAULT_LATTICE_ID).await;
client_info.launch_wadm().await;
client_info.launch_wadm(None).await;
// Wait for the first event on the lattice prefix before we start deploying and checking
// statuses. Wadm can absolutely handle hosts starting before you start the wadm process, but the first event

tests/e2e_shared.rs (new file)

@ -0,0 +1,477 @@
#![cfg(feature = "_e2e_tests")]
use std::time::Duration;
use std::{collections::HashMap, path::PathBuf};
use anyhow::{ensure, Context as _};
use futures::StreamExt;
use helpers::HTTP_CLIENT_IMAGE_REF;
use wadm_types::api::StatusType;
mod e2e;
mod helpers;
use e2e::{assert_status, check_components, check_providers, ClientInfo, ExpectedCount};
use crate::{
e2e::check_status,
helpers::{HELLO_IMAGE_REF, HTTP_SERVER_IMAGE_REF},
};
const MANIFESTS_PATH: &str = "tests/fixtures/manifests/shared";
const DOCKER_COMPOSE_FILE: &str = "tests/docker-compose-e2e_shared.yaml";
const SHARED_COMPONENTS_LATTICE: &str = "shared_components";
const SHARED_PROVIDERS_LATTICE: &str = "shared_providers";
const INVALID_TEST_LATTICE: &str = "shared_invalid";
#[cfg(feature = "_e2e_tests")]
#[tokio::test(flavor = "multi_thread")]
async fn run_shared_component_tests() {
use futures::FutureExt;
let root_dir =
PathBuf::from(std::env::var("CARGO_MANIFEST_DIR").expect("Unable to find repo root"));
let manifest_dir = root_dir.join(MANIFESTS_PATH);
let compose_file = root_dir.join(DOCKER_COMPOSE_FILE);
let mut client_info = ClientInfo::new(manifest_dir, compose_file).await;
client_info
.add_ctl_client(SHARED_COMPONENTS_LATTICE, None)
.await;
client_info.add_wadm_client(SHARED_COMPONENTS_LATTICE).await;
client_info
.add_ctl_client(SHARED_PROVIDERS_LATTICE, None)
.await;
client_info.add_wadm_client(SHARED_PROVIDERS_LATTICE).await;
client_info.add_ctl_client(INVALID_TEST_LATTICE, None).await;
client_info.add_wadm_client(INVALID_TEST_LATTICE).await;
client_info.launch_wadm(None).await;
// Wait for the first event on the lattice prefix before we start deploying and checking
// statuses. Wadm can absolutely handle hosts starting before you start the wadm process, but the first event
// on the lattice will initialize the lattice monitor and for the following test we quickly assert things.
let mut sub = client_info
.client
.subscribe("wasmbus.evt.*.>".to_string())
.await
.expect("Should be able to subscribe to default events");
// Host heartbeats happen every 30 seconds, if we don't get a heartbeat in 2 minutes, bail.
let _ = tokio::time::timeout(std::time::Duration::from_secs(120), sub.next())
.await
.expect("should have received a host heartbeat event before timeout");
// Wait for hosts to start
let mut did_start = false;
for _ in 0..10 {
match (
client_info
.ctl_client(SHARED_COMPONENTS_LATTICE)
.get_hosts()
.await,
client_info
.ctl_client(SHARED_PROVIDERS_LATTICE)
.get_hosts()
.await,
) {
(Ok(hosts_one), Ok(hosts_two)) if hosts_one.len() == 2 && hosts_two.len() == 2 => {
eprintln!(
"Hosts {}/2, {}/2 currently available",
hosts_one.len(),
hosts_two.len()
);
did_start = true;
break;
}
(Ok(hosts_one), Ok(hosts_two)) => {
eprintln!(
"Waiting for all hosts to be available, {}/2, {}/2 currently available",
hosts_one.len(),
hosts_two.len()
);
}
(Err(e), _) | (_, Err(e)) => {
eprintln!("Error when fetching hosts: {e}",)
}
}
tokio::time::sleep(Duration::from_secs(1)).await;
}
if !did_start {
panic!("Hosts didn't start")
}
let stream = client_info.get_status_stream().await;
stream
.purge()
.await
.expect("shouldn't have errored purging stream");
// The futures must be boxed or they're technically different types
let tests = [
test_shared_providers(&client_info).boxed(),
test_shared_components(&client_info).boxed(),
test_invalid_shared(&client_info).boxed(),
];
futures::future::join_all(tests).await;
}
async fn test_shared_providers(client_info: &ClientInfo) {
let stream = client_info.get_status_stream().await;
let client = client_info.wadm_client(SHARED_PROVIDERS_LATTICE);
let (name, _version) = client
.put_manifest(client_info.load_raw_manifest("shared_http.yaml").await)
.await
.expect("Shouldn't have errored when creating manifest");
client
.deploy_manifest(&name, None)
.await
.expect("Shouldn't have errored when deploying manifest");
assert_status(None, Some(5), || async {
let inventory = client_info
.get_all_inventory(SHARED_PROVIDERS_LATTICE)
.await?;
check_providers(&inventory, HTTP_SERVER_IMAGE_REF, ExpectedCount::Exactly(1))?;
check_providers(&inventory, HTTP_CLIENT_IMAGE_REF, ExpectedCount::Exactly(1))?;
let links = client_info
.ctl_client(SHARED_PROVIDERS_LATTICE)
.get_links()
.await
.map_err(|e| anyhow::anyhow!("{e:?}"))?
.into_data()
.context("Should have links")?;
ensure!(links.is_empty(), "Shouldn't have any links");
check_status(
&stream,
SHARED_PROVIDERS_LATTICE,
"shared-http",
StatusType::Deployed,
)
.await
.unwrap();
Ok(())
})
.await;
// Deploy manifest with HTTP component that depends on the shared manifest
let (name, _version) = client
.put_manifest(client_info.load_raw_manifest("shared_http_dev.yaml").await)
.await
.expect("Shouldn't have errored when creating manifest");
client
.deploy_manifest(&name, None)
.await
.expect("Shouldn't have errored when deploying manifest");
assert_status(None, Some(5), || async {
let inventory = client_info
.get_all_inventory(SHARED_PROVIDERS_LATTICE)
.await?;
// Ensure all configuration is set correctly
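// (Config names, like component IDs, appear to be prefixed with the manifest name, so the
// `httpaddr` source config in shared_http_dev.yaml becomes "shared_http_dev-httpaddr".)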
let config = client_info
.ctl_client(SHARED_PROVIDERS_LATTICE)
.get_config("shared_http_dev-httpaddr")
.await
.map_err(|e| anyhow::anyhow!("should have http provider source config {e}"))?
.into_data()
.context("should have http provider source config response")?;
assert_eq!(
config,
HashMap::from_iter(vec![("address".to_string(), "0.0.0.0:8080".to_string())])
);
check_providers(&inventory, HTTP_SERVER_IMAGE_REF, ExpectedCount::Exactly(1))?;
check_providers(&inventory, HTTP_CLIENT_IMAGE_REF, ExpectedCount::Exactly(1))?;
check_components(&inventory, HELLO_IMAGE_REF, "shared-http-dev", 12)?;
let links = client_info
.ctl_client(SHARED_PROVIDERS_LATTICE)
.get_links()
.await
.map_err(|e| anyhow::anyhow!("{e:?}"))?
.into_data()
.context("Should have links")?;
ensure!(
links.len() == 2,
"Should have two links: http_server -> component -> http_client"
);
if !links.iter().any(|ld| {
// This is checking that the source ID and the target
// come from the correct generated manifest IDs
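// (wadm appears to derive these IDs as "<manifest name, dashes to underscores>-<component name>",
// e.g. the "httpserver" component of the "shared-http" manifest becomes "shared_http-httpserver".)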
ld.source_id() == "shared_http-httpserver"
&& ld.target() == "shared_http_dev-hello"
&& ld.wit_namespace() == "wasi"
&& ld.wit_package() == "http"
&& ld.interfaces() == &vec!["incoming-handler"]
&& ld.name() == "default"
}) {
anyhow::bail!(
"Link between http server provider and hello component should exist: {:#?}",
links
)
}
if !links.iter().any(|ld| {
// This is checking that the source ID and the target
// come from the correct generated manifest IDs
ld.source_id() == "shared_http_dev-hello"
&& ld.target() == "shared_http-httpclient"
&& ld.wit_namespace() == "wasi"
&& ld.wit_package() == "http"
&& ld.interfaces() == &vec!["outgoing-handler"]
&& ld.name() == "default"
}) {
anyhow::bail!(
"Link between hello component and http client provider should exist: {:#?}",
links
)
}
check_status(
&stream,
SHARED_PROVIDERS_LATTICE,
"shared-http",
StatusType::Deployed,
)
.await
.unwrap();
check_status(
&stream,
SHARED_PROVIDERS_LATTICE,
"shared-http-dev",
StatusType::Deployed,
)
.await
.unwrap();
// TODO(#451): Additional validation tests coming in a follow-up PR
// // You can't undeploy an application that is depended on
// assert!(client.undeploy_manifest("shared-http").await.is_err());
// assert!(client.delete_manifest("shared-http", None).await.is_err());
// // Once dependent application is undeployed, you can undeploy and delete
// assert!(client.undeploy_manifest("shared-http-dev").await.is_ok());
// assert!(client.undeploy_manifest("shared-http").await.is_ok());
// assert!(client.delete_manifest("shared-http", None).await.is_ok());
Ok(())
})
.await;
}
async fn test_shared_components(client_info: &ClientInfo) {
let stream = client_info.get_status_stream().await;
let client = client_info.wadm_client(SHARED_COMPONENTS_LATTICE);
let (name, _version) = client
.put_manifest(client_info.load_raw_manifest("shared_component.yaml").await)
.await
.expect("Shouldn't have errored when creating manifest");
client
.deploy_manifest(&name, None)
.await
.expect("Shouldn't have errored when deploying manifest");
assert_status(None, Some(5), || async {
let inventory = client_info
.get_all_inventory(SHARED_COMPONENTS_LATTICE)
.await?;
let config = client_info
.ctl_client(SHARED_COMPONENTS_LATTICE)
.get_config("shared_component-defaults")
.await
.map_err(|e| anyhow::anyhow!("should have shared component default config {e}"))?
.into_data()
.context("should have shared component default config response")?;
assert_eq!(
config,
HashMap::from_iter(vec![("left".to_string(), "right".to_string())])
);
check_components(&inventory, HELLO_IMAGE_REF, "shared-component", 1)?;
let links = client_info
.ctl_client(SHARED_COMPONENTS_LATTICE)
.get_links()
.await
.map_err(|e| anyhow::anyhow!("{e:?}"))?
.into_data()
.context("Should have links")?;
ensure!(links.is_empty(), "Shouldn't have any links");
check_status(
&stream,
SHARED_COMPONENTS_LATTICE,
"shared-component",
StatusType::Deployed,
)
.await
.unwrap();
Ok(())
})
.await;
// Deploy a manifest whose component and provider link against the shared component manifest
let (name, _version) = client
.put_manifest(
client_info
.load_raw_manifest("shared_component_dev.yaml")
.await,
)
.await
.expect("Shouldn't have errored when creating manifest");
client
.deploy_manifest(&name, None)
.await
.expect("Shouldn't have errored when deploying manifest");
assert_status(None, Some(5), || async {
let inventory = client_info
.get_all_inventory(SHARED_COMPONENTS_LATTICE)
.await?;
check_providers(&inventory, HTTP_SERVER_IMAGE_REF, ExpectedCount::Exactly(1))?;
check_components(&inventory, HELLO_IMAGE_REF, "shared-component", 1)?;
check_components(&inventory, HELLO_IMAGE_REF, "shared-component-dev", 12)?;
let config = client_info
.ctl_client(SHARED_COMPONENTS_LATTICE)
.get_config("shared_component_dev-someconfig")
.await
.map_err(|e| anyhow::anyhow!("should have component dependency config {e}"))?
.into_data()
.context("should have component dependency config response")?;
assert_eq!(
config,
HashMap::from_iter(vec![("foo".to_string(), "bar".to_string())])
);
let links = client_info
.ctl_client(SHARED_COMPONENTS_LATTICE)
.get_links()
.await
.map_err(|e| anyhow::anyhow!("{e:?}"))?
.into_data()
.context("Should have links")?;
ensure!(links.len() == 3, "Should have three links");
if !links.iter().any(|ld| {
ld.source_id() == "shared_component_dev-hello"
&& ld.target() == "shared_component-link_to_meee"
&& ld.wit_namespace() == "custom"
&& ld.wit_package() == "package"
&& ld.interfaces() == &vec!["inter", "face"]
&& ld.name() == "default"
}) {
anyhow::bail!("Link between hello components should exist: {:#?}", links)
}
if !links.iter().any(|ld| {
ld.source_id() == "shared_component-link_to_meee"
&& ld.target() == "shared_component_dev-hello"
&& ld.wit_namespace() == "custom"
&& ld.wit_package() == "package"
&& ld.interfaces() == &vec!["inter", "face"]
&& ld.name() == "default"
}) {
anyhow::bail!("Link between hello components should exist: {:#?}", links)
}
if !links.iter().any(|ld| {
ld.source_id() == "shared_component_dev-httpserver"
&& ld.target() == "shared_component-link_to_meee"
&& ld.wit_namespace() == "wasi"
&& ld.wit_package() == "http"
&& ld.interfaces() == &vec!["incoming-handler"]
&& ld.name() == "default"
}) {
anyhow::bail!(
"Link between http server provider and hello component should exist: {:#?}",
links
)
}
check_status(
&stream,
SHARED_COMPONENTS_LATTICE,
"shared-component",
StatusType::Deployed,
)
.await
.unwrap();
check_status(
&stream,
SHARED_COMPONENTS_LATTICE,
"shared-component-dev",
StatusType::Deployed,
)
.await
.unwrap();
Ok(())
})
.await;
}
async fn test_invalid_shared(client_info: &ClientInfo) {
let client = client_info.wadm_client(INVALID_TEST_LATTICE);
// Including both `image` and `application` is not supported
assert!(client
.put_manifest(client_info.load_raw_manifest("both_properties.yaml").await)
.await
.is_err());
// Must include `image` or `application`
assert!(client
.put_manifest(client_info.load_raw_manifest("no_properties.yaml").await)
.await
.is_err());
// If the referenced app or component doesn't exist, putting the manifest should only warn,
// but deploying it should fail
let (name, _version) = client
.put_manifest(client_info.load_raw_manifest("no_matching_app.yaml").await)
.await
.expect("Shouldn't have errored when creating manifest");
assert!(client.deploy_manifest(&name, None).await.is_err());
let (name, _version) = client
.put_manifest(
client_info
.load_raw_manifest("no_matching_component.yaml")
.await,
)
.await
.expect("Shouldn't have errored when creating manifest");
assert!(client.deploy_manifest(&name, None).await.is_err());
// Deploy a manifest that is not shared, then another app that depends on it; deploying the
// dependent app should fail
let (name, _version) = client
.put_manifest(client_info.load_raw_manifest("notshared_http.yaml").await)
.await
.expect("Shouldn't have errored when creating manifest");
client
.deploy_manifest(&name, None)
.await
.expect("Shouldn't have errored when deploying manifest");
let (name, _version) = client
.put_manifest(
client_info
.load_raw_manifest("notshared_http_dev.yaml")
.await,
)
.await
.expect("Shouldn't have errored when creating manifest");
assert!(client.deploy_manifest(&name, None).await.is_err());
}

View File

@ -35,7 +35,9 @@ async fn run_upgrade_tests() {
let mut client_info = ClientInfo::new(manifest_dir, compose_file).await;
client_info.add_ctl_client("default", None).await;
client_info.add_wadm_client("default").await;
client_info.launch_wadm().await;
client_info
.launch_wadm(Some(HashMap::from([("--stream-persistence", "memory")])))
.await;
// Wait for the first event on the lattice prefix before we start deploying and checking
// statuses. Wadm can absolutely handle hosts starting before you start the wadm process, but the first event
@ -291,7 +293,7 @@ async fn test_upgrade(client_info: &ClientInfo) {
)?;
check_providers(
&inventory,
"ghcr.io/wasmcloud/http-server:0.21.0",
"ghcr.io/wasmcloud/http-server:0.23.0",
ExpectedCount::Exactly(1),
)?;
check_providers(
@ -409,7 +411,7 @@ async fn test_upgrade(client_info: &ClientInfo) {
)?;
check_providers(
&inventory,
"ghcr.io/wasmcloud/http-server:0.21.0",
"ghcr.io/wasmcloud/http-server:0.23.0",
ExpectedCount::Exactly(1),
)?;
check_providers(
@ -510,12 +512,12 @@ async fn test_upgrade(client_info: &ClientInfo) {
// let inventory = client_info.get_all_inventory("default").await?;
// check_providers(
// &inventory,
// "ghcr.io/wasmcloud/http-server:0.21.0",
// "ghcr.io/wasmcloud/http-server:0.23.0",
// ExpectedCount::Exactly(1),
// )?;
// check_providers(
// &inventory,
// "ghcr.io/wasmcloud/http-server:0.21.0",
// "ghcr.io/wasmcloud/http-server:0.23.0",
// ExpectedCount::Exactly(0),
// )?;
// Ok(())

View File

@ -186,6 +186,8 @@ async fn test_event_stream() -> Result<()> {
HELLO_COMPONENT_ID,
"wasi",
"http",
"--link-name",
"default",
"--ctl-port",
&ctl_port,
])

View File

@ -9,7 +9,7 @@ spec:
- name: test-policy
type: test
properties:
test: "data"
test: 'data'
components:
- name: hello
type: component
@ -36,7 +36,7 @@ spec:
- name: httpserver
type: capability
properties:
image: ghcr.io/wasmcloud/http-server:0.21.0
image: ghcr.io/wasmcloud/http-server:0.23.0
traits:
- type: spreadscaler
properties:

View File

@ -50,7 +50,7 @@ spec:
- name: httpserver
type: capability
properties:
image: ghcr.io/wasmcloud/http-server:0.21.0
image: ghcr.io/wasmcloud/http-server:0.23.0
id: http_server
traits:
- type: spreadscaler

View File

@ -0,0 +1,36 @@
apiVersion: core.oam.dev/v1beta1
kind: Application
metadata:
name: test-different-interfaces
annotations:
description: "test"
spec:
components:
- name: my-component
type: component
properties:
image: test:latest
traits:
- type: spreadscaler
properties:
instances: 1
- type: link
properties:
target: redis
namespace: wasi
package: keyvalue
interfaces: [atomics]
- type: link
properties:
target: redis
namespace: wasi
package: keyvalue
interfaces: [store]
- name: redis
type: capability
properties:
image: test:latest
traits:
- type: spreadscaler
properties:
instances: 1

View File

@ -0,0 +1,53 @@
---
apiVersion: core.oam.dev/v1beta1
kind: Application
metadata:
name: rust-http-blobstore
annotations:
version: v0.0.1
description: 'HTTP Blobstore demo in Rust, using the WebAssembly Component Model and WebAssembly Interfaces Types (WIT)'
wasmcloud.dev/authors: wasmCloud team
wasmcloud.dev/source-url: https://github.com/wasmCloud/wasmCloud/blob/main/examples/rust/components/http-blobstore/wadm.yaml
wasmcloud.dev/readme-md-url: https://github.com/wasmCloud/wasmCloud/blob/main/examples/rust/components/http-blobstore/README.md
wasmcloud.dev/homepage: https://github.com/wasmCloud/wasmCloud/tree/main/examples/rust/components/http-blobstore
wasmcloud.dev/categories: |
http,http-server,rust,blobstore,object-storage,example
spec:
components:
- name: http-blobstore
type: component
properties:
image: ghcr.io/wasmcloud/components/http-blobstore-rust:0.2.0
traits:
- type: spreadscaler
properties:
instances: 1
- type: link
properties:
target: blobstore-fs
namespace: wasi
package: blobstore
interfaces: [blobstore]
target_config:
- name: root-directory
properties:
root: '/tmp'
- name: httpserver
type: capability
properties:
image: ghcr.io/wasmcloud/http-server:0.23.2
traits:
- type: link
properties:
target: http-blobstore
namespace: wasi
package: http
interfaces: [incoming-handler]
source_config:
- name: default-http
properties:
address: 0.0.0.0:8000
- name: blobstore-fs
type: capability
properties:
image: ghcr.io/wasmcloud/blobstore-fs:0.10.1

View File

@ -0,0 +1,96 @@
apiVersion: core.oam.dev/v1beta1
kind: Application
metadata:
name: my-example-app
annotations:
description: "This is my app"
spec:
components:
- name: userinfo1
type: component
properties:
image: wasmcloud.azurecr.io/fake:1
traits:
- type: link
properties:
namespace: wasi
package: keyvalue
interfaces: [atomics, store]
target:
name: kvredis
config:
- name: redis-url
properties:
url: "redis://127.0.0.1:6379"
# this config name is duplicated, but has no properties,
# so it references an existing config
- name: my_example_app-shared_redis
- name: userinfo2
type: component
properties:
image: wasmcloud.azurecr.io/fake:1
traits:
- type: link
properties:
namespace: wasi
package: keyvalue
interfaces: [atomics, store]
target:
name: kvredis
config:
- name: redis-url
properties:
url: "redis://127.0.0.1:6379"
# this config name is duplicated, but has no properties,
# so it references an existing config
- name: my_example_app-shared_redis
- name: webcap1
type: capability
properties:
id: httpserver1
image: wasmcloud.azurecr.io/httpserver:0.13.1
traits:
- type: link
properties:
namespace: wasi
package: http
interfaces: ["incoming-handler"]
target:
name: userinfo1
source:
config:
- name: default-port
properties:
port: 0.0.0.0:8080
- name: alternate-port
properties:
address: 0.0.0.0:8081
- name: alternate-port
properties:
address: 0.0.0.0:8081
- name: webcap2
type: capability
properties:
id: httpserver2
image: wasmcloud.azurecr.io/httpserver:0.14.1
traits:
- type: link
properties:
target:
name: userinfo2
namespace: wasi
package: http
interfaces: ["incoming-handler"]
source:
config:
- name: default-port
properties:
address: 0.0.0.0:8080
- name: kvredis
type: capability
properties:
image: ghcr.io/wasmcloud/keyvalue-redis:0.28.1

View File

@ -0,0 +1,49 @@
apiVersion: core.oam.dev/v1beta1
kind: Application
metadata:
name: test-link-name-uniqueness
annotations:
description: 'test'
spec:
components:
- name: http-component
type: component
properties:
image: file://./build/http_hello_world_s.wasm
traits:
- type: spreadscaler
properties:
instances: 1
- name: http-component-two
type: component
properties:
image: file://./build/http_hello_world_s.wasm
traits:
- type: spreadscaler
properties:
instances: 1
- name: httpserver
type: capability
properties:
image: ghcr.io/wasmcloud/http-server:0.22.0
traits:
- type: link
properties:
target: http-component
namespace: wasi
package: http
interfaces: [incoming-handler]
source_config:
- name: default-http
properties:
address: 127.0.0.1:8080
- type: link
properties:
target: http-component-two
namespace: wasi
package: http
interfaces: [incoming-handler]
source_config:
- name: default-http-two
properties:
address: 127.0.0.1:8081

View File

@ -0,0 +1,44 @@
apiVersion: core.oam.dev/v1beta1
kind: Application
metadata:
name: test-duplicate-interfaces
annotations:
description: "test"
spec:
components:
- name: my-component
type: component
properties:
image: test:latest
traits:
- type: spreadscaler
properties:
instances: 1
- type: link
properties:
target: redis-1
namespace: wasi
package: keyvalue
interfaces: [atomics]
- type: link
properties:
target: redis-2
namespace: wasi
package: keyvalue
interfaces: [atomics]
- name: redis-1
type: capability
properties:
image: test:latest
traits:
- type: spreadscaler
properties:
instances: 1
- name: redis-2
type: capability
properties:
image: test:latest
traits:
- type: spreadscaler
properties:
instances: 1

View File

@ -18,7 +18,7 @@ spec:
- name: httpserver
type: capability
properties:
image: ghcr.io/wasmcloud/http-server:0.21.0
image: ghcr.io/wasmcloud/http-server:0.23.0
traits:
- type: spreadscaler
properties:

View File

@ -55,6 +55,7 @@ spec:
traits:
- type: link
properties:
name: hello
target:
name: hello-world
namespace: wasi
@ -68,6 +69,7 @@ spec:
address: 0.0.0.0:8080
- type: link
properties:
name: dog
target:
name: dog-fetcher
namespace: wasi

View File

@ -0,0 +1,15 @@
apiVersion: core.oam.dev/v1beta1
kind: Application
metadata:
name: both-props
annotations:
description: 'Contains a component with both image and application'
spec:
components:
- name: httpserver
type: capability
properties:
image: pull-from-me
application:
name: wheee
component: httpserver

View File

@ -0,0 +1,35 @@
apiVersion: core.oam.dev/v1beta1
kind: Application
metadata:
name: some-nonexistant-app
annotations:
description: 'Manifest that refers to a nonexistent app'
spec:
components:
- name: hello
type: component
properties:
image: ghcr.io/wasmcloud/components/http-hello-world-rust:0.1.0
traits:
- type: spreadscaler
properties:
instances: 12
- name: httpserver
type: capability
properties:
application:
name: some-nonexistant-app
component: httpserver
traits:
- type: link
properties:
namespace: wasi
package: http
interfaces: [incoming-handler]
target:
name: hello
source:
config:
- name: httpaddr
properties:
address: 0.0.0.0:8080

View File

@ -0,0 +1,35 @@
apiVersion: core.oam.dev/v1beta1
kind: Application
metadata:
name: no-matching-component
annotations:
description: 'Manifest that refers to a nonexistent component'
spec:
components:
- name: hello
type: component
properties:
image: ghcr.io/wasmcloud/components/http-hello-world-rust:0.1.0
traits:
- type: spreadscaler
properties:
instances: 12
- name: httpserver
type: capability
properties:
application:
name: shared-http
component: some-nonexistant-component
traits:
- type: link
properties:
namespace: wasi
package: http
interfaces: [incoming-handler]
target:
name: hello
source:
config:
- name: httpaddr
properties:
address: 0.0.0.0:8080

View File

@ -0,0 +1,15 @@
apiVersion: core.oam.dev/v1beta1
kind: Application
metadata:
name: no-props
annotations:
description: 'Contains a component with neither image nor application'
spec:
components:
- name: httpserver
type: capability
properties:
config:
- name: log
properties:
level: info

View File

@ -0,0 +1,16 @@
apiVersion: core.oam.dev/v1beta1
kind: Application
metadata:
name: not-shared-http
annotations:
description: 'My Precious! O my Precious! We needs it. Must have the precious. They stole it from us'
spec:
components:
- name: httpserver
type: capability
properties:
image: ghcr.io/wasmcloud/http-server:0.23.0
traits:
- type: spreadscaler
properties:
instances: 1

View File

@ -0,0 +1,35 @@
apiVersion: core.oam.dev/v1beta1
kind: Application
metadata:
name: not-shared-http-dev
annotations:
description: 'A Hello World app that tries to use a not shared component'
spec:
components:
- name: hello
type: component
properties:
image: ghcr.io/wasmcloud/components/http-hello-world-rust:0.1.0
traits:
- type: spreadscaler
properties:
instances: 12
- name: httpserver
type: capability
properties:
application:
name: not-shared-http
component: httpserver
traits:
- type: link
properties:
namespace: wasi
package: http
interfaces: [incoming-handler]
target:
name: hello
source:
config:
- name: httpaddr
properties:
address: 0.0.0.0:8080

View File

@ -0,0 +1,21 @@
apiVersion: core.oam.dev/v1beta1
kind: Application
metadata:
name: shared-component
annotations:
description: 'A shared component!'
experimental.wasmcloud.dev/shared: 'true'
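# NOTE: this shared annotation is presumably what allows other manifests on the lattice to
# reference this component via `application:` properties instead of an `image`.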
spec:
components:
- name: link-to-meee
type: component
properties:
image: ghcr.io/wasmcloud/components/http-hello-world-rust:0.1.0
config:
- name: defaults
properties:
left: right
traits:
- type: spreadscaler
properties:
instances: 1

View File

@ -0,0 +1,59 @@
apiVersion: core.oam.dev/v1beta1
kind: Application
metadata:
name: shared-component-dev
annotations:
description: 'A Hello World app for testing, most basic link'
spec:
components:
# Link a component to a shared component
- name: hello
type: component
properties:
image: ghcr.io/wasmcloud/components/http-hello-world-rust:0.1.0
traits:
- type: spreadscaler
properties:
instances: 12
- type: link
properties:
namespace: custom
package: package
interfaces: [inter, face]
target:
name: component-dep
# Shared component, link to a component in this application
- name: component-dep
type: component
properties:
application:
name: shared-component
component: link-to-meee
traits:
- type: link
properties:
namespace: custom
package: package
interfaces: [inter, face]
target:
name: hello
config:
- name: someconfig
properties:
foo: bar
# Link a provider to a shared component
- name: httpserver
type: capability
properties:
image: ghcr.io/wasmcloud/http-server:0.23.0
traits:
- type: spreadscaler
properties:
instances: 1
- type: link
properties:
namespace: wasi
package: http
interfaces: [incoming-handler]
target:
name: component-dep

View File

@ -0,0 +1,25 @@
apiVersion: core.oam.dev/v1beta1
kind: Application
metadata:
name: shared-http
annotations:
description: 'A shared HTTP server and client, for everybody!!!!!!!!!!!!!!!!!!!!'
experimental.wasmcloud.dev/shared: 'true'
spec:
components:
- name: httpclient
type: capability
properties:
image: ghcr.io/wasmcloud/http-client:0.12.0
traits:
- type: spreadscaler
properties:
instances: 1
- name: httpserver
type: capability
properties:
image: ghcr.io/wasmcloud/http-server:0.23.0
traits:
- type: spreadscaler
properties:
instances: 1

View File

@ -0,0 +1,50 @@
apiVersion: core.oam.dev/v1beta1
kind: Application
metadata:
name: shared-http-dev
annotations:
description: 'A Hello World app for testing, most basic HTTP link'
spec:
components:
- name: hello
type: component
properties:
image: ghcr.io/wasmcloud/components/http-hello-world-rust:0.1.0
traits:
- type: spreadscaler
properties:
instances: 12
- type: link
properties:
namespace: wasi
package: http
interfaces: [outgoing-handler]
target:
# Note that the name in this manifest does not have to be the same
# as the name of the component in the shared manifest
name: http-client-this
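# (The shared component itself is resolved via the `application:` block below, which points
# at the `httpclient` component of the `shared-http` manifest.)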
- name: http-client-this
type: capability
properties:
application:
name: shared-http
component: httpclient
- name: httpserver
type: capability
properties:
application:
name: shared-http
component: httpserver
traits:
- type: link
properties:
namespace: wasi
package: http
interfaces: [incoming-handler]
target:
name: hello
source:
config:
- name: httpaddr
properties:
address: 0.0.0.0:8080

View File

@ -19,7 +19,7 @@ spec:
- name: httpserver
type: capability
properties:
image: ghcr.io/wasmcloud/http-server:0.21.0
image: ghcr.io/wasmcloud/http-server:0.23.0
id: http_server
traits:
- type: spreadscaler

View File

@ -40,12 +40,13 @@ spec:
- name: httpserver
type: capability
properties:
image: ghcr.io/wasmcloud/http-server:0.21.0
image: ghcr.io/wasmcloud/http-server:0.23.0
id: http_server
traits:
# Updated linkdef trait
- type: link
properties:
name: hello
target:
name: hello-world
namespace: wasi
@ -59,6 +60,7 @@ spec:
address: 0.0.0.0:8082
- type: link
properties:
name: dog
target:
name: dog-fetcher
namespace: wasi

View File

@ -35,7 +35,7 @@ spec:
- name: httpserver
type: capability
properties:
image: ghcr.io/wasmcloud/http-server:0.21.0
image: ghcr.io/wasmcloud/http-server:0.23.0
id: http_server
# Updated config
config:
@ -45,6 +45,7 @@ spec:
traits:
- type: link
properties:
name: hello
target:
name: hello-world
namespace: wasi
@ -59,6 +60,7 @@ spec:
address: 0.0.0.0:8080
- type: link
properties:
name: dog
target:
name: dog-fetcher
namespace: wasi

View File

@ -9,6 +9,6 @@ spec:
- name: httpserver-other
type: capability
properties:
image: ghcr.io/wasmcloud/http-server:0.21.0
image: ghcr.io/wasmcloud/http-server:0.23.0
# This ID should not be allowed to be deployed
id: http_server
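# (The http_server ID is presumably already claimed by a provider in another manifest used by
# these tests, so reusing it here should be rejected at deploy time.)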

View File

@ -21,8 +21,9 @@ use wadm::consumers::{CommandConsumer, ScopedMessage};
pub const DEFAULT_NATS_PORT: u16 = 4222;
pub const HELLO_IMAGE_REF: &str = "ghcr.io/wasmcloud/components/http-hello-world-rust:0.1.0";
pub const HELLO_COMPONENT_ID: &str = "http_hello_world";
pub const HTTP_SERVER_IMAGE_REF: &str = "ghcr.io/wasmcloud/http-server:0.21.0";
pub const HTTP_SERVER_IMAGE_REF: &str = "ghcr.io/wasmcloud/http-server:0.23.0";
pub const HTTP_SERVER_COMPONENT_ID: &str = "http_server";
pub const HTTP_CLIENT_IMAGE_REF: &str = "ghcr.io/wasmcloud/http-client:0.12.0";
/// Get a TCP random port
fn get_random_tcp_port() -> u16 {

View File

@ -44,12 +44,12 @@ async fn validate_misnamed_interface() -> Result<()> {
!failures.is_empty()
&& failures
.iter()
.all(|f| f.level == ValidationFailureLevel::Error),
"failures present, all errors"
.all(|f| f.level == ValidationFailureLevel::Warning),
"failures present, all warnings"
);
assert!(
!failures.valid(),
"manifest should be invalid (misnamed interface w/ right namespace & package is probably a bug)"
failures.valid(),
"manifest should be valid (misnamed interface w/ right namespace & package is probably a bug but might be intentional)"
);
Ok(())
}
@ -118,3 +118,42 @@ async fn validate_policy() -> Result<()> {
assert!(failures.valid(), "manifest is valid");
Ok(())
}
/// Ensure that we can detect duplicated link config names
#[tokio::test]
async fn validate_link_config_names() -> Result<()> {
let (_manifest, failures) =
validate_manifest_file("./tests/fixtures/manifests/duplicate_link_config_names.wadm.yaml")
.await
.context("failed to validate manifest")?;
let expected_errors = 3;
assert!(
!failures.is_empty()
&& failures
.iter()
.all(|f| f.level == ValidationFailureLevel::Error)
&& failures.len() == expected_errors,
"expected {} errors because manifest contains {} duplicated link config names, instead {} errors were found", expected_errors, expected_errors, failures.len()
);
assert!(
!failures.valid(),
"manifest should be invalid (duplicated link config names lead to a dead loop)"
);
Ok(())
}
#[tokio::test]
async fn validate_deprecated_configs_raw_yaml() -> Result<()> {
let (_manifest, failures) = validate_manifest_file(
"./tests/fixtures/manifests/deprecated-source-and-target-config.yaml",
)
.await
.context("failed to validate manifest")?;
assert!(failures.valid(), "expected valid manifest");
assert_eq!(
failures.warnings().len(),
2,
"expected 2 warnings during validating manifest"
);
Ok(())
}

View File

@ -73,6 +73,7 @@ interface types {
deployed,
failed,
waiting,
unhealthy
}
enum deploy-result {
@ -117,7 +118,8 @@ interface types {
// Properties for a component
record component-properties {
image: string,
image: option<string>,
application: option<shared-application-component-properties>,
id: option<string>,
config: list<config-property>,
secrets: list<secret-property>,
@ -125,7 +127,8 @@ interface types {
// Properties for a capability
record capability-properties {
image: string,
image: option<string>,
application: option<shared-application-component-properties>,
id: option<string>,
config: list<config-property>,
secrets: list<secret-property>,
@ -187,6 +190,12 @@ interface types {
version: option<string>,
}
// Shared application component properties
record shared-application-component-properties {
name: string,
component: string
}
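// For reference, in manifest YAML this maps to something like (example values taken from the
// shared manifests in this change):
//   application:
//     name: shared-http
//     component: httpserver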
// Target configuration
record target-config {
name: string,
@ -206,4 +215,4 @@ interface types {
requirements: list<tuple<string, string>>,
weight: option<u32>,
}
}
}