Compare commits

...

121 Commits
v0.1.0 ... main

Author SHA1 Message Date
dependabot[bot] a3f6886cb3
chore(deps): Bump actions/setup-python from 5.4.0 to 5.6.0 (#122)
Bumps [actions/setup-python](https://github.com/actions/setup-python) from 5.4.0 to 5.6.0.
- [Release notes](https://github.com/actions/setup-python/releases)
- [Commits](https://github.com/actions/setup-python/compare/v5.4.0...v5.6.0)

---
updated-dependencies:
- dependency-name: actions/setup-python
  dependency-version: 5.6.0
  dependency-type: direct:production
  update-type: version-update:semver-minor
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2025-06-03 15:39:41 -04:00
dependabot[bot] 599aec9cbc
chore(deps): Bump tokio from 1.36.0 to 1.42.1 (#121)
Bumps [tokio](https://github.com/tokio-rs/tokio) from 1.36.0 to 1.42.1.
- [Release notes](https://github.com/tokio-rs/tokio/releases)
- [Commits](https://github.com/tokio-rs/tokio/compare/tokio-1.36.0...tokio-1.42.1)

---
updated-dependencies:
- dependency-name: tokio
  dependency-version: 1.42.1
  dependency-type: direct:production
  update-type: version-update:semver-minor
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2025-06-03 15:39:28 -04:00
dependabot[bot] 4b8134a5f8
chore(deps): Bump schemars from 0.8.21 to 0.8.22 (#118)
Bumps [schemars](https://github.com/GREsau/schemars) from 0.8.21 to 0.8.22.
- [Release notes](https://github.com/GREsau/schemars/releases)
- [Changelog](https://github.com/GREsau/schemars/blob/master/CHANGELOG.md)
- [Commits](https://github.com/GREsau/schemars/compare/v0.8.21...v0.8.22)

---
updated-dependencies:
- dependency-name: schemars
  dependency-type: direct:production
  update-type: version-update:semver-patch
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2025-06-03 15:39:05 -04:00
dependabot[bot] 9c3208ee3a
chore(deps): Bump uuid from 1.5.0 to 1.12.1 (#114)
Bumps [uuid](https://github.com/uuid-rs/uuid) from 1.5.0 to 1.12.1.
- [Release notes](https://github.com/uuid-rs/uuid/releases)
- [Commits](https://github.com/uuid-rs/uuid/compare/1.5.0...1.12.1)

---
updated-dependencies:
- dependency-name: uuid
  dependency-type: direct:production
  update-type: version-update:semver-minor
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2025-06-03 15:38:44 -04:00
Joonas Bergius c6d4e59028
chore: Add Security Policy with link to the main repository (#106)
Signed-off-by: Joonas Bergius <joonas@bergi.us>
2025-06-03 15:38:26 -04:00
dependabot[bot] cb051aac61
chore(deps): Bump serde_yaml from 0.9.25 to 0.9.29 (#83)
Bumps [serde_yaml](https://github.com/dtolnay/serde-yaml) from 0.9.25 to 0.9.29.
- [Release notes](https://github.com/dtolnay/serde-yaml/releases)
- [Commits](https://github.com/dtolnay/serde-yaml/compare/0.9.25...0.9.29)

---
updated-dependencies:
- dependency-name: serde_yaml
  dependency-type: direct:production
  update-type: version-update:semver-patch
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2025-06-03 15:37:27 -04:00
dependabot[bot] 52007e2927
chore(deps): Bump handlebars from 5.1.0 to 5.1.2 (#77)
Bumps [handlebars](https://github.com/sunng87/handlebars-rust) from 5.1.0 to 5.1.2.
- [Release notes](https://github.com/sunng87/handlebars-rust/releases)
- [Changelog](https://github.com/sunng87/handlebars-rust/blob/master/CHANGELOG.md)
- [Commits](https://github.com/sunng87/handlebars-rust/compare/v5.1.0...v5.1.2)

---
updated-dependencies:
- dependency-name: handlebars
  dependency-type: direct:production
  update-type: version-update:semver-patch
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2025-06-03 15:37:10 -04:00
Nicolas Lamirault bc2d7bbea2
feat(helm): Kubernetes labels (#119)
Signed-off-by: Nicolas Lamirault <nicolas.lamirault@gmail.com>
2025-03-03 09:48:22 -05:00
Eric Gregory d4758e98f1 fix(examples): change http provider address for hello world app
Signed-off-by: Eric Gregory <egregory04@gmail.com>
2025-02-14 09:15:40 -05:00
dependabot[bot] 9cf6542828
chore(deps): Bump helm/kind-action from 1.10.0 to 1.12.0 (#109)
Bumps [helm/kind-action](https://github.com/helm/kind-action) from 1.10.0 to 1.12.0.
- [Release notes](https://github.com/helm/kind-action/releases)
- [Commits](https://github.com/helm/kind-action/compare/v1.10.0...v1.12.0)

---
updated-dependencies:
- dependency-name: helm/kind-action
  dependency-type: direct:production
  update-type: version-update:semver-minor
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2025-02-14 08:14:56 -06:00
dependabot[bot] 8d495ec202
chore(deps): Bump helm/chart-testing-action from 2.6.1 to 2.7.0 (#115)
Bumps [helm/chart-testing-action](https://github.com/helm/chart-testing-action) from 2.6.1 to 2.7.0.
- [Release notes](https://github.com/helm/chart-testing-action/releases)
- [Commits](https://github.com/helm/chart-testing-action/compare/v2.6.1...v2.7.0)

---
updated-dependencies:
- dependency-name: helm/chart-testing-action
  dependency-type: direct:production
  update-type: version-update:semver-minor
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2025-02-14 08:14:24 -06:00
dependabot[bot] b7e4bf3761
chore(deps): Bump actions/setup-python from 5.3.0 to 5.4.0 (#116)
Bumps [actions/setup-python](https://github.com/actions/setup-python) from 5.3.0 to 5.4.0.
- [Release notes](https://github.com/actions/setup-python/releases)
- [Commits](https://github.com/actions/setup-python/compare/v5.3.0...v5.4.0)

---
updated-dependencies:
- dependency-name: actions/setup-python
  dependency-type: direct:production
  update-type: version-update:semver-minor
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2025-02-14 08:13:19 -06:00
Taylor Thomas f9daa35480
chore(yaml): Update version in the kustomize yaml (#112)
We were pinned to an old version, so when I deployed as directed in the
README it deployed an old version without the new features I needed.

Signed-off-by: Taylor Thomas <taylor@cosmonic.com>
2025-01-10 16:00:50 -06:00
Joonas Bergius d74ad8bff7
feat: Support customizing the container templates for wasmcloud and nats (#110)
Signed-off-by: Joonas Bergius <joonas@cosmonic.com>
2025-01-09 13:01:37 -06:00
Joonas Bergius 4ccfb20b30
chore(wasmcloud-operator-types): Add cargo machete ignore for serde_json with NOTE (#104)
Signed-off-by: Joonas Bergius <joonas@cosmonic.com>
2024-11-18 12:38:28 -06:00
dependabot[bot] 59552d2277
chore(deps): Bump actions/setup-python from 5.1.1 to 5.3.0 (#102)
Bumps [actions/setup-python](https://github.com/actions/setup-python) from 5.1.1 to 5.3.0.
- [Release notes](https://github.com/actions/setup-python/releases)
- [Commits](https://github.com/actions/setup-python/compare/v5.1.1...v5.3.0)

---
updated-dependencies:
- dependency-name: actions/setup-python
  dependency-type: direct:production
  update-type: version-update:semver-minor
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2024-10-25 09:36:21 -05:00
Joonas Bergius 1b129d69c9
chore: List maintainer affiliations (#99)
Signed-off-by: Joonas Bergius <joonas@cosmonic.com>
2024-10-12 19:02:47 -05:00
Joonas Bergius fff57db4fb chore: Add CODEOWNERS file
Signed-off-by: Joonas Bergius <joonas@cosmonic.com>
2024-10-12 16:05:58 -04:00
Brooks Townsend 1c85b3937c chore: add MAINTAINERS.md
Signed-off-by: Brooks Townsend <brooksmtownsend@gmail.com>
2024-10-12 14:45:36 -04:00
Joonas Bergius 309b492dcd
feat: Add rudimentary version flag (#95)
Signed-off-by: Joonas Bergius <joonas@cosmonic.com>
2024-09-23 19:29:15 -05:00
Joonas Bergius 0aaa277422
feat: Add wolfi-based image (#94)
Signed-off-by: Joonas Bergius <joonas@cosmonic.com>
2024-09-23 14:40:13 -05:00
Lucas Fontes 6f63289a11
feat: Configure max component memory (#91)
* feat: Configure max component memory

Signed-off-by: Lucas Fontes <lucas@cosmonic.com>

* chore: Bumping versions

Signed-off-by: Lucas Fontes <lucas@cosmonic.com>

---------

Signed-off-by: Lucas Fontes <lucas@cosmonic.com>
2024-08-20 14:33:52 -05:00
Bailey Hayes 788cbc089d
fix(examples): add daemonscaler to httpserver (#87)
This will allow the k8s operator to automatically
create services. This shouldn't be required in
the future when the operator supports the spreadscaler
for httpserver providers as well.

Signed-off-by: Bailey Hayes <behayes2@gmail.com>
2024-08-12 12:31:42 -05:00
Joonas Bergius 4ab407dc15
chore: Bump wasmcloud-operator chart appVersion (#85)
Signed-off-by: Joonas Bergius <joonas@cosmonic.com>
2024-08-01 12:52:00 -05:00
Joonas Bergius d90e46055e
feat: Add support for configuring wasmCloud Secrets (#82)
Signed-off-by: Joonas Bergius <joonas@cosmonic.com>
2024-08-01 12:13:10 -05:00
Joonas Bergius 5988e643e7
chore: Address Clippy lints and nested-ifs (#76)
Signed-off-by: Joonas Bergius <joonas@cosmonic.com>
2024-07-26 10:39:25 -05:00
Joonas Bergius 69dac08133
fix: Upgrade opentelemetry to 0.21 (#80)
Signed-off-by: Joonas Bergius <joonas@cosmonic.com>
2024-07-26 10:39:11 -05:00
Dan Norris 2906b3f56e
Merge pull request #63 from wasmCloud/dependabot/cargo/serde-1.0.193
chore(deps): Bump serde from 1.0.189 to 1.0.193
2024-07-26 14:28:51 +00:00
Joonas Bergius ba909d170c
chore: Bump wasmcloud-operator version references in deployment artifacts (#79)
Signed-off-by: Joonas Bergius <joonas@cosmonic.com>
2024-07-23 11:16:23 -05:00
Lucas Fontes f7d3ddef5f
chore: Update Example Setup (#70)
---------

Signed-off-by: Lucas Fontes <lucas@cosmonic.com>
Signed-off-by: Lucas Fontes <lxfontes@gmail.com>
Co-authored-by: Joonas Bergius <joonas@users.noreply.github.com>
2024-07-22 13:43:50 -05:00
dependabot[bot] 0c024e7718
chore(deps): Bump serde from 1.0.189 to 1.0.193
Bumps [serde](https://github.com/serde-rs/serde) from 1.0.189 to 1.0.193.
- [Release notes](https://github.com/serde-rs/serde/releases)
- [Commits](https://github.com/serde-rs/serde/compare/v1.0.189...v1.0.193)

---
updated-dependencies:
- dependency-name: serde
  dependency-type: direct:production
  update-type: version-update:semver-patch
...

Signed-off-by: dependabot[bot] <support@github.com>
2024-07-18 14:32:10 +00:00
dependabot[bot] e6de0f9394
chore(deps): Bump anyhow from 1.0.75 to 1.0.86 (#61)
Bumps [anyhow](https://github.com/dtolnay/anyhow) from 1.0.75 to 1.0.86.
- [Release notes](https://github.com/dtolnay/anyhow/releases)
- [Commits](https://github.com/dtolnay/anyhow/compare/1.0.75...1.0.86)

---
updated-dependencies:
- dependency-name: anyhow
  dependency-type: direct:production
  update-type: version-update:semver-patch
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2024-07-18 09:31:56 -05:00
dependabot[bot] 38e77cd643
chore(deps): Bump helm/kind-action from 1.9.0 to 1.10.0 (#59)
Bumps [helm/kind-action](https://github.com/helm/kind-action) from 1.9.0 to 1.10.0.
- [Release notes](https://github.com/helm/kind-action/releases)
- [Commits](https://github.com/helm/kind-action/compare/v1.9.0...v1.10.0)

---
updated-dependencies:
- dependency-name: helm/kind-action
  dependency-type: direct:production
  update-type: version-update:semver-minor
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2024-07-18 09:31:21 -05:00
dependabot[bot] 226f9ab39e
chore(deps): Bump json-patch from 1.2.0 to 1.4.0 (#58)
Bumps [json-patch](https://github.com/idubrov/json-patch) from 1.2.0 to 1.4.0.
- [Changelog](https://github.com/idubrov/json-patch/blob/main/CHANGELOG.md)
- [Commits](https://github.com/idubrov/json-patch/compare/v1.2.0...v1.4.0)

---
updated-dependencies:
- dependency-name: json-patch
  dependency-type: direct:production
  update-type: version-update:semver-minor
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2024-07-18 09:30:46 -05:00
dependabot[bot] ee120eb5d1
chore(deps): Bump actions/checkout from 3 to 4 (#57)
Bumps [actions/checkout](https://github.com/actions/checkout) from 3 to 4.
- [Release notes](https://github.com/actions/checkout/releases)
- [Changelog](https://github.com/actions/checkout/blob/main/CHANGELOG.md)
- [Commits](https://github.com/actions/checkout/compare/v3...v4)

---
updated-dependencies:
- dependency-name: actions/checkout
  dependency-type: direct:production
  update-type: version-update:semver-major
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2024-07-18 09:26:33 -05:00
dependabot[bot] 26deb719ac
chore(deps): Bump docker/login-action from 2 to 3 (#55)
Bumps [docker/login-action](https://github.com/docker/login-action) from 2 to 3.
- [Release notes](https://github.com/docker/login-action/releases)
- [Commits](https://github.com/docker/login-action/compare/v2...v3)

---
updated-dependencies:
- dependency-name: docker/login-action
  dependency-type: direct:production
  update-type: version-update:semver-major
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2024-07-18 09:26:16 -05:00
dependabot[bot] c30e1f8953
chore(deps): Bump docker/build-push-action from 4 to 6 (#54)
Bumps [docker/build-push-action](https://github.com/docker/build-push-action) from 4 to 6.
- [Release notes](https://github.com/docker/build-push-action/releases)
- [Commits](https://github.com/docker/build-push-action/compare/v4...v6)

---
updated-dependencies:
- dependency-name: docker/build-push-action
  dependency-type: direct:production
  update-type: version-update:semver-major
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2024-07-18 09:25:35 -05:00
Lucas Fontes 97f0c12d03
feat: Certificate Authorities to wasmcloud host (#72)
---------

Signed-off-by: Lucas Fontes <lucas@cosmonic.com>
2024-07-16 19:46:29 -05:00
dependabot[bot] 7c709465c0
chore(deps): Bump actions/setup-python from 5.0.0 to 5.1.1 (#71)
Bumps [actions/setup-python](https://github.com/actions/setup-python) from 5.0.0 to 5.1.1.
- [Release notes](https://github.com/actions/setup-python/releases)
- [Commits](https://github.com/actions/setup-python/compare/v5.0.0...v5.1.1)

---
updated-dependencies:
- dependency-name: actions/setup-python
  dependency-type: direct:production
  update-type: version-update:semver-minor
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2024-07-11 20:13:16 -05:00
Lucas Fontes 4f4bde877e
refactor: Making issuers & secret optional (#68)
refactor: Making issuers & secret optional
---------
Signed-off-by: Bailey Hayes <behayes2@gmail.com>
Signed-off-by: Lucas Fontes <lucas@cosmonic.com>
Signed-off-by: Lucas Fontes <lxfontes@gmail.com>
Co-authored-by: Bailey Hayes <behayes2@gmail.com>
2024-07-10 23:39:21 -05:00
Joonas Bergius 32fa6e40da
Merge pull request #66 from joonas/chore/bump-wasmcloud-operator-0_3_1
chore: Bump wasmcloud-operator to 0.3.1
2024-07-03 11:42:17 -05:00
Joonas Bergius 674ee31cb0
chore: Bump wasmcloud-operator to 0.3.1
Signed-off-by: Joonas Bergius <joonas@cosmonic.com>
2024-07-03 11:23:01 -05:00
Joonas Bergius 27e83fe24c
Merge pull request #65 from joonas/fix/patching-applications
fix: Improve handling for patching existing applications
2024-07-03 08:24:30 -05:00
Joonas Bergius d4a47a4d7c
fix: Improve handling for patching existing applications
Signed-off-by: Joonas Bergius <joonas@cosmonic.com>
2024-07-02 18:54:54 -05:00
Joonas Bergius 0ebc5229bb
Merge pull request #64 from joonas/chore/operator-0_3_0
chore: Tag wasmcloud-operator 0.3.0
2024-07-02 12:16:39 -05:00
Joonas Bergius 8853b526d4
chore: Tag wasmcloud-operator 0.3.0
Signed-off-by: Joonas Bergius <joonas@cosmonic.com>
2024-07-02 12:01:10 -05:00
Joonas Bergius d0f810cd6c
Merge pull request #51 from joonas/chore/adapt-to-new-wadm_events-stream-hierarchy
chore: Adapt to the new wadm_events stream hierarchy
2024-07-02 11:57:18 -05:00
Joonas Bergius bf8a74acfd
Merge pull request #53 from joonas/feat/enable-observability
feat: Add support for configuring observability via WasmCloudHostConfig
2024-07-02 11:47:51 -05:00
Joonas Bergius 00bc95f293
chore: Adapt to the new wadm_events stream hierarchy
Signed-off-by: Joonas Bergius <joonas@cosmonic.com>
2024-07-02 11:36:06 -05:00
Joonas Bergius b3cfb7da54
Merge pull request #45 from joonas/chore/add-dependabot
chore: Add dependabot configuration for dependency updates
2024-07-01 18:09:36 -05:00
Joonas Bergius e7a5e9dc1f
Merge pull request #44 from joonas/chore/improve-readme-syntax-highlighting
chore: Update image customization examples to include YAML syntax highlighting
2024-07-01 18:09:24 -05:00
Joonas Bergius 845f78841a
Merge pull request #52 from joonas/fix/get-application
fix: Use new struct to combine Manifest and Status structs for get_application
2024-07-01 18:09:09 -05:00
Joonas Bergius f83dbf9215
feat: Add support for enabling observability via WasmCloudHostConfig
Signed-off-by: Joonas Bergius <joonas@cosmonic.com>
2024-06-30 16:22:09 -05:00
Joonas Bergius fa5594bab8
fix: Use new struct to combine Manifest and Status structs for get_application
Signed-off-by: Joonas Bergius <joonas@cosmonic.com>
2024-06-29 21:00:56 -05:00
Joonas Bergius 7dcb409bde
Merge pull request #38 from joonas/feat/use-wadm-client-and-types-crates
feat: Use wadm-client and wadm-types crates for wadm interactions
2024-06-27 13:38:13 -05:00
Joonas Bergius b1d88813f4
feat: Use wadm-client and wadm-types crates for wadm interactions
Signed-off-by: Joonas Bergius <joonas@cosmonic.com>
2024-06-27 10:16:09 -05:00
Joonas Bergius de5026f4e0
chore: Add dependabot configuration for dependency updates
Signed-off-by: Joonas Bergius <joonas@cosmonic.com>
2024-06-12 15:17:33 -05:00
Joonas Bergius 43d087f1ad
chore: Update image customization examples to include YAML syntax highlighting
Signed-off-by: Joonas Bergius <joonas@cosmonic.com>
2024-06-12 15:12:04 -05:00
Joonas Bergius 280f2648e6
Merge pull request #43 from joonas/chore/version-bump-and-readme-update
chore: Bump the operator version, update README with instructions on how to override images
2024-06-12 14:50:27 -05:00
Joonas Bergius 105e2c54dc
chore: Bump the operator version, update README with instructions on how to override images
Signed-off-by: Joonas Bergius <joonas@cosmonic.com>
2024-06-12 13:16:39 -05:00
Joonas Bergius 0952050708
Merge pull request #42 from joonas/feat/custom-nats-leaf-image-option
feat: Add option to customize NATS leaf node image
2024-06-12 12:55:16 -05:00
Joonas Bergius 5be39536f3
feat: Add option to customize NATS leaf node image
Signed-off-by: Joonas Bergius <joonas@cosmonic.com>
2024-06-12 11:55:57 -05:00
Dan Norris 3aa081ebbe
Merge pull request #40 from wasmCloud/update_deployments_0.2.3
chore: update kustomization and Helm chart to default to 0.2.3
2024-06-12 12:20:28 -04:00
Dan Norris 2a9db6a87a chore: update kustomization and Helm chart to default to 0.2.3
Signed-off-by: Dan Norris <protochron@users.noreply.github.com>
2024-06-12 11:59:08 -04:00
Dan Norris 86d26fd8cd
Merge pull request #39 from wasmCloud/0.2.3_add_version
chore: prepare for 0.2.3 release
2024-06-11 20:37:01 -04:00
Dan Norris a0dfdff70d chore: prepare for 0.2.3 release
Signed-off-by: Dan Norris <protochron@users.noreply.github.com>
2024-06-11 20:24:08 -04:00
Dan Norris cfc139f36d
Merge pull request #37 from wasmCloud/fix_nats_ports
fix: add options for setting NATS leaf and client ports separately
2024-05-28 09:57:21 -04:00
Dan Norris 6df71d1b6f fix: add options for setting NATS leaf and client ports separately
PR #35 introduced a small regression which only impacts clients who are
trying to use nonstandard ports. This fixes the problem by allowing the
NATS leaf node and client ports to be configured separately in the CRD.

Signed-off-by: Dan Norris <protochron@users.noreply.github.com>
2024-05-28 09:50:36 -04:00
Dan Norris 45ada5a9b2
Merge pull request #36 from wasmCloud/0.2.2_release
chore: bump wasmcloud-operator version for 0.2.2 release
2024-05-27 15:41:08 -04:00
Dan Norris 5d0b199ff7 chore: bump wasmcloud-operator version for 0.2.2 release
Signed-off-by: Dan Norris <protochron@users.noreply.github.com>
2024-05-27 15:17:30 -04:00
Dan Norris 178ee27fe9
Merge pull request #35 from wasmCloud/fix_port_spec
fix: allow nats address to include a port
2024-05-27 15:07:14 -04:00
Dan Norris 8ccdbb784d fix: allow nats address to include a port
This fix stops the preset NATS leaf node port `7422` from being
automatically set when the `natsAddress` field is set in the CRD. This
lets you specify a port other than the default, at the cost of making the
port required.

Signed-off-by: Dan Norris <protochron@users.noreply.github.com>
2024-05-27 14:39:15 -04:00
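
In practice this means a nonstandard port is written directly into `natsAddress` (a sketch based on the CRD example in the README; the address is illustrative):

```yaml
apiVersion: k8s.wasmcloud.dev/v1alpha1
kind: WasmCloudHostConfig
metadata:
  name: my-wasmcloud-cluster
spec:
  # The leaf node port is now part of the address itself and must be
  # given explicitly, even when NATS listens on the default 7422.
  natsAddress: nats://nats.default.svc.cluster.local:7422
```
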
Joonas Bergius 7a38f50847
Merge pull request #34 from thogar-computer/patch-1
Update README.md
2024-05-17 17:13:34 -05:00
Thomas Gardner 6b0f38ebf1
Update README.md
These are the changes I've made locally to get the instructions running :)

Signed-off-by: Thomas Gardner <thogar@noc.ac.uk>
2024-05-17 21:52:12 +01:00
Dan Norris 35af8adb73
Merge pull request #32 from wasmCloud/update_readme
chore: update README with latest version, example policy topic
2024-05-10 12:24:57 -04:00
Dan Norris dd22a19034 chore: update README with latest version, example policy topic
Signed-off-by: Dan Norris <protochron@users.noreply.github.com>
2024-05-10 12:24:15 -04:00
Dan Norris 77b8c86945
Merge pull request #31 from wasmCloud/policy_service_config
feat: add additional host configuration options for images, policy
2024-05-09 13:14:21 -04:00
Dan Norris d2266002aa feat: add additional host configuration options for images, policy
Add additional host options for the policy service, allowing insecure
registries and pulling the latest tag of an OCI ref.

Signed-off-by: Dan Norris <protochron@users.noreply.github.com>
2024-05-09 13:10:05 -04:00
Joonas Bergius f190e50c6f
Merge pull request #29 from joonas/fix/chart-publishing-pipeline
Use the full chart path for helm package
2024-04-30 11:25:47 -05:00
Joonas Bergius fcf4a445e3
Use the full chart path for helm package
Signed-off-by: Joonas Bergius <joonas@cosmonic.com>
2024-04-30 11:14:02 -05:00
Joonas Bergius 58b976c250
Merge pull request #28 from joonas/joonas/add-pods-and-endpointslices-permissions-to-chart
Add Pods and EndpointSlices permissions to wasmcloud-operator ClusterRole
2024-04-26 10:41:36 -05:00
Joonas Bergius 878a008a01
Merge pull request #27 from joonas/joonas/add-conditions-to-endpoints
Add conditions to Endpoints part of the EndpointSlices reflecting wadm application Service entries
2024-04-26 10:41:20 -05:00
Joonas Bergius 60723198aa
Add Pods and EndpointSlices permissions to operator CR
Signed-off-by: Joonas Bergius <joonas@cosmonic.com>
2024-04-25 16:35:57 -05:00
Joonas Bergius 1aa3942b6e
Add conditions to Endpoints part of the EndpointSlices reflecting wadm application Service entries
Signed-off-by: Joonas Bergius <joonas@cosmonic.com>
2024-04-24 19:50:44 -05:00
Dan Norris 4804546fd4
Merge pull request #26 from wasmCloud/update_wasmcloud_1.0_deps
chore: update wadm and wash-lib to the latest released versions
2024-04-17 19:45:44 -04:00
Dan Norris cd02677b16 chore: update wadm and wash-lib to the latest released versions
Update to use the latest wasmCloud 1.0 compatible versions of wadm and
wash-lib.

Signed-off-by: Dan Norris <protochron@users.noreply.github.com>
2024-04-17 19:39:20 -04:00
Taylor Thomas a9a05905c1
Merge pull request #25 from thomastaylor312/chore/1.0_bump
feat(*): Bumps to 0.2 for wasmcloud 1.0 support
2024-04-17 17:22:39 -06:00
Joonas Bergius 82ab69b84f
Merge pull request #19 from joonas/joonas/fix-up-readme-and-add-links
Fix up some README typos and add relevant links
2024-04-17 18:02:29 -05:00
Taylor Thomas a947d144cb feat(*): Bumps to 0.2 for wasmcloud 1.0 support
This bumps both the version of the operator and the default host version
in the example to 1.0. Also updates the README to use the released wadm
chart

Signed-off-by: Taylor Thomas <taylor@cosmonic.com>
2024-04-17 16:47:23 -06:00
Joonas Bergius 72f58558bd
Fix up some README typos and add relevant links
Signed-off-by: Joonas Bergius <joonas@cosmonic.com>
2024-04-15 10:49:55 -05:00
Joonas Bergius 692d78308b
Merge pull request #20 from ericgregory/readme
Add wadm instructions to readme
2024-04-15 10:48:25 -05:00
Taylor Thomas 1659819c23
Merge pull request #21 from thomastaylor312/ref/small_changes_and_bump
ref(*): Refactors some copied code and bumps version
2024-04-13 16:04:06 -05:00
Taylor Thomas 9eb70ec23a ref(*): Refactors some copied code and bumps version
This fixes a few Rust-y things and dedupes some code I found while
addressing clippy errors

Signed-off-by: Taylor Thomas <taylor@cosmonic.com>
2024-04-13 13:52:40 -06:00
Eric Gregory 04607fdcb5 Add wadm instructions to readme
Signed-off-by: Eric Gregory <eric@cosmonic.com>
2024-04-12 09:41:02 -04:00
Dan Norris 9a9a4a8f97
Merge pull request #18 from wasmCloud/1.0_compat
feat: rework the operator for wasmCloud 1.0 compatibility
2024-04-10 17:24:20 -04:00
Dan Norris 4d378f263f feat: rework the operator for wasmCloud 1.0 compatibility
wasmCloud 1.0 fully converts over to using components among other large
changes, so we need to rework the operator to account for it. 1.0 is
still in the works, so this updates the operator to depend on wasmCloud
1.0 libraries along with refactoring the service creation code to use
the updated application manifest format.

Once this is in we will likely cut a 0.2.0 pre-release and hold off on a
full release until wasmCloud 1.0.0 and wadm 0.11 are out.

This also adds a new field to the CRD so that you can specify the full
image reference instead of just the version. The `image` parameter takes
precedence.

Signed-off-by: Dan Norris <protochron@users.noreply.github.com>
2024-04-10 16:27:00 -04:00
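
The two ways of selecting a host image described above, sketched against the CRD (`version` and `image` are the fields named in this commit and the README; the values are illustrative):

```yaml
apiVersion: k8s.wasmcloud.dev/v1alpha1
kind: WasmCloudHostConfig
metadata:
  name: my-wasmcloud-cluster
spec:
  # Version only: the operator resolves the default wasmCloud host image.
  version: 1.0.4
  # Full image reference: when set, this takes precedence over `version`.
  image: registry.example.com/wasmcloud:1.0.4
```
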
Dan Norris 8bcd13b1e1
Merge pull request #14 from wasmCloud/service_sync
feat: automatically create Kubernetes Services for apps using a httpserver component
2024-04-09 12:13:08 -04:00
Dan Norris 0121602ee7 feat: Refactor configuration into a module
Also add support for setting the number of stream replicas when creating
the stream mirror for the operator.

Signed-off-by: Dan Norris <protochron@users.noreply.github.com>
2024-04-09 11:12:43 -04:00
Dan Norris b93332029f feat: automatically create Kubernetes Services for apps using a httpserver component
One advantage of wasmCloud applications is that we always know what
components make up an application and therefore can intelligently make
decisions based on what interfaces we're using. This makes it much
easier for software like the operator to take particular actions based
on the contents of an application manifest.

This change adds an additional set of reconciliations we can perform on
a per-lattice basis to automatically create Kubernetes Services for
applications that deploy a httpserver component. The operator uses a
NATS consumer that watches for manifest deploy and undeploy events and
triggers a reconciliation on a managed Service. Right now this is
restricted only to `daemonscalers`, since we do not have to do any
bookkeeping on where components are deployed if they are running on all
of the hosts in a lattice or in a subset identified by label. We will
add support for `spreadscalers` in a future PR.

This allows for some interesting deployment scenarios, such as wasmCloud
applications that span multiple Kubernetes deployments in the same
namespace, or potentially in _multiple namespaces_ if we decide to
implement support for them. Initially the operator is only creating
endpoints that route traffic from a service to pods in the same
namespace, but we may add an option to relax that assumption.

Signed-off-by: Dan Norris <protochron@users.noreply.github.com>
2024-04-09 10:09:44 -04:00
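
A minimal sketch of the kind of manifest this reconciliation acts on: a wadm application whose httpserver provider carries a `daemonscaler` trait (the image reference and component layout here are illustrative):

```yaml
apiVersion: core.oam.dev/v1beta1
kind: Application
metadata:
  name: hello-world
spec:
  components:
    - name: httpserver
      type: capability
      properties:
        image: ghcr.io/wasmcloud/http-server:0.21.0
      traits:
        # A daemonscaler runs the provider on every host in the lattice
        # (or a labeled subset), so the operator can create a Service
        # without tracking per-host placement.
        - type: daemonscaler
          properties:
            replicas: 1
```
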
Dan Norris bad704df22
Merge pull request #13 from wasmCloud/dependabot/cargo/h2-0.3.26
chore(deps): Bump h2 from 0.3.25 to 0.3.26
2024-04-08 11:11:49 -04:00
dependabot[bot] fe4ee8af32
chore(deps): Bump h2 from 0.3.25 to 0.3.26
Bumps [h2](https://github.com/hyperium/h2) from 0.3.25 to 0.3.26.
- [Release notes](https://github.com/hyperium/h2/releases)
- [Changelog](https://github.com/hyperium/h2/blob/v0.3.26/CHANGELOG.md)
- [Commits](https://github.com/hyperium/h2/compare/v0.3.25...v0.3.26)

---
updated-dependencies:
- dependency-name: h2
  dependency-type: indirect
...

Signed-off-by: dependabot[bot] <support@github.com>
2024-04-08 15:07:25 +00:00
Joonas Bergius 290db9919c
Merge pull request #10 from joonas/chore/publish-charts-under-separate-namespace
chore: Update charts to be published under separate namespace
2024-04-01 13:51:57 -05:00
Dan Norris 58a9799b0e
Merge pull request #9 from wasmCloud/node_affinity
feat: enable additional scheduling options for wasmCloud host pods
2024-04-01 14:33:56 -04:00
Dan Norris e671b4f12b feat: enable additional scheduling options for wasmCloud host pods
This refactors the WasmCloudHostConfig CRD so that it has a single field
(`schedulingOptions`) for configuring how the underlying Pods are
scheduled in Kubernetes. This includes:
* Relocating the `daemonset` option to this new field
* Relocating the `resources` option to this new field
* Adding a new `pod_template_additions` field that allows you to set any
  valid option in a `PodSpec`

Doing so allows cluster operators to do things like set node affinity
and node selector rules, along with any other valid PodSpec option. The
only thing that cannot be done is adding additional containers to the
pod, since that is all handled by the controller. We could look at
exposing that option if users want to be able to add additional
sidecars.

Signed-off-by: Dan Norris <protochron@users.noreply.github.com>
2024-04-01 13:53:39 -04:00
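
A sketch of the refactored CRD using the field names listed above (`schedulingOptions`, `daemonset`, `resources`, `pod_template_additions`); the nested values are illustrative:

```yaml
apiVersion: k8s.wasmcloud.dev/v1alpha1
kind: WasmCloudHostConfig
metadata:
  name: my-wasmcloud-cluster
spec:
  schedulingOptions:
    # Relocated from the top level of the CRD.
    daemonset: false
    resources:
      requests:
        cpu: 500m
        memory: 256Mi
    # Accepts any valid PodSpec option, e.g. node affinity or selectors;
    # additional containers are the one thing the controller disallows.
    pod_template_additions:
      nodeSelector:
        kubernetes.io/os: linux
```
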
Joonas Bergius 8c1fa43765
chore: Update charts to be published under separate namespace
Signed-off-by: Joonas Bergius <joonas@cosmonic.com>
2024-04-01 12:42:03 -05:00
Joonas Bergius 50a2d67798
Merge pull request #8 from joonas/feat/add-helm-chart
feat: Add helm chart
2024-03-29 18:20:52 -05:00
Joonas Bergius a4a0035aa2
feat: Add helm chart
Signed-off-by: Joonas Bergius <joonas@cosmonic.com>
2024-03-29 16:06:54 -05:00
Joonas Bergius 88369e3cdd
Merge pull request #7 from joonas/chore/add-dependency-review
chore: Add dependency-review workflow for pull requests
2024-03-29 16:06:08 -05:00
Joonas Bergius baee51eecc
chore: Add dependency-review workflow for pull requests
Signed-off-by: Joonas Bergius <joonas@cosmonic.com>
2024-03-29 12:43:27 -05:00
Joonas Bergius 3fa82d0857
Merge pull request #6 from wasmCloud/dependabot/cargo/h2-0.3.25
chore(deps): Bump h2 from 0.3.21 to 0.3.25
2024-03-29 12:29:53 -05:00
Joonas Bergius df435d802b
Merge pull request #5 from wasmCloud/dependabot/cargo/unsafe-libyaml-0.2.11
chore(deps): Bump unsafe-libyaml from 0.2.9 to 0.2.11
2024-03-29 12:29:27 -05:00
Joonas Bergius ca3527b6f3
Merge pull request #4 from joonas/fix/GHSA-g98v-hv3f-hcfr
fix: Address GHSA-g98v-hv3f-hcfr
2024-03-29 12:27:42 -05:00
dependabot[bot] d9111366a2
chore(deps): Bump h2 from 0.3.21 to 0.3.25
Bumps [h2](https://github.com/hyperium/h2) from 0.3.21 to 0.3.25.
- [Release notes](https://github.com/hyperium/h2/releases)
- [Changelog](https://github.com/hyperium/h2/blob/v0.3.25/CHANGELOG.md)
- [Commits](https://github.com/hyperium/h2/compare/v0.3.21...v0.3.25)

---
updated-dependencies:
- dependency-name: h2
  dependency-type: indirect
...

Signed-off-by: dependabot[bot] <support@github.com>
2024-03-29 17:25:16 +00:00
dependabot[bot] bdc43302b2
chore(deps): Bump unsafe-libyaml from 0.2.9 to 0.2.11
Bumps [unsafe-libyaml](https://github.com/dtolnay/unsafe-libyaml) from 0.2.9 to 0.2.11.
- [Release notes](https://github.com/dtolnay/unsafe-libyaml/releases)
- [Commits](https://github.com/dtolnay/unsafe-libyaml/compare/0.2.9...0.2.11)

---
updated-dependencies:
- dependency-name: unsafe-libyaml
  dependency-type: indirect
...

Signed-off-by: dependabot[bot] <support@github.com>
2024-03-29 17:24:48 +00:00
Joonas Bergius 1726808a14
fix: Address GHSA-g98v-hv3f-hcfr
Signed-off-by: Joonas Bergius <joonas@cosmonic.com>
2024-03-29 12:22:04 -05:00
Dan Norris 00adb860fc
Merge pull request #3 from wasmCloud/fix_oci_vars
fix: use the correct OCI registry environment variables for newer hosts
2024-03-28 09:21:58 -04:00
Dan Norris debadb3109 fix: use the correct OCI registry environment variables for newer hosts
In wasmCloud 0.81.0 we deprecated the old `OCI_REGISTRY` variables in
favor of prefixing them with `WASMCLOUD`. 0.82.0 fully removed those
variables, so this updates the controller to use the new variables with
an eye towards wasmCloud 1.0 compatibility.

Signed-off-by: Dan Norris <protochron@users.noreply.github.com>
2024-03-28 09:18:06 -04:00
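
The rename follows the pattern below in the environment the controller renders for host pods; the exact variable names are assumptions based on the `WASMCLOUD` prefix described in the message:

```yaml
env:
  # Assumed names: the old OCI_REGISTRY* variables gain a WASMCLOUD_ prefix.
  - name: WASMCLOUD_OCI_REGISTRY        # was OCI_REGISTRY
    value: registry.example.com
  - name: WASMCLOUD_OCI_REGISTRY_USER   # was OCI_REGISTRY_USER
    value: wasmcloud
```
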
Dan Norris 0756cdeeff
Merge pull request #2 from wasmCloud/add_credentials_docs
chore: add docs on how to use the `registryCredentialsSecret` option
2024-03-27 19:51:18 -04:00
Dan Norris c0e782dc1b chore: add docs on how to use the `registryCredentialsSecret` option
Add documentation on how to use the `registryCredentialsSecret` to
enable wasmCloud hosts to pull from a private registry.

Also add comments to each of the CRD fields outlining what each field
does.

Signed-off-by: Dan Norris <protochron@users.noreply.github.com>
2024-03-27 19:46:13 -04:00
Dan Norris b375604010
Merge pull request #1 from protochron/daemonset_option
feat: add option to start hosts as a DaemonSet
2024-03-27 12:58:29 +00:00
Dan Norris 72fa8af4d6 feat: add option to start hosts as a DaemonSet
Add a `daemonset` option for managing hosts as a DaemonSet instead of
just a Deployment.

Signed-off-by: Dan Norris <protochron@users.noreply.github.com>
2024-03-27 08:37:40 -04:00
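
Per the README example later in this diff, it is a one-line toggle in the CRD:

```yaml
apiVersion: k8s.wasmcloud.dev/v1alpha1
kind: WasmCloudHostConfig
metadata:
  name: my-wasmcloud-cluster
spec:
  # Manage the hosts as a DaemonSet (one per node) instead of a Deployment.
  daemonset: true
```
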
44 changed files with 4115 additions and 2312 deletions

.github/CODEOWNERS (new file, 2 lines)

@@ -0,0 +1,2 @@
# wasmCloud operator maintainers
* @wasmCloud/operator-maintainers

.github/dependabot.yml (new file, 10 lines)

@@ -0,0 +1,10 @@
version: 2
updates:
- package-ecosystem: "cargo"
directory: "/"
schedule:
interval: "daily"
- package-ecosystem: "github-actions"
directory: "/"
schedule:
interval: "daily"

.github/dependency-review-config.yml (new file, 9 lines)

@@ -0,0 +1,9 @@
# For more details on the available options, see:
# https://github.com/actions/dependency-review-action?tab=readme-ov-file#configuration-options
fail-on-severity: critical
comment-summary-in-pr: always
show-openssf-scorecard: true
warn-on-openssf-scorecard-level: 3

(CI workflow)

@@ -16,7 +16,7 @@ jobs:
check:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v3
- uses: actions/checkout@v4
- name: Lint
run: |
cargo clippy -- --no-deps
@@ -71,7 +71,7 @@
uses: docker/setup-buildx-action@v3
- name: Log in to the Container registry
uses: docker/login-action@v2
uses: docker/login-action@v3
with:
registry: ${{ env.REGISTRY }}
username: ${{ github.repository_owner }}
@@ -86,6 +86,15 @@
type=sha,prefix=
type=semver,pattern={{version}}
- name: Extract metadata (tags, labels) for Docker
id: meta_wolfi
uses: docker/metadata-action@v5
with:
images: ${{ env.REGISTRY }}/${{ env.IMAGE_NAME }}
tags: |
type=sha,prefix=,suffix=-wolfi
type=semver,pattern={{version}},suffix=-wolfi
- name: Load artifacts
uses: actions/download-artifact@v4
with:
@@ -98,7 +107,7 @@
chmod +x artifacts/wasmcloud-operator*
- name: Build and push Docker image
uses: docker/build-push-action@v4
uses: docker/build-push-action@v6
with:
push: true
context: .
@@ -106,3 +115,14 @@
labels: ${{ steps.meta.outputs.labels }}
platforms: linux/amd64,linux/arm64
build-args: "BIN_PATH=artifacts/wasmcloud-operator"
- name: Build and push Docker image (wolfi)
uses: docker/build-push-action@v6
with:
push: true
context: .
file: './Dockerfile.wolfi'
tags: ${{ steps.meta_wolfi.outputs.tags }}
labels: ${{ steps.meta_wolfi.outputs.labels }}
platforms: linux/amd64,linux/arm64
build-args: "BIN_PATH=artifacts/wasmcloud-operator"

.github/workflows/chart.yaml (new file, 96 lines)

@@ -0,0 +1,96 @@
name: chart
env:
HELM_VERSION: v3.14.0
on:
push:
tags:
- 'chart-v[0-9].[0-9]+.[0-9]+'
pull_request:
paths:
- 'charts/**'
- '.github/workflows/chart.yml'
jobs:
validate:
runs-on: ubuntu-22.04
steps:
- uses: actions/checkout@v4
with:
fetch-depth: 0
- name: Fetch main branch for chart-testing
run: |
git fetch origin main:main
- name: Set up Helm
uses: azure/setup-helm@v4
with:
version: ${{ env.HELM_VERSION }}
# Used by helm chart-testing below
- name: Set up Python
uses: actions/setup-python@v5.6.0
with:
python-version: '3.12.2'
- name: Set up chart-testing
uses: helm/chart-testing-action@v2.7.0
with:
version: v3.10.1
yamllint_version: 1.35.1
yamale_version: 5.0.0
- name: Run chart-testing (lint)
run: |
ct lint --config charts/wasmcloud-operator/ct.yaml
- name: Create kind cluster
uses: helm/kind-action@v1.12.0
with:
version: "v0.22.0"
- name: Run chart-testing (install)
run: |
ct install --config charts/wasmcloud-operator/ct.yaml
publish:
if: ${{ startsWith(github.ref, 'refs/tags/chart-v') }}
runs-on: ubuntu-22.04
needs: validate
permissions:
packages: write
steps:
- uses: actions/checkout@v4
- name: Set up Helm
uses: azure/setup-helm@v4
with:
version: ${{ env.HELM_VERSION }}
- name: Package
run: |
helm package charts/wasmcloud-operator -d .helm-charts
- name: Login to GHCR
uses: docker/login-action@v3
with:
registry: ghcr.io
username: ${{ github.repository_owner }}
password: ${{ secrets.GITHUB_TOKEN }}
- name: Lowercase the organization name for ghcr.io
run: |
echo "GHCR_REPO_NAMESPACE=${GITHUB_REPOSITORY_OWNER,,}" >>${GITHUB_ENV}
- name: Publish
run: |
for chart in .helm-charts/*; do
if [ -z "${chart:-}" ]; then
break
fi
helm push "${chart}" "oci://ghcr.io/${{ env.GHCR_REPO_NAMESPACE }}/charts"
done

.github/workflows/dependency-review.yml (new file, 19 lines)

@@ -0,0 +1,19 @@
name: 'Dependency Review'
on:
pull_request:
permissions:
contents: read
pull-requests: write
jobs:
dependency-review:
runs-on: ubuntu-latest
steps:
- name: 'Checkout Repository'
uses: actions/checkout@v4
- name: Dependency Review
uses: actions/dependency-review-action@v4
with:
config-file: './.github/dependency-review-config.yml'

Cargo.lock (generated, 1641 lines changed)

File diff suppressed because it is too large.

Cargo.toml

@@ -1,6 +1,6 @@
[package]
name = "wasmcloud-operator"
version = "0.1.0"
version = "0.5.0"
edition = "2021"
[[bin]]
@@ -21,51 +21,64 @@ path = "src/lib.rs"
edition = "2021"
[dependencies]
async-nats = {workspace = true}
axum = {workspace = true}
axum-server = {workspace = true}
anyhow = {workspace = true}
atty = {workspace = true}
ctrlc = {workspace = true}
futures = {workspace = true}
handlebars = {workspace = true}
json-patch = {workspace = true}
k8s-openapi = {workspace = true, features = ["v1_28", "schemars"]}
kube = {workspace = true, features = ["runtime", "derive", "default"]}
opentelemetry = {workspace = true}
opentelemetry-otlp = {workspace = true}
rcgen = {workspace = true}
schemars = {workspace = true}
secrecy = {workspace = true}
serde = {workspace = true}
serde_json = {workspace = true}
serde_yaml = {workspace = true}
thiserror = {workspace = true}
time = {workspace = true}
tokio = {workspace = true}
tracing = {workspace = true}
tracing-opentelemetry = {workspace = true}
tracing-subscriber = {workspace = true}
utoipa = {workspace = true}
uuid = {workspace = true}
wadm = {workspace = true}
wash-lib = {workspace = true}
wasmcloud-operator-types = {workspace = true}
async-nats = { workspace = true }
axum = { workspace = true }
axum-server = { workspace = true }
anyhow = { workspace = true }
ctrlc = { workspace = true }
cloudevents-sdk = { workspace = true }
config = { workspace = true }
futures = { workspace = true }
handlebars = { workspace = true }
json-patch = { workspace = true }
k8s-openapi = { workspace = true, features = ["v1_28", "schemars"] }
kube = { workspace = true, features = ["runtime", "derive", "default"] }
opentelemetry = { workspace = true }
opentelemetry_sdk = { workspace = true }
opentelemetry-otlp = { workspace = true }
rcgen = { workspace = true }
schemars = { workspace = true }
secrecy = { workspace = true }
serde = { workspace = true }
serde_json = { workspace = true }
serde_yaml = { workspace = true }
thiserror = { workspace = true }
time = { workspace = true }
tokio = { workspace = true }
tokio-util = { workspace = true }
tracing = { workspace = true }
tracing-opentelemetry = { workspace = true }
tracing-subscriber = { workspace = true }
utoipa = { workspace = true }
uuid = { workspace = true }
wadm = { workspace = true }
wadm-client = { workspace = true }
wadm-types = { workspace = true }
wasmcloud-operator-types = { workspace = true }
[workspace.dependencies]
async-nats = "0.33"
axum = { version = "0.6", features = ["headers"] }
axum-server = { version = "0.4", features = ["tls-rustls"] }
anyhow = "1"
atty = "0.2"
config = { version = "0.14", default-features = false, features = [
"convert-case",
"async",
] }
cloudevents-sdk = "0.7"
ctrlc = "3"
futures = "0.3"
handlebars = "5.1"
json-patch = "1.2.0"
k8s-openapi = { version = "0.20", default-features = false}
kube = { version = "0.87", default-features = false}
opentelemetry = { version = "0.20", features = ["metrics", "trace", "rt-tokio"] }
opentelemetry-otlp = { version = "0.13", features = ["tokio"] }
json-patch = "1.4.0"
k8s-openapi = { version = "0.20", default-features = false }
kube = { version = "0.87", default-features = false }
opentelemetry = { version = "0.21", default-features = false }
opentelemetry_sdk = { version = "0.21", features = [
"metrics",
"trace",
"rt-tokio",
] }
opentelemetry-otlp = { version = "0.14", features = ["tokio"] }
rcgen = "0.11"
schemars = "0.8"
secrecy = "0.8"
@@ -75,19 +88,19 @@ serde_yaml = "0.9"
thiserror = "1"
time = "0.3"
tokio = { version = "1", features = ["full"] }
tokio-util = { version = "0.7", features = ["rt"] }
tracing = "0.1"
tracing-opentelemetry = "0.20"
tracing-opentelemetry = "0.22"
tracing-subscriber = { version = "0.3", features = ["env-filter", "json"] }
utoipa = { version = "4.1", features = ["axum_extras"] }
uuid = { version = "1", features = ["v5"] }
wadm = "0.10"
wash-lib = "0.17"
wasmcloud-operator-types = { version="*", path = "./crates/types" }
wadm = "0.13.0"
wadm-client = "0.2.0"
wadm-types = "0.2.0"
wasmcloud-operator-types = { version = "*", path = "./crates/types" }
[workspace]
members = [
"crates/*"
]
members = ["crates/*"]
resolver = "2"
[profile.release]

Dockerfile

@@ -1,5 +1,5 @@
# syntax=docker/dockerfile:1
FROM rust:1.75-bookworm as builder
FROM rust:1.77-bookworm as builder
WORKDIR /app
COPY . .

Dockerfile.wolfi (new file, 7 lines)

@@ -0,0 +1,7 @@
# syntax=docker/dockerfile:1
FROM chainguard/wolfi-base:latest
ARG BIN_PATH
ARG TARGETARCH
COPY ${BIN_PATH}-${TARGETARCH} /usr/local/bin/wasmcloud-operator
ENTRYPOINT ["/usr/local/bin/wasmcloud-operator"]

LICENSE

@@ -187,7 +187,7 @@
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright [yyyy] [name of copyright owner]
Copyright 2024 wasmCloud Maintainers
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.

MAINTAINERS.md (new file, 22 lines)

@@ -0,0 +1,22 @@
# MAINTAINERS
The following individuals are responsible for reviewing code, managing issues, and ensuring the overall quality of `wasmcloud-operator`.
## @wasmCloud/operator-maintainers
Name: Joonas Bergius
GitHub: @joonas
Organization: Cosmonic
Name: Dan Norris
GitHub: @protochron
Organization: Cosmonic
Name: Taylor Thomas
GitHub: @thomastaylor312
Organization: Cosmonic
Name: Lucas Fontes
GitHub: @lxfontes
Organization: Cosmonic

README.md (106 lines changed)

@@ -1,10 +1,10 @@
# wasmcloud-operator
An operator for managing a set of wasmCloud hosts running on Kubernetes and
manage wasmCloud appliations using WADM.
An operator for managing a set of [wasmCloud hosts](https://github.com/wasmCloud/wasmCloud/) running on Kubernetes and
manage [wasmCloud applications using wadm](https://github.com/wasmcloud/wadm).
The goal is to easily be able to run WasmCloud hosts on a Kubernetes cluster.
## CusterConfig Custom Resource Definition (CRD)
## WasmCloudHostConfig Custom Resource Definition (CRD)
The WasmCloudHostConfig CRD describes the desired state of a set of wasmCloud
hosts connected to the same lattice.
@@ -17,18 +17,20 @@ metadata:
spec:
# The number of wasmCloud host pods to run
hostReplicas: 2
# The cluster issuers to use for each host
issuers:
- CDKF6OKPOBQKAX57UOXO7SCHURTOZWKWIVPC2HFJTGFXY5VJX44ECEHH
# The lattice to connect the hosts to
lattice: 83a5b52e-17cf-4080-bac8-f844099f142e
lattice: default
# Additional labels to apply to the host other than the defaults set in the operator
hostLabels:
some-label: value
# The address to connect to nats
natsAddress: nats://nats.default.svc.cluster.local
# Which wasmCloud version to use
version: 0.81.0
# The name of a secret in the same namespace that provides the required secrets.
secretName: cluster-secrets
version: 1.0.4
# Enable the following to run the wasmCloud hosts as a DaemonSet
#daemonset: true
# The name of the image pull secret to use with wasmCloud hosts so that they
# can authenticate to a private registry to pull components.
# registryCredentialsSecret: my-registry-secret
```
The CRD requires a Kubernetes Secret with the following keys:
@@ -38,19 +40,54 @@ apiVersion: v1
kind: Secret
metadata:
name: my-wasmcloud-cluster
data:
# You can generate this with wash:
# wash keys gen cluster
WASMCLOUD_CLUSTER_SEED: <seed>
# Only required if using a NATS creds file
# nats.creds: <base64 encoded creds file>
# Only required if using OCI private registry
# OCI_REGISTRY_PASSWORD: <password>
#data:
# Only required if using a NATS creds file
# nats.creds: <creds file>
```
The operator will fail to provision the wasmCloud Deployment if any of these
secrets are missing!
#### Customizing the images used for wasmCloud host and NATS leaf
If you would like to customize the registry or image that gets used to provision the wasmCloud hosts and the NATS leaf that runs alongside them, you can specify the following options in the above `WasmCloudHostConfig` CRD.
For wasmCloud Host, use the `image` field:
```yaml
apiVersion: k8s.wasmcloud.dev/v1alpha1
kind: WasmCloudHostConfig
metadata:
name: my-wasmcloud-cluster
spec:
# other config options omitted
image: registry.example.com/wasmcloud:1.0.2
```
For the NATS leaf, use the `natsLeafImage` field:
```yaml
apiVersion: k8s.wasmcloud.dev/v1alpha1
kind: WasmCloudHostConfig
metadata:
name: my-wasmcloud-cluster
spec:
# other config options omitted
natsLeafImage: registry.example.com/nats:2.10.16
```
### Image Pull Secrets
You can also specify an image pull secret to use with the wasmCloud hosts
so that they can pull components from a private registry. This secret needs to
be in the same namespace as the WasmCloudHostConfig CRD and must be a
`kubernetes.io/dockerconfigjson` type secret. See the [Kubernetes
documentation](https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/#registry-secret-existing-credentials)
for more information on how to provision that secret.
Once it is created, you can reference it in the WasmCloudHostConfig CRD by
setting the `registryCredentialsSecret` field to the name of the secret.
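
For example, a compatible secret can be created with kubectl before referencing it from the CRD (registry and credentials are placeholders):

```sh
kubectl create secret docker-registry my-registry-secret \
  --docker-server=registry.example.com \
  --docker-username=wasmcloud \
  --docker-password=supersecret
```
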
## Deploying the operator
A wasmCloud cluster requires a few things to run:
@@ -83,23 +120,45 @@ config:
```
```sh
helm upgrade --install -f values.yaml nats-cluster nats/nats
helm repo add nats https://nats-io.github.io/k8s/helm/charts/
helm upgrade --install -f values.yaml nats nats/nats
```
### Running WADM
### Running Wadm
WADM can be run as a standalone binary or as a container. The following
command will start WADM as a Kubernetes deployment:
You can run Wadm in your Kubernetes cluster using our Helm chart. For a minimal deployment using the
NATS server deployed above, all you need in your `values.yaml` file is:
```yaml
wadm:
config:
nats:
server: "nats.default.svc.cluster.local:4222"
```
You can deploy Wadm using your values file and Helm:
```sh
helm install wadm -f wadm-values.yaml --version 0.2.0 oci://ghcr.io/wasmcloud/charts/wadm
```
### Start the operator
```sh
kubectl kustomize build deploy/local | kubectl apply -f -
kubectl kustomize deploy/base | kubectl apply -f -
```
## Automatically Syncing Kubernetes Services
The operator automatically creates Kubernetes Services for wasmCloud
applications. Right now this is limited only to applications that deploy the
wasmCloud httpserver component using a `daemonscaler`, but additional support
for `spreadscalers` will be added in the future.
If you specify host label selectors on the `daemonscaler` then the operator
will honor those labels and will only create a service for the pods that match
those label selectors.
## Argo CD Health Check
Argo CD provides a way to define a [custom health
@@ -157,7 +216,6 @@ data:
include [Kind](https://kind.sigs.k8s.io/) or Docker Desktop.
- `RUST_LOG=info cargo run`
## Types crate
This repo stores the types for any CRDs used by the operator in a separate

SECURITY.md (new file, 3 lines)

@@ -0,0 +1,3 @@
# Reporting a security issue
Please refer to the [wasmCloud Security Process and Policy](https://github.com/wasmCloud/wasmCloud/blob/main/SECURITY.md) for details on how to report security issues and vulnerabilities.

charts/wasmcloud-operator/.helmignore (new file)

@@ -0,0 +1,23 @@
# Patterns to ignore when building packages.
# This supports shell glob matching, relative path matching, and
# negation (prefixed with !). Only one pattern per line.
.DS_Store
# Common VCS dirs
.git/
.gitignore
.bzr/
.bzrignore
.hg/
.hgignore
.svn/
# Common backup files
*.swp
*.bak
*.tmp
*.orig
*~
# Various IDEs
.project
.idea/
*.tmproj
.vscode/

charts/wasmcloud-operator/Chart.yaml (new file)

@@ -0,0 +1,16 @@
apiVersion: v2
name: wasmcloud-operator
description: A Helm chart for deploying the wasmcloud-operator on Kubernetes
type: application
# This is the chart version. This version number should be incremented each time you make changes
# to the chart and its templates, including the app version.
# Versions are expected to follow Semantic Versioning (https://semver.org/)
version: 0.1.6
# This is the version number of the application being deployed. This version number should be
# incremented each time you make changes to the application. Versions are not expected to
# follow Semantic Versioning. They should reflect the version the application is using.
# It is recommended to use it with quotes.
appVersion: "0.4.0"

charts/wasmcloud-operator/ct.yaml (new file)

@@ -0,0 +1,3 @@
validate-maintainers: false
target-branch: main # TODO: Remove this once chart-testing 3.10.1+ is released
helm-extra-args: --timeout 60s

(chart template helpers, new file)

@@ -0,0 +1,96 @@
{{/*
Expand the name of the chart.
*/}}
{{- define "wasmcloud-operator.name" -}}
{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" }}
{{- end }}
{{/*
Create a default fully qualified app name.
We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
If release name contains chart name it will be used as a full name.
*/}}
{{- define "wasmcloud-operator.fullname" -}}
{{- if .Values.fullnameOverride }}
{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" }}
{{- else }}
{{- $name := default .Chart.Name .Values.nameOverride }}
{{- if contains $name .Release.Name }}
{{- .Release.Name | trunc 63 | trimSuffix "-" }}
{{- else }}
{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" }}
{{- end }}
{{- end }}
{{- end }}
{{/*
Create chart name and version as used by the chart label.
*/}}
{{- define "wasmcloud-operator.chart" -}}
{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }}
{{- end }}
{{/*
Namespace that's used for setting up cluster role bindings and such
*/}}
{{- define "wasmcloud-operator.namespace" -}}
{{- default "default" .Release.Namespace }}
{{- end }}
{{/*
Common labels
*/}}
{{- define "wasmcloud-operator.labels" -}}
helm.sh/chart: {{ include "wasmcloud-operator.chart" . }}
{{ include "wasmcloud-operator.selectorLabels" . }}
app.kubernetes.io/component: operator
{{- if .Chart.AppVersion }}
app.kubernetes.io/version: {{ .Chart.AppVersion | quote }}
{{- end }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
app.kubernetes.io/part-of: wasmcloud-operator
{{- with .Values.additionalLabels }}
{{ . | toYaml }}
{{- end }}
{{- end }}
{{/*
Selector labels
*/}}
{{- define "wasmcloud-operator.selectorLabels" -}}
app.kubernetes.io/name: {{ include "wasmcloud-operator.name" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
{{- end }}
{{/*
Create the name of the service account to use
*/}}
{{- define "wasmcloud-operator.service-account" -}}
{{- if .Values.serviceAccount.create }}
{{- default (include "wasmcloud-operator.fullname" .) .Values.serviceAccount.name }}
{{- else }}
{{- default "default" .Values.serviceAccount.name }}
{{- end }}
{{- end }}
{{/*
Create the name of the cluster role to use
*/}}
{{- define "wasmcloud-operator.cluster-role" -}}
{{- if .Values.serviceAccount.create }}
{{- default (include "wasmcloud-operator.fullname" .) .Values.serviceAccount.name }}
{{- else }}
{{- default "default" .Values.serviceAccount.name }}
{{- end }}
{{- end }}
{{/*
Create the name of the cluster role binding to use
*/}}
{{- define "wasmcloud-operator.cluster-role-binding" -}}
{{- if .Values.serviceAccount.create }}
{{- default (include "wasmcloud-operator.fullname" .) .Values.serviceAccount.name }}
{{- else }}
{{- default "default" .Values.serviceAccount.name }}
{{- end }}
{{- end }}

(chart RBAC templates, new file)

@@ -0,0 +1,118 @@
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: {{ include "wasmcloud-operator.cluster-role" . }}
labels:
{{- include "wasmcloud-operator.labels" . | nindent 4 }}
rules:
- apiGroups:
- ""
resources:
- secrets
- services
- configmaps
- serviceaccounts
- pods
verbs:
- get
- list
- watch
- create
- delete
- patch
- update
- apiGroups:
- apps
resources:
- deployments
- daemonsets
verbs:
- get
- list
- watch
- create
- delete
- patch
- update
- apiGroups:
- rbac.authorization.k8s.io
resources:
- rolebindings
- roles
verbs:
- get
- list
- watch
- create
- delete
- patch
- apiGroups:
- apiextensions.k8s.io
resources:
- customresourcedefinitions
verbs:
- get
- list
- watch
- create
- delete
- patch
- apiGroups:
- apiregistration.k8s.io
resources:
- apiservices
verbs:
- create
- delete
- get
- list
- patch
- update
- apiGroups:
- discovery.k8s.io
resources:
- endpointslices
verbs:
- create
- delete
- get
- list
- patch
- update
- apiGroups:
- k8s.wasmcloud.dev
resources:
- wasmcloudhostconfigs
- wasmcloudhostconfigs/status
verbs:
- "*"
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: {{ include "wasmcloud-operator.cluster-role-binding" . }}
labels:
{{- include "wasmcloud-operator.labels" . | nindent 4 }}
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: {{ include "wasmcloud-operator.cluster-role" . }}
subjects:
- apiGroup: ""
kind: ServiceAccount
name: {{ include "wasmcloud-operator.service-account" . }}
namespace: {{ include "wasmcloud-operator.namespace" . }}
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: {{ include "wasmcloud-operator.cluster-role-binding" . }}-delegator
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: system:auth-delegator
subjects:
- apiGroup: ""
kind: ServiceAccount
name: {{ include "wasmcloud-operator.service-account" . }}
namespace: {{ include "wasmcloud-operator.namespace" . }}

(chart Deployment template, new file)

@@ -0,0 +1,61 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: {{ include "wasmcloud-operator.fullname" . }}
labels:
{{- include "wasmcloud-operator.labels" . | nindent 4 }}
spec:
replicas: 1
selector:
matchLabels:
{{- include "wasmcloud-operator.selectorLabels" . | nindent 6 }}
template:
metadata:
{{- with .Values.podAnnotations }}
annotations:
{{- toYaml . | nindent 8 }}
{{- end }}
labels:
{{- include "wasmcloud-operator.labels" . | nindent 8 }}
{{- with .Values.podLabels }}
{{- toYaml . | nindent 8 }}
{{- end }}
spec:
{{- with .Values.imagePullSecrets }}
imagePullSecrets:
{{- toYaml . | nindent 8 }}
{{- end }}
serviceAccountName: {{ include "wasmcloud-operator.service-account" . }}
securityContext:
{{- toYaml .Values.podSecurityContext | nindent 8 }}
containers:
- name: {{ .Chart.Name }}
securityContext:
{{- toYaml .Values.securityContext | nindent 12 }}
image: "{{ .Values.image.repository }}:{{ .Values.image.tag | default .Chart.AppVersion }}"
imagePullPolicy: {{ .Values.image.pullPolicy }}
env:
- name: RUST_LOG
value: info,async_nats=error
- name: POD_NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
ports:
- name: https
containerPort: {{ .Values.service.port }}
protocol: TCP
resources:
{{- toYaml .Values.resources | nindent 12 }}
{{- with .Values.nodeSelector }}
nodeSelector:
{{- toYaml . | nindent 8 }}
{{- end }}
{{- with .Values.affinity }}
affinity:
{{- toYaml . | nindent 8 }}
{{- end }}
{{- with .Values.tolerations }}
tolerations:
{{- toYaml . | nindent 8 }}
{{- end }}


@ -0,0 +1,15 @@
apiVersion: v1
kind: Service
metadata:
name: {{ include "wasmcloud-operator.fullname" . }}
labels:
{{- include "wasmcloud-operator.labels" . | nindent 4 }}
spec:
type: {{ .Values.service.type }}
ports:
- port: {{ .Values.service.port }}
targetPort: https
protocol: TCP
name: https
selector:
{{- include "wasmcloud-operator.selectorLabels" . | nindent 4 }}


@ -0,0 +1,13 @@
{{- if .Values.serviceAccount.create -}}
apiVersion: v1
kind: ServiceAccount
metadata:
name: {{ include "wasmcloud-operator.service-account" . }}
labels:
{{- include "wasmcloud-operator.labels" . | nindent 4 }}
{{- with .Values.serviceAccount.annotations }}
annotations:
{{- toYaml . | nindent 4 }}
{{- end }}
automountServiceAccountToken: {{ .Values.serviceAccount.automount }}
{{- end }}


@ -0,0 +1,15 @@
apiVersion: v1
kind: Pod
metadata:
name: "{{ include "wasmcloud-operator.fullname" . }}-test-connection"
labels:
{{- include "wasmcloud-operator.labels" . | nindent 4 }}
annotations:
"helm.sh/hook": test
spec:
containers:
- name: wget
image: alpine
command: ['wget']
args: ['--no-check-certificate', 'https://{{ include "wasmcloud-operator.fullname" . }}:{{ .Values.service.port }}/apis/core.oam.dev/v1beta1']
restartPolicy: Never


@ -0,0 +1,63 @@
# Default values for wasmcloud-operator.
# This is a YAML-formatted file.
# Declare variables to be passed into your templates.
image:
repository: ghcr.io/wasmcloud/wasmcloud-operator
pullPolicy: IfNotPresent
# Overrides the image tag whose default is the chart appVersion.
tag: ""
imagePullSecrets: []
nameOverride: ""
fullnameOverride: ""
additionalLabels: {}
# app: wasmcloud-operator
serviceAccount:
# Specifies whether a service account should be created
create: true
# Automatically mount a ServiceAccount's API credentials?
automount: true
# Annotations to add to the service account
annotations: {}
# The name of the service account to use.
# If not set and create is true, a name is generated using the fullname template
name: ""
podAnnotations: {}
podLabels: {}
podSecurityContext: {}
# fsGroup: 2000
securityContext: {}
# capabilities:
# drop:
# - ALL
# readOnlyRootFilesystem: true
# runAsNonRoot: true
# runAsUser: 1000
service:
type: ClusterIP
port: 8443
resources: {}
# We usually recommend not to specify default resources and to leave this as a conscious
# choice for the user. This also increases chances charts run on environments with little
# resources, such as Minikube. If you do want to specify resources, uncomment the following
# lines, adjust them as necessary, and remove the curly braces after 'resources:'.
# limits:
# cpu: 100m
# memory: 128Mi
# requests:
# cpu: 100m
# memory: 128Mi
nodeSelector: {}
tolerations: []
affinity: {}


@ -1,11 +1,16 @@
[package]
name = "wasmcloud-operator-types"
version = "0.1.0"
version = "0.1.9"
edition = "2021"
[package.metadata.cargo-machete]
# NOTE: This exists because kube-derive needs it, and for reasons I don't
# fully understand, it's not coming through kube-derive's own dependencies.
ignored = ["serde_json"]
[dependencies]
k8s-openapi = {workspace = true}
kube = {workspace = true, features = ["derive"]}
schemars = {workspace = true}
serde = {workspace = true}
serde_json = {workspace = true}
k8s-openapi = { workspace = true }
kube = { workspace = true, features = ["derive"] }
schemars = { workspace = true }
serde = { workspace = true }
serde_json = { workspace = true }


@ -1,8 +1,8 @@
use k8s_openapi::api::core::v1::ResourceRequirements;
use k8s_openapi::api::core::v1::{Container, PodSpec, ResourceRequirements, Volume};
use kube::CustomResource;
use schemars::JsonSchema;
use schemars::{gen::SchemaGenerator, schema::Schema, JsonSchema};
use serde::{Deserialize, Serialize};
use std::collections::HashMap;
use std::collections::{BTreeMap, BTreeSet};
#[derive(CustomResource, Deserialize, Serialize, Clone, Debug, JsonSchema)]
#[cfg_attr(test, derive(Default))]
@ -10,33 +10,195 @@ use std::collections::HashMap;
kind = "WasmCloudHostConfig",
group = "k8s.wasmcloud.dev",
version = "v1alpha1",
shortname = "chc",
shortname = "whc",
namespaced,
status = "WasmCloudHostConfigStatus",
printcolumn = r#"{"name":"App Count", "type":"integer", "jsonPath":".status.app_count"}"#
)]
#[serde(rename_all = "camelCase")]
pub struct WasmCloudHostConfigSpec {
/// The number of replicas to use for the wasmCloud host Deployment.
#[serde(default = "default_host_replicas")]
pub host_replicas: u32,
pub issuers: Vec<String>,
/// DEPRECATED: A list of cluster issuers to use when provisioning hosts. See
/// https://wasmcloud.com/docs/deployment/security/zero-trust-invocations for more information.
#[deprecated(since = "0.3.1", note = "Removed in wasmcloud 1.0.0")]
pub issuers: Option<Vec<String>>,
/// The lattice to use for these hosts.
pub lattice: String,
pub host_labels: Option<HashMap<String, String>>,
/// An optional set of labels to apply to these hosts.
pub host_labels: Option<BTreeMap<String, String>>,
/// The version of the wasmCloud host to deploy.
pub version: String,
pub secret_name: String,
/// The image to use for the wasmCloud host.
/// If not provided, the default image for the version will be used.
/// If provided, the version field is ignored.
pub image: Option<String>,
/// The image to use for the NATS leaf that is deployed alongside the wasmCloud host.
/// If not provided, the default upstream image will be used.
/// If provided, it should be fully qualified by including the image tag.
pub nats_leaf_image: Option<String>,
/// Optional. The name of a secret containing a set of NATS credentials under the 'nats.creds' key.
pub secret_name: Option<String>,
/// Enable structured logging for host logs.
pub enable_structured_logging: Option<bool>,
/// Name of a secret containing the registry credentials
pub registry_credentials_secret: Option<String>,
pub resources: Option<WasmCloudHostConfigResources>,
/// The control topic prefix to use for the host.
pub control_topic_prefix: Option<String>,
/// The leaf node domain to use for the NATS sidecar. Defaults to "leaf".
#[serde(default = "default_leaf_node_domain")]
pub leaf_node_domain: String,
/// Enable the config service for this host.
#[serde(default)]
pub config_service_enabled: bool,
/// The address of the NATS server to connect to. Defaults to "nats://nats.default.svc.cluster.local".
#[serde(default = "default_nats_address")]
pub nats_address: String,
/// The port of the NATS server to connect to. Defaults to 4222.
#[serde(default = "default_nats_port")]
pub nats_client_port: u16,
/// The port of the NATS server to connect to for leaf node connections. Defaults to 7422.
#[serde(default = "default_nats_leafnode_port")]
pub nats_leafnode_port: u16,
/// The Jetstream domain to use for the NATS sidecar. Defaults to "default".
#[serde(default = "default_jetstream_domain")]
pub jetstream_domain: String,
/// Allow the host to deploy using the latest tag on OCI components or providers
#[serde(default)]
pub allow_latest: bool,
/// Allow the host to pull artifacts from OCI registries insecurely
#[serde(default)]
pub allowed_insecure: Option<Vec<String>>,
/// The log level to use for the host. Defaults to "INFO".
#[serde(default = "default_log_level")]
pub log_level: String,
pub policy_service: Option<PolicyService>,
/// Kubernetes scheduling options for the wasmCloud host.
pub scheduling_options: Option<KubernetesSchedulingOptions>,
/// Observability options for configuring the OpenTelemetry integration
pub observability: Option<ObservabilityConfiguration>,
/// Certificates: certificate authorities and client certificates
pub certificates: Option<WasmCloudHostCertificates>,
/// wasmCloud secrets topic prefix, must not be empty if set.
pub secrets_topic_prefix: Option<String>,
/// Maximum memory in bytes that components can use.
pub max_linear_memory_bytes: Option<u32>,
}
#[derive(Serialize, Deserialize, Clone, Debug, JsonSchema)]
#[serde(rename_all = "camelCase")]
pub struct PolicyService {
pub topic: Option<String>,
pub timeout_ms: Option<u32>,
pub changes_topic: Option<String>,
}
#[derive(Serialize, Deserialize, Clone, Debug, JsonSchema)]
#[serde(rename_all = "camelCase")]
pub struct KubernetesSchedulingOptions {
/// Run hosts as a DaemonSet instead of a Deployment.
#[serde(default)]
pub daemonset: bool,
/// Kubernetes resources to allocate for the host. See
/// https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ for valid
/// values to use here.
pub resources: Option<WasmCloudHostConfigResources>,
/// Any other pod template spec options to set for the underlying wasmCloud host pods.
#[schemars(schema_with = "pod_schema")]
#[serde(default, skip_serializing_if = "Option::is_none")]
pub pod_template_additions: Option<PodSpec>,
/// Allow customization of either the wasmCloud or NATS leaf container inside the wasmCloud host pod.
pub container_template_additions: Option<ContainerTemplateAdditions>,
}
#[derive(Serialize, Deserialize, Clone, Debug, JsonSchema)]
#[serde(rename_all = "camelCase")]
pub struct ContainerTemplateAdditions {
#[schemars(schema_with = "container_schema")]
#[serde(default, skip_serializing_if = "Option::is_none")]
pub nats: Option<Container>,
#[schemars(schema_with = "container_schema")]
#[serde(default, skip_serializing_if = "Option::is_none")]
pub wasmcloud: Option<Container>,
}
#[derive(Serialize, Deserialize, Clone, Debug, JsonSchema)]
#[serde(rename_all = "camelCase")]
pub struct ObservabilityConfiguration {
#[serde(default)]
pub enable: bool,
pub endpoint: String,
pub protocol: Option<OtelProtocol>,
pub logs: Option<OtelSignalConfiguration>,
pub metrics: Option<OtelSignalConfiguration>,
pub traces: Option<OtelSignalConfiguration>,
}
#[derive(Serialize, Deserialize, Debug, Clone, JsonSchema)]
#[serde(rename_all = "camelCase")]
pub enum OtelProtocol {
Grpc,
Http,
}
impl std::fmt::Display for OtelProtocol {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
write!(
f,
"{}",
match self {
OtelProtocol::Grpc => "grpc",
OtelProtocol::Http => "http",
}
)
}
}
#[derive(Serialize, Deserialize, Debug, Clone, JsonSchema)]
#[serde(rename_all = "camelCase")]
pub struct OtelSignalConfiguration {
pub enable: Option<bool>,
pub endpoint: Option<String>,
}
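Per-signal settings override the global observability block. A minimal sketch of that fallback, using a hypothetical `effective_endpoint` helper that is not part of the operator:

```rust
/// Hypothetical helper illustrating the override semantics: a per-signal
/// endpoint wins; otherwise the global endpoint applies.
fn effective_endpoint<'a>(
    global: &'a ObservabilityConfiguration,
    signal: Option<&'a OtelSignalConfiguration>,
) -> &'a str {
    signal
        .and_then(|s| s.endpoint.as_deref())
        .unwrap_or(&global.endpoint)
}
```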
/// This is a workaround for the fact that we can't override the PodSpec schema to make containers
/// an optional field. It generates the OpenAPI schema for the PodSpec type the same way that
/// kube.rs does while dropping any required fields.
fn pod_schema(_gen: &mut SchemaGenerator) -> Schema {
let gen = schemars::gen::SchemaSettings::openapi3()
.with(|s| {
s.inline_subschemas = true;
s.meta_schema = None;
})
.with_visitor(kube::core::schema::StructuralSchemaRewriter)
.into_generator();
let mut val = gen.into_root_schema_for::<PodSpec>();
// Drop `containers` as a required field, along with any others.
val.schema.object.as_mut().unwrap().required = BTreeSet::new();
val.schema.into()
}
/// This is a workaround for the fact that we can't override the Container schema to make name
/// an optional field. It generates the OpenAPI schema for the Container type the same way that
/// kube.rs does while dropping any required fields.
fn container_schema(_gen: &mut SchemaGenerator) -> Schema {
let gen = schemars::gen::SchemaSettings::openapi3()
.with(|s| {
s.inline_subschemas = true;
s.meta_schema = None;
})
.with_visitor(kube::core::schema::StructuralSchemaRewriter)
.into_generator();
let mut val = gen.into_root_schema_for::<Container>();
// Drop `name` as a required field, as it will be filled in from the container
// definition coming from the controller that this configuration gets merged into.
val.schema.object.as_mut().unwrap().required = BTreeSet::new();
val.schema.into()
}
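One way to sanity-check the workaround is to assert that the generated root schema ends up with an empty `required` set. A minimal test sketch (not in the source; it assumes the `pod_schema` function above):

```rust
#[cfg(test)]
mod schema_workaround_tests {
    use super::*;
    use schemars::gen::SchemaGenerator;
    use schemars::schema::Schema;

    #[test]
    fn pod_schema_drops_required_fields() {
        // `pod_schema` ignores the generator it is handed, so a default one suffices.
        let mut gen = SchemaGenerator::default();
        match pod_schema(&mut gen) {
            Schema::Object(obj) => {
                let required = obj.object.map(|o| o.required).unwrap_or_default();
                assert!(required.is_empty(), "`containers` should no longer be required");
            }
            _ => panic!("expected an object schema"),
        }
    }
}
```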
fn default_host_replicas() -> u32 {
1
}
fn default_jetstream_domain() -> String {
@ -55,6 +217,19 @@ fn default_log_level() -> String {
"INFO".to_string()
}
fn default_nats_port() -> u16 {
4222
}
fn default_nats_leafnode_port() -> u16 {
7422
}
#[derive(Deserialize, Serialize, Clone, Debug, JsonSchema)]
pub struct WasmCloudHostCertificates {
pub authorities: Option<Vec<Volume>>,
}
#[derive(Serialize, Deserialize, Clone, Debug, JsonSchema)]
pub struct WasmCloudHostConfigResources {
pub nats: Option<ResourceRequirements>,


@ -15,7 +15,7 @@ spec:
spec:
serviceAccountName: wasmcloud-operator
containers:
- image: ghcr.io/wasmcloud/wasmcloud-operator:latest
- image: ghcr.io/wasmcloud/wasmcloud-operator:0.5.0
imagePullPolicy: Always
name: wasmcloud-operator
ports:
@ -49,6 +49,7 @@ rules:
- services
- configmaps
- serviceaccounts
- pods
verbs:
- get
- list
@ -61,6 +62,7 @@ rules:
- apps
resources:
- deployments
- daemonsets
verbs:
- get
- list
@ -103,6 +105,17 @@ rules:
- list
- patch
- update
- apiGroups:
- discovery.k8s.io
resources:
- endpointslices
verbs:
- create
- delete
- get
- list
- patch
- update
- apiGroups:
- k8s.wasmcloud.dev
resources:


@ -1,3 +1,8 @@
- op: add
path: /spec/template/spec/containers/0/image
value: localhost:5001/wasmcloud-operator:latest
- op: replace
path: /spec/template/spec/containers/0/env/0
value:
name: RUST_LOG
value: info,controller::services=debug,async_nats=warn,controller::controller=debug


@ -0,0 +1,97 @@
apiVersion: k8s.wasmcloud.dev/v1alpha1
kind: WasmCloudHostConfig
metadata:
name: my-wasmcloud-cluster
namespace: default
spec:
# Optional: Number of hosts (pods). Defaults to 1.
hostReplicas: 1
# Required: The lattice to connect the hosts to.
lattice: default
# Optional: Additional labels to apply to the host other than the defaults set in the controller.
hostLabels:
test: value
cluster: kind
# Required: Which wasmCloud version to use.
version: "1.0.4"
# Optional: The image to use for the wasmCloud host.
# If provided, the 'version' field will be ignored.
image: "registry/wasmcloud:tag"
# Optional: The image to use for the NATS leaf that is deployed alongside the wasmCloud host.
# If not provided, the default upstream image will be used.
natsLeafImage: "registry/nats:tag"
# Optional: The name of a secret containing a set of NATS credentials under the 'nats.creds' key.
secretName: "wasmcloud-host-nats-secret"
# Optional: Enable structured logging for host logs. Defaults to "false".
enableStructuredLogging: true
# Optional: The name of a secret containing the registry credentials.
# See https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/#create-a-secret-by-providing-credentials-on-the-command-line
registryCredentialsSecret: "wasmcloud-pull-secret"
# Optional: The control topic prefix to use for the host. Defaults to "wasmbus.ctl"
controlTopicPrefix: "wasmbus.custom-ctl"
# Optional: The leaf node domain to use for the NATS sidecar. Defaults to "leaf".
leafNodeDomain: "custom-leaf"
# Optional: Enable the config service for this host. Defaults to "false".
# Makes wasmCloud host issue requests to a config service on startup.
configServiceEnabled: true
# Optional: The log level to use for the host. Defaults to "INFO".
logLevel: INFO
# Optional: The address of the NATS server to connect to. Defaults to "nats://nats.default.svc.cluster.local".
natsAddress: nats://nats.default.svc.cluster.local
# Optional: Allow the host to deploy using the latest tag on OCI components or providers. Defaults to "false".
allowLatest: true
# Optional: Allow the host to pull artifacts from OCI registries insecurely.
allowedInsecure:
- "localhost:5001"
- "kind-registry:5000"
# Optional: Policy service configuration.
policyService:
# If provided, enables policy checks on start actions and component invocations.
topic: "wasmcloud.policy"
# If provided, allows the host to subscribe to updates on past policy decisions. Requires 'topic' above to be set.
changesTopic: "wasmcloud.policy.changes"
# If provided, allows setting a custom timeout for requesting policy decisions. Defaults to 1000. Requires 'topic' to be set.
timeoutMs: 10000
# Optional: Observability options for configuring the OpenTelemetry integration.
observability:
# NOTE: Enables all signals (logs/metrics/traces) at once. Set it to 'false' and enable each signal individually if you don't need all of them.
enable: true
endpoint: "otel-collector.svc"
# Either 'grpc' or 'http'
protocol: "http"
logs:
enable: false
endpoint: "logs-specific-otel-collector.svc"
metrics:
enable: false
endpoint: "metrics-specific-otel-collector.svc"
traces:
enable: false
endpoint: "traces-specific-otel-collector.svc"
# Optional: Subject prefix that will be used by the host to query for wasmCloud Secrets.
# See https://wasmcloud.com/docs/concepts/secrets for more context
secretsTopicPrefix: "wasmcloud.secrets"
# Optional: The maximum amount of memory, in bytes, that a component can allocate.
maxLinearMemoryBytes: 20000000
# Optional: Additional options to control how the underlying wasmCloud hosts are scheduled in Kubernetes.
# This includes setting resource requirements for the nats and wasmCloud host
# containers along with any additional pod template settings.
schedulingOptions:
# Optional: Enable the following to run the wasmCloud hosts as a DaemonSet. Defaults to "false".
daemonset: true
# Optional: Set the resource requirements for the nats and wasmCloud host containers.
# See https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ for valid values
resources:
nats:
requests:
cpu: "1"
wasmCloudHost:
requests:
cpu: "1"
# Optional: Any additional pod template settings to apply to the wasmCloud host pods.
# See https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.29/#podspec-v1-core for all valid options.
# Note that you *cannot* set the `containers` field here as it is managed by the controller.
podTemplateAdditions:
spec:
nodeSelector:
kubernetes.io/os: linux


@ -0,0 +1,87 @@
# Example setup
This example shows the bare minimum requirements to deploy applications on wasmCloud.
It relies on the Kubernetes `default` namespace for simplicity.
## Install [NATS](https://github.com/nats-io/nats-server)
```bash
helm repo add nats https://nats-io.github.io/k8s/helm/charts/
helm upgrade --install -f nats-values.yaml nats nats/nats
```
Validate installation with:
```bash
# make sure pods are ready
kubectl rollout status deploy,sts -l app.kubernetes.io/instance=nats
```
## Install wasmCloud Application Deployment Manager - [wadm](https://github.com/wasmCloud/wadm)
```bash
helm install wadm -f wadm-values.yaml oci://ghcr.io/wasmcloud/charts/wadm
```
Validate installation with:
```bash
# make sure pods are ready
kubectl rollout status deploy -l app.kubernetes.io/instance=wadm
```
## Install the operator
```bash
kubectl apply -k ../../deploy/base
```
Validate installation with:
```bash
# make sure pods are ready
kubectl rollout status deploy -l app=wasmcloud-operator -n wasmcloud-operator
# apiservice should be available
kubectl get apiservices.apiregistration.k8s.io v1beta1.core.oam.dev
```
## Create a wasmCloud cluster
```bash
kubectl apply -f wasmcloud-host.yaml
```
Check wasmCloud host status with:
```bash
kubectl describe wasmcloudhostconfig wasmcloud-host
```
## Managing applications using kubectl
Install the rust hello world application:
```bash
kubectl apply -f hello-world-application.yaml
```
Check application status with:
```bash
kubectl get applications
```
## Managing applications with wash
Port forward into the NATS cluster (4222 = NATS service, 4223 = NATS WebSockets):
```bash
kubectl port-forward svc/nats 4222:4222 4223:4223
```
In another shell:
```bash
wash app list
```


@ -0,0 +1,53 @@
apiVersion: core.oam.dev/v1beta1
kind: Application
metadata:
name: hello-world
annotations:
version: v0.0.1
description: "HTTP hello world demo in Rust, using the WebAssembly Component Model and WebAssembly Interfaces Types (WIT)"
wasmcloud.dev/authors: wasmCloud team
wasmcloud.dev/source-url: https://github.com/wasmCloud/wasmCloud/blob/main/examples/rust/components/http-hello-world/wadm.yaml
wasmcloud.dev/readme-md-url: https://github.com/wasmCloud/wasmCloud/blob/main/examples/rust/components/http-hello-world/README.md
wasmcloud.dev/homepage: https://github.com/wasmCloud/wasmCloud/tree/main/examples/rust/components/http-hello-world
wasmcloud.dev/categories: |
http,http-server,rust,hello-world,example
spec:
components:
- name: http-component
type: component
properties:
image: ghcr.io/wasmcloud/components/http-hello-world-rust:0.1.0
traits:
# Govern the spread/scheduling of the component
- type: daemonscaler
properties:
replicas: 100
# Add a capability provider that enables HTTP access
- name: httpserver
type: capability
properties:
image: ghcr.io/wasmcloud/http-server:0.21.0
traits:
# Establish a unidirectional link from this http server provider (the "source")
# to the `http-component` component (the "target") so the component can handle incoming HTTP requests.
#
# The source (this provider) is configured such that the HTTP server listens on 0.0.0.0:8000.
# When running the application on Kubernetes with the wasmCloud operator, you can change the
# port but the address must be 0.0.0.0.
- type: link
properties:
target: http-component
namespace: wasi
package: http
interfaces: [incoming-handler]
source_config:
- name: default-http
properties:
address: 0.0.0.0:8000
# When running the application on Kubernetes with the wasmCloud operator,
# the operator automatically creates a Kubernetes service for applications that use
# the httpserver provider with a daemonscaler.
- type: daemonscaler
properties:
replicas: 1


@ -0,0 +1,16 @@
config:
cluster:
enabled: true
replicas: 3
leafnodes:
enabled: true
websocket:
enabled: true
port: 4223
jetstream:
enabled: true
fileStore:
pvc:
size: 10Gi
merge:
domain: default


@ -0,0 +1,6 @@
wadm:
image:
tag: v0.12.2
config:
nats:
server: "nats.default.svc.cluster.local:4222"


@ -0,0 +1,7 @@
apiVersion: k8s.wasmcloud.dev/v1alpha1
kind: WasmCloudHostConfig
metadata:
name: wasmcloud-host
spec:
lattice: default
version: "1.0.4"

hack/run-kind-cluster.sh Executable file

@ -0,0 +1,64 @@
#!/bin/sh
set -o errexit
# 1. Create registry container unless it already exists
reg_name='kind-registry'
reg_port='5001'
if [ "$(docker inspect -f '{{.State.Running}}' "${reg_name}" 2>/dev/null || true)" != 'true' ]; then
docker run \
-d --restart=always -p "127.0.0.1:${reg_port}:5000" --name "${reg_name}" \
registry:2
fi
# 2. Create kind cluster with containerd registry config dir enabled
# TODO: kind will eventually enable this by default and this patch will
# be unnecessary.
#
# See:
# https://github.com/kubernetes-sigs/kind/issues/2875
# https://github.com/containerd/containerd/blob/main/docs/cri/config.md#registry-configuration
# See: https://github.com/containerd/containerd/blob/main/docs/hosts.md
cat <<EOF | kind create cluster --image kindest/node:v1.29.4 --config=-
kind: Cluster
apiVersion: kind.x-k8s.io/v1alpha4
containerdConfigPatches:
- |-
[plugins."io.containerd.grpc.v1.cri".registry]
config_path = "/etc/containerd/certs.d"
EOF
# 3. Add the registry config to the nodes
#
# This is necessary because localhost resolves to loopback addresses that are
# network-namespace local.
# In other words: localhost in the container is not localhost on the host.
#
# We want a consistent name that works from both ends, so we tell containerd to
# alias localhost:${reg_port} to the registry container when pulling images
REGISTRY_DIR="/etc/containerd/certs.d/localhost:${reg_port}"
for node in $(kind get nodes); do
docker exec "${node}" mkdir -p "${REGISTRY_DIR}"
cat <<EOF | docker exec -i "${node}" cp /dev/stdin "${REGISTRY_DIR}/hosts.toml"
[host."http://${reg_name}:5000"]
EOF
done
# 4. Connect the registry to the cluster network if not already connected
# This allows kind to bootstrap the network but ensures they're on the same network
if [ "$(docker inspect -f='{{json .NetworkSettings.Networks.kind}}' "${reg_name}")" = 'null' ]; then
docker network connect "kind" "${reg_name}"
fi
# 5. Document the local registry
# https://github.com/kubernetes/enhancements/tree/master/keps/sig-cluster-lifecycle/generic/1755-communicating-a-local-registry
cat <<EOF | kubectl apply -f -
apiVersion: v1
kind: ConfigMap
metadata:
name: local-registry-hosting
namespace: kube-public
data:
localRegistryHosting.v1: |
host: "localhost:${reg_port}"
help: "https://kind.sigs.k8s.io/docs/user/local-registry/"
EOF


@ -1,20 +0,0 @@
apiVersion: k8s.wasmcloud.dev/v1alpha1
kind: WasmCloudHostConfig
metadata:
name: my-wasmcloud-cluster
namespace: default
spec:
hostReplicas: 2
issuers:
- CDKF6OKPOBQKAX57UOXO7SCHURTOZWKWIVPC2HFJTGFXY5VJX44ECEHH
# The lattice to connect the hosts to
lattice: 83a5b52e-17cf-4080-bac8-f844099f142e
# Additional labels to apply to the host other than the defaults set in the controller
hostLabels:
test: value
# Which wasmCloud version to use
version: 0.81.0
# The name of a secret in the same namespace that provides the required secrets.
secretName: cluster-secrets
logLevel: INFO
natsAddress: nats://nats-cluster.default.svc.cluster.local

src/config.rs Normal file

@ -0,0 +1,13 @@
use serde::{Deserialize, Serialize};
/// Configuration for the operator. If you are configuring the operator using environment variables,
/// then all values need to be prefixed with "WASMCLOUD_OPERATOR".
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, Default)]
pub struct OperatorConfig {
#[serde(default = "default_stream_replicas")]
pub stream_replicas: u16,
}
fn default_stream_replicas() -> u16 {
1
}
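With the `WASMCLOUD_OPERATOR` prefix, `WASMCLOUD_OPERATOR_STREAM_REPLICAS=3` deserializes to `stream_replicas: 3`. A minimal loading sketch with the `config` crate, mirroring what `main.rs` does below (the standalone `load_operator_config` helper is hypothetical):

```rust
use anyhow::anyhow;
use config::{Config, Environment};

/// Hypothetical loader; the operator performs the equivalent steps inline in `main.rs`.
fn load_operator_config() -> anyhow::Result<OperatorConfig> {
    let cfg = Config::builder()
        // Reads WASMCLOUD_OPERATOR_* environment variables.
        .add_source(Environment::with_prefix("WASMCLOUD_OPERATOR"))
        .build()
        .map_err(|e| anyhow!("failed to build config: {e}"))?;
    cfg.try_deserialize()
        .map_err(|e| anyhow!("failed to parse config: {e}"))
}
```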

File diff suppressed because it is too large


@ -31,6 +31,9 @@ pub enum Error {
#[error("Error retrieving secrets: {0}")]
SecretError(String),
#[error("Certificate error: {0}")]
CertificateError(String),
#[error("Error rendering template: {0}")]
RenderError(#[from] RenderError),
}
@ -44,6 +47,7 @@ impl IntoResponse for Error {
}
}
pub mod config;
pub mod controller;
pub mod discovery;
pub mod docker_secret;
@ -51,6 +55,7 @@ pub mod header;
pub(crate) mod openapi;
pub mod resources;
pub mod router;
pub(crate) mod services;
pub(crate) mod table;
pub use crate::controller::*;


@ -1,7 +1,8 @@
use anyhow::{anyhow, Result};
use axum_server::{tls_rustls::RustlsConfig, Handle};
use controller::{State, WasmcloudConfig};
use controller::{config::OperatorConfig, State};
use config::Config;
use k8s_openapi::apiextensions_apiserver::pkg::apis::apiextensions::v1::CustomResourceDefinition;
use k8s_openapi::apimachinery::pkg::apis::meta::v1::ObjectMeta;
use k8s_openapi::kube_aggregator::pkg::apis::apiregistration::v1::{
@ -12,10 +13,12 @@ use kube::{
client::Client,
CustomResourceExt,
};
use opentelemetry::sdk::{
trace::{self, RandomIdGenerator, Sampler},
Resource as OTELResource,
use opentelemetry::KeyValue;
use opentelemetry_sdk::{
trace::{RandomIdGenerator, Sampler},
Resource,
};
use std::io::IsTerminal;
use std::net::SocketAddr;
use std::time::Duration;
use tracing::{error, info};
@ -25,14 +28,27 @@ use wasmcloud_operator_types::v1alpha1::WasmCloudHostConfig;
#[tokio::main]
async fn main() -> Result<()> {
let args = std::env::args().collect::<Vec<_>>();
if args.iter().any(|arg| arg == "-V" || arg == "--version") {
let version = version();
println!("{} {version}", env!("CARGO_BIN_NAME"));
std::process::exit(0);
}
let tracing_enabled = std::env::var("OTEL_EXPORTER_OTLP_ENDPOINT").is_ok();
configure_tracing(tracing_enabled).map_err(|e| {
error!("Failed to configure tracing: {}", e);
e
})?;
info!("Starting controller");
info!("Starting operator");
let config = WasmcloudConfig {};
let cfg = Config::builder()
.add_source(config::Environment::with_prefix("WASMCLOUD_OPERATOR"))
.build()
.map_err(|e| anyhow!("Failed to build config: {}", e))?;
let config: OperatorConfig = cfg
.try_deserialize()
.map_err(|e| anyhow!("Failed to parse config: {}", e))?;
let client = Client::try_default().await?;
install_crd(&client).await?;
@ -72,12 +88,12 @@ fn configure_tracing(enabled: bool) -> anyhow::Result<()> {
.tracing()
.with_exporter(opentelemetry_otlp::new_exporter().tonic())
.with_trace_config(
trace::config()
opentelemetry_sdk::trace::config()
.with_sampler(Sampler::AlwaysOn)
.with_id_generator(RandomIdGenerator::default())
.with_max_attributes_per_span(32)
.with_max_events_per_span(32)
.with_resource(OTELResource::new(vec![opentelemetry::KeyValue::new(
.with_resource(Resource::new(vec![KeyValue::new(
"service.name",
"wasmcloud-operator",
)])),
@ -87,7 +103,7 @@ fn configure_tracing(enabled: bool) -> anyhow::Result<()> {
let env_filter_layer = tracing_subscriber::EnvFilter::from_default_env();
let log_layer = tracing_subscriber::fmt::layer()
.with_writer(std::io::stderr)
.with_ansi(atty::is(atty::Stream::Stderr));
.with_ansi(std::io::stderr().is_terminal());
let otel_layer = tracing_opentelemetry::layer().with_tracer(tracer);
@ -183,3 +199,7 @@ async fn install_crd(client: &Client) -> anyhow::Result<()> {
Ok(())
}
fn version() -> &'static str {
option_env!("CARGO_VERSION_INFO").unwrap_or(env!("CARGO_PKG_VERSION"))
}

File diff suppressed because it is too large


@ -2,7 +2,7 @@ use std::collections::{BTreeMap, HashMap, HashSet};
use std::sync::Arc;
use anyhow::{anyhow, Error};
use async_nats::ConnectOptions;
use async_nats::{Client as NatsClient, ConnectError, ConnectOptions};
use axum::{
body::Bytes,
extract::{Path, State as AxumState},
@ -12,21 +12,21 @@ use axum::{
};
use kube::{
api::{Api, ListParams},
client::Client,
client::Client as KubeClient,
core::{ListMeta, ObjectMeta},
};
use secrecy::{ExposeSecret, SecretString};
use serde::Serialize;
use serde_json::json;
use serde_json::{json, Value};
use tokio::sync::RwLock;
use tracing::error;
use uuid::Uuid;
use wadm::{
model::Manifest,
server::{
DeleteResult, DeployResult, GetResult, ModelSummary, PutResult, StatusResult, StatusType,
},
use wadm_client::{error::ClientError, Client as WadmClient};
use wadm_types::{
api::{ModelSummary, Status, StatusType},
Manifest,
};
use wasmcloud_operator_types::v1alpha1::WasmCloudHostConfig;
use crate::{
@ -46,6 +46,8 @@ use crate::{
*/
const GROUP_VERSION: &str = "core.oam.dev/v1beta1";
const KUBECTL_LAST_APPLIED_CONFIG_ANNOTATION: &str =
"kubectl.kubernetes.io/last-applied-configuration";
pub struct AppError(Error);
@ -237,20 +239,17 @@ impl From<Vec<ModelSummary>> for ApplicationTable {
}
}
impl From<Vec<Manifest>> for ApplicationTable {
fn from(manifests: Vec<Manifest>) -> Self {
impl From<Vec<CombinedManifest>> for ApplicationTable {
fn from(manifests: Vec<CombinedManifest>) -> Self {
let mut table = Self::default();
let rows = manifests
.into_iter()
.map(|m| TableRow {
.map(|cm| TableRow {
cells: vec![
m.metadata.name,
"N/A".to_string(),
match m.metadata.annotations.get("version") {
Some(v) => v.to_owned(),
None => "N/A".to_string(),
},
"N/A".to_string(),
cm.name(),
cm.deployed_version(),
cm.latest_version(),
cm.status(),
],
})
.collect();
@ -260,6 +259,42 @@ impl From<Vec<Manifest>> for ApplicationTable {
}
}
struct CombinedManifest {
manifest: Manifest,
status: Status,
}
impl CombinedManifest {
pub(crate) fn new(manifest: Manifest, status: Status) -> Self {
Self { manifest, status }
}
pub(crate) fn name(&self) -> String {
self.manifest.metadata.name.to_owned()
}
pub(crate) fn deployed_version(&self) -> String {
match self.manifest.metadata.annotations.get("version") {
Some(v) => v.to_owned(),
None => "N/A".to_string(),
}
}
pub(crate) fn latest_version(&self) -> String {
self.status.version.to_owned()
}
pub(crate) fn status(&self) -> String {
match self.status.info.status_type {
StatusType::Undeployed => "Undeployed",
StatusType::Reconciling => "Reconciling",
StatusType::Deployed => "Deployed",
StatusType::Failed => "Failed",
}
.to_string()
}
}
#[utoipa::path(
post,
path = "/apis/core.oam.dev/v1beta1/namespaces/{namespace}/applications"
@ -269,110 +304,37 @@ pub async fn create_application(
AxumState(state): AxumState<State>,
body: Bytes,
) -> impl IntoResponse {
let client = match Client::try_default().await {
let kube_client = match KubeClient::try_default().await {
Ok(c) => c,
Err(e) => return internal_error(anyhow!("unable to initialize kubernetes client: {}", e)),
};
let configs: Api<WasmCloudHostConfig> = Api::namespaced(client, &namespace);
let configs: Api<WasmCloudHostConfig> = Api::namespaced(kube_client, &namespace);
let cfgs = match configs.list(&ListParams::default()).await {
Ok(objs) => objs,
Err(e) => return internal_error(anyhow!("Unable to list cosmonic host configs: {}", e)),
};
let mut cfgs = cfgs.iter();
// TODO(joonas): Remove this once we move to pulling NATS creds+secrets from lattice instead of hosts.
let mut lattice_id = String::new();
let nats_client = loop {
let Some(cfg) = cfgs.next() else {
return internal_error(anyhow!(
"unable to find Host Config to use for initializing nats client "
));
let (nats_client, lattice_id) =
match get_lattice_connection(cfgs.into_iter(), state, namespace).await {
Ok(data) => data,
Err(resp) => return resp,
};
let cluster_url = cfg.spec.nats_address.clone();
lattice_id = cfg.spec.lattice.clone();
let lattice_name = cfg.metadata.name.clone().unwrap();
let nst: NameNamespace = NameNamespace::new(lattice_name.clone(), namespace.clone());
match get_client(&cluster_url, state.nats_creds.clone(), nst).await {
Ok(c) => break Some(c),
Err(e) => {
error!("error connecting to nats: {}", e);
continue;
}
};
};
let Some(nats_client) = nats_client else {
return internal_error(anyhow!("unable to initialize nats client"));
};
let wadm_client = WadmClient::from_nats_client(&lattice_id, None, nats_client);
let model: serde_json::Value = match serde_json::from_slice(&body) {
let manifest: Manifest = match serde_json::from_slice(&body) {
Ok(v) => v,
Err(e) => return internal_error(anyhow!("unable to decode the patch: {}", e)),
};
let put =
match wash_lib::app::put_model(&nats_client, Some(lattice_id.clone()), &model.to_string())
.await
{
Ok(res) => res,
let (application_name, _application_version) =
match wadm_client.put_and_deploy_manifest(manifest).await {
Ok(application_bits) => application_bits,
Err(e) => return internal_error(anyhow!("could not deploy app: {}", e)),
};
let model_name = match put.result {
PutResult::Created | PutResult::NewVersion => put.name,
_ => {
// TODO(joonas): Add handling for the case where the model version
// might already exist (from prior deploy or otherwise).
return internal_error(anyhow!(
"unexpected response from wadm: result={:?}, message={}",
put.result,
put.message
));
}
};
let deploy = match wash_lib::app::deploy_model(
&nats_client,
Some(lattice_id.clone()),
&model_name,
Some(put.current_version.clone()),
)
.await
{
Ok(res) => res,
Err(e) => return internal_error(anyhow!("could not deploy app: {}", e)),
};
if deploy.result != DeployResult::Acknowledged {
return internal_error(anyhow!(
"unexpected response from wadm: result={:?}, message={}",
deploy.result,
deploy.message
));
}
// Get model from WADM for displaying in Kubernetes
let get = match wash_lib::app::get_model_details(
&nats_client,
Some(lattice_id),
&model_name,
Some(put.current_version),
)
.await
{
Ok(res) => res,
Err(e) => return internal_error(anyhow!("error getting deployed app: {}", e)),
};
match get.result {
GetResult::Success => Json(Application::new(model_name)).into_response(),
// Either we received an error or could not find the deployed application, so return an error:
_ => internal_error(anyhow!(
"unexpected response from wadm: result={:?}, message={}",
get.result,
get.message
)),
}
Json(Application::new(application_name)).into_response()
}
#[utoipa::path(get, path = "/apis/core.oam.dev/v1beta1/applications")]
@ -383,12 +345,12 @@ pub async fn list_all_applications(
// TODO(joonas): Use lattices (or perhaps Controller specific/special creds) for instantiating a NATS client.
// TODO(joonas): Add watch support to stop Argo from spamming this endpoint every second.
let client = match Client::try_default().await {
let kube_client = match KubeClient::try_default().await {
Ok(c) => c,
Err(e) => return internal_error(anyhow!("unable to initialize kubernetes client: {}", e)),
};
let configs: Api<WasmCloudHostConfig> = Api::all(client);
let configs: Api<WasmCloudHostConfig> = Api::all(kube_client);
let cfgs = match configs.list(&ListParams::default()).await {
Ok(objs) => objs,
Err(e) => return internal_error(anyhow!("Unable to list cosmonic host configs: {}", e)),
@ -405,7 +367,14 @@ pub async fn list_all_applications(
let secret = map.get(&nst);
// Prevent listing applications within a given lattice more than once
if !lattices.contains(&lattice_id) {
let result = match list_apps(&cfg.spec.nats_address, secret, lattice_id.clone()).await {
let result = match list_apps(
&cfg.spec.nats_address,
&cfg.spec.nats_client_port,
secret,
lattice_id.clone(),
)
.await
{
Ok(apps) => apps,
Err(e) => return internal_error(anyhow!("unable to list applications: {}", e)),
};
@ -439,11 +408,11 @@ pub async fn list_applications(
Path(namespace): Path<String>,
AxumState(state): AxumState<State>,
) -> impl IntoResponse {
let client = match Client::try_default().await {
let kube_client = match KubeClient::try_default().await {
Ok(c) => c,
Err(e) => return internal_error(anyhow!("unable to initialize kubernetes client: {}", e)),
};
let configs: Api<WasmCloudHostConfig> = Api::namespaced(client, &namespace);
let configs: Api<WasmCloudHostConfig> = Api::namespaced(kube_client, &namespace);
let cfgs = match configs.list(&ListParams::default()).await {
Ok(objs) => objs,
Err(e) => return internal_error(anyhow!("Unable to list cosmonic host configs: {}", e)),
@ -459,7 +428,14 @@ pub async fn list_applications(
let secret = map.get(&nst);
// This is to check that we don't list a lattice more than once
if !lattices.contains(&lattice_id) {
let result = match list_apps(&cfg.spec.nats_address, secret, lattice_id.clone()).await {
let result = match list_apps(
&cfg.spec.nats_address,
&cfg.spec.nats_client_port,
secret,
lattice_id.clone(),
)
.await
{
Ok(apps) => apps,
Err(e) => return internal_error(anyhow!("unable to list applications: {}", e)),
};
@ -485,37 +461,40 @@ pub async fn list_applications(
pub async fn list_apps(
cluster_url: &str,
port: &u16,
creds: Option<&SecretString>,
lattice_id: String,
) -> Result<Vec<ModelSummary>, Error> {
let client = match creds {
let addr = format!("{}:{}", cluster_url, port);
let nats_client = match creds {
Some(creds) => {
ConnectOptions::with_credentials(creds.expose_secret())?
.connect(cluster_url)
.connect(addr)
.await?
}
None => ConnectOptions::new().connect(cluster_url).await?,
None => ConnectOptions::new().connect(addr).await?,
};
let models = wash_lib::app::get_models(&client, Some(lattice_id)).await?;
Ok(models)
let wadm_client = WadmClient::from_nats_client(&lattice_id, None, nats_client);
Ok(wadm_client.list_manifests().await?)
}
async fn get_client(
pub async fn get_nats_client(
cluster_url: &str,
port: &u16,
nats_creds: Arc<RwLock<HashMap<NameNamespace, SecretString>>>,
namespace: NameNamespace,
) -> Result<async_nats::Client, async_nats::ConnectError> {
) -> Result<NatsClient, ConnectError> {
let addr = format!("{}:{}", cluster_url, port);
let creds = nats_creds.read().await;
match creds.get(&namespace) {
Some(creds) => {
let creds = creds.expose_secret();
ConnectOptions::with_credentials(creds)
.expect("unable to create nats client")
.connect(cluster_url)
.connect(addr)
.await
}
None => ConnectOptions::new().connect(cluster_url).await,
None => ConnectOptions::new().connect(addr).await,
}
}
@ -528,119 +507,77 @@ pub async fn get_application(
Path((namespace, name)): Path<(String, String)>,
AxumState(state): AxumState<State>,
) -> impl IntoResponse {
let client = match Client::try_default().await {
let kube_client = match KubeClient::try_default().await {
Ok(c) => c,
Err(e) => return internal_error(anyhow!("unable to initialize kubernetes client: {}", e)),
};
let configs: Api<WasmCloudHostConfig> = Api::namespaced(client, &namespace);
let configs: Api<WasmCloudHostConfig> = Api::namespaced(kube_client, &namespace);
let cfgs = match configs.list(&ListParams::default()).await {
Ok(objs) => objs,
Err(e) => return internal_error(anyhow!("unable to list cosmonic host configs: {}", e)),
};
let mut cfgs = cfgs.iter();
// TODO(joonas): Remove this once we move to pulling NATS creds+secrets from lattice instead of hosts.
let mut lattice_id = String::new();
let nats_client = loop {
let Some(cfg) = cfgs.next() else {
return internal_error(anyhow!(
"unable to find Host Config to use for initializing nats client "
));
let (nats_client, lattice_id) =
match get_lattice_connection(cfgs.into_iter(), state, namespace.clone()).await {
Ok(data) => data,
Err(resp) => return resp,
};
let cluster_url = cfg.spec.nats_address.clone();
lattice_id = cfg.spec.lattice.clone();
let lattice_name = cfg.metadata.name.clone().unwrap();
let nst: NameNamespace = NameNamespace::new(lattice_name.clone(), namespace.clone());
match get_client(&cluster_url, state.nats_creds.clone(), nst).await {
Ok(c) => break Some(c),
Err(e) => {
error!("error connecting to nats: {}", e);
continue;
let wadm_client = WadmClient::from_nats_client(&lattice_id, None, nats_client);
let manifest = match wadm_client.get_manifest(&name, None).await {
Ok(m) => m,
Err(e) => match e {
ClientError::NotFound(_) => {
return not_found_error(anyhow!("applications \"{}\" not found", name))
}
};
_ => return internal_error(anyhow!("unable to request app from wadm: {}", e)),
},
};
let Some(nats_client) = nats_client else {
return internal_error(anyhow!("unable to initialize nats client"));
};
let get =
match wash_lib::app::get_model_details(&nats_client, Some(lattice_id.clone()), &name, None)
.await
{
Ok(res) => res,
Err(e) => {
return internal_error(anyhow!("unable to request app from wadm: {}", e));
let status = match wadm_client.get_manifest_status(&name).await {
Ok(s) => s,
Err(e) => match e {
ClientError::NotFound(_) => {
return not_found_error(anyhow!("applications \"{}\" not found", name))
}
};
let status = match wash_lib::app::get_model_status(
&nats_client,
Some(lattice_id.clone()),
&name,
)
.await
{
Ok(res) => res,
Err(e) => {
return internal_error(anyhow!("unable to request app status from wadm: {}", e));
}
_ => return internal_error(anyhow!("unable to request app status from wadm: {}", e)),
},
};
match status.result {
StatusResult::Ok => {}
StatusResult::NotFound => {
return not_found_error(anyhow!("applications \"{}\" not found", name));
match accept.into() {
As::Table => {
let combined_manifest = CombinedManifest::new(manifest, status);
Json(ApplicationTable::from(vec![combined_manifest])).into_response()
}
StatusResult::Error => {
return internal_error(anyhow!(
"unexpected response from wadm: result={:?}, message={}",
status.result,
status.message
));
}
};
if get.result == GetResult::Success {
if let Some(manifest) = get.manifest {
let response = match accept.into() {
As::Table => Json(ApplicationTable::from(vec![manifest])).into_response(),
As::NotSpecified => {
// TODO(joonas): This is a terrible hack, but for now it's what we need to do to satisfy Argo/Kubernetes since WADM doesn't support this metadata.
let mut manifest_value = serde_json::to_value(&manifest).unwrap();
// TODO(joonas): We should add lattice id to this as well, but we need it in every place where the application is listed.
let ns = format!("{}/{}", &name, &manifest.version());
let uid = Uuid::new_v5(&Uuid::NAMESPACE_OID, ns.as_bytes());
manifest_value["metadata"]["uid"] = json!(uid.to_string());
manifest_value["metadata"]["resourceVersion"] = json!(uid.to_string());
manifest_value["metadata"]["namespace"] = json!(namespace);
manifest_value["metadata"]["labels"] = json!({
"app.kubernetes.io/instance": &name
});
// TODO(joonas): refactor status and the metadata inputs into a struct we could just serialize
// The custom health check we provide for Argo will handle the case where status is missing, so this is fine for now.
if let Some(status) = status.status {
let phase = match status.info.status_type {
StatusType::Undeployed => "Undeployed",
StatusType::Reconciling => "Reconciling",
StatusType::Deployed => "Deployed",
StatusType::Failed => "Failed",
};
manifest_value["status"] = json!({
"phase": phase,
});
}
Json(manifest_value).into_response()
}
// TODO(joonas): Add better error handling here
t => return internal_error(anyhow!("unknown type: {}", t)),
As::NotSpecified => {
// TODO(joonas): This is a terrible hack, but for now it's what we need to do to satisfy Argo/Kubernetes since WADM doesn't support this metadata.
let mut manifest_value = serde_json::to_value(&manifest).unwrap();
// TODO(joonas): We should add lattice id to this as well, but we need it in every place where the application is listed.
let ns = format!("{}/{}", &name, &manifest.version());
let uid = Uuid::new_v5(&Uuid::NAMESPACE_OID, ns.as_bytes());
manifest_value["metadata"]["uid"] = json!(uid.to_string());
manifest_value["metadata"]["resourceVersion"] = json!(uid.to_string());
manifest_value["metadata"]["namespace"] = json!(namespace);
manifest_value["metadata"]["labels"] = json!({
"app.kubernetes.io/instance": &name
});
// TODO(joonas): refactor status and the metadata inputs into a struct we could just serialize
// The custom health check we provide for Argo will handle the case where status is missing, so this is fine for now.
let phase = match status.info.status_type {
StatusType::Undeployed => "Undeployed",
StatusType::Reconciling => "Reconciling",
StatusType::Deployed => "Deployed",
StatusType::Failed => "Failed",
};
return response;
manifest_value["status"] = json!({
"phase": phase,
});
Json(manifest_value).into_response()
}
};
not_found_error(anyhow!("applications \"{}\" not found", name))
// TODO(joonas): Add better error handling here
t => internal_error(anyhow!("unknown type: {}", t)),
}
}
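The `Uuid::new_v5` call above is doing real work: version-5 UUIDs are deterministic, so the same name/version pair always yields the same `uid` and `resourceVersion`, which keeps clients such as Argo from seeing phantom changes. The idea in isolation (`stable_uid` is a hypothetical helper):

```rust
use uuid::Uuid;

/// Hypothetical helper showing the deterministic uid construction used above.
fn stable_uid(name: &str, version: &str) -> Uuid {
    let ns = format!("{}/{}", name, version);
    // new_v5 hashes the namespace and input, so equal inputs give equal uids.
    Uuid::new_v5(&Uuid::NAMESPACE_OID, ns.as_bytes())
}
```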
#[utoipa::path(
@ -652,123 +589,102 @@ pub async fn patch_application(
AxumState(state): AxumState<State>,
body: Bytes,
) -> impl IntoResponse {
let client = match Client::try_default().await {
let kube_client = match KubeClient::try_default().await {
Ok(c) => c,
Err(e) => return internal_error(anyhow!("unable to initialize kubernetes client: {}", e)),
};
let configs: Api<WasmCloudHostConfig> = Api::namespaced(client, &namespace);
let configs: Api<WasmCloudHostConfig> = Api::namespaced(kube_client, &namespace);
let cfgs = match configs.list(&ListParams::default()).await {
Ok(objs) => objs,
Err(e) => return internal_error(anyhow!("unable to list cosmonic host configs: {}", e)),
};
let mut cfgs = cfgs.iter();
// TODO(joonas): Remove this once we move to pulling NATS creds+secrets from lattice instead of hosts.
let mut lattice_id = String::new();
let nats_client = loop {
let Some(cfg) = cfgs.next() else {
return internal_error(anyhow!(
"unable to find Host Config to use for initializing nats client "
));
let (nats_client, lattice_id) =
match get_lattice_connection(cfgs.into_iter(), state, namespace).await {
Ok(data) => data,
Err(resp) => return resp,
};
let cluster_url = cfg.spec.nats_address.clone();
lattice_id = cfg.spec.lattice.clone();
let lattice_name = cfg.metadata.name.clone().unwrap();
let nst: NameNamespace = NameNamespace::new(lattice_name.clone(), namespace.clone());
match get_client(&cluster_url, state.nats_creds.clone(), nst).await {
Ok(c) => break Some(c),
Err(e) => {
error!("error connecting to nats: {}", e);
continue;
let wadm_client = WadmClient::from_nats_client(&lattice_id, None, nats_client);
let current_manifest = match wadm_client.get_manifest(&name, None).await {
Ok(m) => m,
Err(e) => match e {
ClientError::NotFound(_) => {
return not_found_error(anyhow!("applications \"{}\" not found", name))
}
};
_ => return internal_error(anyhow!("unable to request app from wadm: {}", e)),
},
};
let Some(nats_client) = nats_client else {
return internal_error(anyhow!("unable to initialize nats client"));
};
// First, check if the model exists.
// TODO(joonas): we should likely fetch the version of the manifest that's running in Kubernetes
// TODO(joonas): Should this use model.status instead of model.get?
let get =
match wash_lib::app::get_model_details(&nats_client, Some(lattice_id.clone()), &name, None)
.await
{
Ok(res) => res,
Err(e) => {
return internal_error(anyhow!("unable to find app: {}", e));
}
};
if get.result != GetResult::Success {
return internal_error(anyhow!(
"unexpected response from wadm: result={:?}, reason={}",
get.result,
get.message
));
}
// Prepare manifest for patching
let Some(manifest) = get.manifest else {
return internal_error(anyhow!("no manifest was found for app: {}", &name));
};
let mut model = serde_json::to_value(manifest).unwrap();
let mut current = serde_json::to_value(current_manifest).unwrap();
// Parse the Kubernetes-provided RFC 7386 patch
let patch: serde_json::Value = match serde_json::from_slice(&body) {
let patch = match serde_json::from_slice::<Value>(&body) {
Ok(p) => p,
Err(e) => return internal_error(anyhow!("unable to decode the patch: {}", e)),
};
// Attempt to patch the currently running version
json_patch::merge(&mut model, &patch);
// Remove kubectl.kubernetes.io/last-applied-configuration annotation before
// we compare against the patch, otherwise we'll always end up creating a new version.
let last_applied_configuration = current
.get_mut("metadata")
.and_then(|metadata| metadata.get_mut("annotations"))
.and_then(|annotations| annotations.as_object_mut())
.and_then(|annotations| annotations.remove(KUBECTL_LAST_APPLIED_CONFIG_ANNOTATION));
let put =
match wash_lib::app::put_model(&nats_client, Some(lattice_id.clone()), &model.to_string())
.await
{
Ok(res) => res,
Err(e) => {
return internal_error(anyhow!("could not update manifest for deploy: {}", e))
}
};
// TODO(joonas): This doesn't quite work as intended at the moment,
// there are some differences in terms like replicas vs. instances:
// * Add(AddOperation { path: "/spec/components/0/traits/0/properties/replicas", value: Number(1) }),
// * Remove(RemoveOperation { path: "/spec/components/0/traits/0/properties/instances" }),
//
// which cause the server to always patch. Also, top-level entries such
// as apiVersion, kind and metadata are always removed.
//
// let diff = json_patch::diff(&current, &patch);
// if diff.is_empty() {
// // If there's nothing to patch, return early.
// return Json(()).into_response();
// };
match put.result {
PutResult::NewVersion => {}
_ => {
// For now we have to check the error message to see if we can continue,
// despite getting an error.
if !put.message.contains("already exists") {
return internal_error(anyhow!(
"unexpected response from wadm: result={:?}, message={}",
put.result,
put.message
));
}
}
};
let deploy = match wash_lib::app::deploy_model(
&nats_client,
Some(lattice_id),
&name,
Some(put.current_version),
)
.await
// Remove current version so that either a new version is generated,
// or the one set in the incoming patch gets used.
if let Some(annotations) = current
.get_mut("metadata")
.and_then(|metadata| metadata.get_mut("annotations"))
.and_then(|annotations| annotations.as_object_mut())
{
Ok(res) => res,
Err(e) => return internal_error(anyhow!("could not deploy app: {}", e)),
annotations.remove("version");
}
// Attempt to patch the currently running version
json_patch::merge(&mut current, &patch);
// Re-insert "kubectl.kubernetes.io/last-applied-configuration" if one was set
if let Some(last_applied_config) = last_applied_configuration {
if let Some(annotations) = current
.get_mut("metadata")
.and_then(|metadata| metadata.get_mut("annotations"))
.and_then(|annotations| annotations.as_object_mut())
{
annotations.insert(
KUBECTL_LAST_APPLIED_CONFIG_ANNOTATION.to_string(),
last_applied_config,
);
}
}
let updated_manifest = match serde_json::from_value::<Manifest>(current) {
Ok(m) => m,
Err(e) => return internal_error(anyhow!("unable to patch the application: {}", e)),
};
match deploy.result {
DeployResult::Acknowledged => Json(Application::new(name)).into_response(),
DeployResult::Error => internal_error(anyhow!(
"unexpected response from wadm: result={:?}, message={}",
deploy.result,
deploy.message
)),
DeployResult::NotFound => not_found_error(anyhow!("applications \"{}\" not found", &name)),
match wadm_client.put_and_deploy_manifest(updated_manifest).await {
Ok((app_name, _)) => Json(Application::new(app_name)).into_response(),
Err(e) => match e {
ClientError::NotFound(_) => {
not_found_error(anyhow!("applications \"{}\" not found", &name))
}
_ => internal_error(anyhow!("could not update application: {}", e)),
},
}
}
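Since the handler leans entirely on RFC 7386 merge-patch semantics, a standalone example may help: objects merge recursively, `null` deletes a key, and any other value replaces. A sketch using `json_patch::merge` as above:

```rust
use serde_json::json;

fn merge_patch_example() {
    let mut doc = json!({
        "metadata": { "annotations": { "version": "v1", "team": "platform" } }
    });
    let patch = json!({
        "metadata": { "annotations": { "version": null, "owner": "ops" } }
    });
    // Applies the RFC 7386 rules in place: `version` is removed, `owner` added.
    json_patch::merge(&mut doc, &patch);
    assert_eq!(
        doc,
        json!({ "metadata": { "annotations": { "team": "platform", "owner": "ops" } } })
    );
}
```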
@ -780,95 +696,60 @@ pub async fn delete_application(
Path((namespace, name)): Path<(String, String)>,
AxumState(state): AxumState<State>,
) -> impl IntoResponse {
let client = match Client::try_default().await {
let kube_client = match KubeClient::try_default().await {
Ok(c) => c,
Err(e) => return internal_error(anyhow!("unable to initialize kubernetes client: {}", e)),
};
let configs: Api<WasmCloudHostConfig> = Api::namespaced(client, &namespace);
let configs: Api<WasmCloudHostConfig> = Api::namespaced(kube_client, &namespace);
let cfgs = match configs.list(&ListParams::default()).await {
Ok(objs) => objs,
Err(e) => return internal_error(anyhow!("unable to list cosmonic host configs: {}", e)),
};
let mut cfgs = cfgs.iter();
// TODO(joonas): Remove this once we move to pulling NATS creds+secrets from lattice instead of hosts.
let mut lattice_id = String::new();
let nats_client = loop {
let Some(cfg) = cfgs.next() else {
return internal_error(anyhow!(
"unable to find Host Config to use for initializing nats client "
));
let (nats_client, lattice_id) =
match get_lattice_connection(cfgs.into_iter(), state, namespace).await {
Ok(data) => data,
Err(resp) => return resp,
};
let cluster_url = cfg.spec.nats_address.clone();
lattice_id = cfg.spec.lattice.clone();
let lattice_name = cfg.metadata.name.clone().unwrap();
let nst: NameNamespace = NameNamespace::new(lattice_name.clone(), namespace.clone());
match get_client(&cluster_url, state.nats_creds.clone(), nst).await {
Ok(c) => break Some(c),
let wadm_client = WadmClient::from_nats_client(&lattice_id, None, nats_client);
match wadm_client.delete_manifest(&name, None).await {
Ok(_) => Json(Application::new(name)).into_response(),
Err(e) => match e {
ClientError::NotFound(_) => not_found_error(anyhow!("apps \"{}\" not found", name)),
_ => internal_error(anyhow!("could not delete app: {}", e)),
},
}
}
async fn get_lattice_connection(
cfgs: impl Iterator<Item = WasmCloudHostConfig>,
state: State,
namespace: String,
) -> Result<(NatsClient, String), Response> {
let connection_data =
cfgs.map(|cfg| (cfg, namespace.clone()))
.filter_map(|(cfg, namespace)| {
let cluster_url = cfg.spec.nats_address;
let lattice_id = cfg.spec.lattice;
let lattice_name = cfg.metadata.name?;
let nst: NameNamespace = NameNamespace::new(lattice_name, namespace);
let port = cfg.spec.nats_client_port;
Some((cluster_url, nst, lattice_id, port))
});
for (cluster_url, ns, lattice_id, port) in connection_data {
match get_nats_client(&cluster_url, &port, state.nats_creds.clone(), ns).await {
Ok(c) => return Ok((c, lattice_id)),
Err(e) => {
error!("error connecting to nats: {}", e);
error!(err = %e, %lattice_id, "error connecting to nats");
continue;
}
};
};
let Some(nats_client) = nats_client else {
return internal_error(anyhow!("unable to initialize nats client"));
};
// First, check if the model exists.
// TODO(joonas): Replace this with wash_lib::app::get_model_status once
// https://github.com/wasmCloud/wasmCloud/pull/1151 ships.
let status = match wash_lib::app::get_model_status(
&nats_client,
Some(lattice_id.clone()),
&name,
)
.await
{
Ok(res) => res,
Err(e) => {
return internal_error(anyhow!("unable to request app status from wadm: {}", e));
}
};
match status.result {
StatusResult::Ok => {
// Proceed
}
StatusResult::NotFound => {
return not_found_error(anyhow!("apps \"{}\" not found", name));
}
StatusResult::Error => {
return internal_error(anyhow!(
"unexpected response from status command: result={:?}, message={}",
status.result,
status.message
));
}
};
let delete = match wash_lib::app::delete_model_version(
&nats_client,
Some(lattice_id),
&name,
None,
true,
)
.await
{
Ok(res) => res,
Err(e) => return internal_error(anyhow!("error deleting app: {}", e)),
};
match delete.result {
// TODO(joonas): do we need to handle DeleteResult::Noop differently?
DeleteResult::Deleted | DeleteResult::Noop => Json(Application::new(name)).into_response(),
DeleteResult::Error => internal_error(anyhow!(
"unexpected response from wadm: result={:?}, message={}",
delete.result,
delete.message
)),
}
// If we get here, we couldn't get a NATS client, so return an error
Err(internal_error(anyhow!("unable to initialize nats client")))
}
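
The refactor above routes connection setup through the new `get_lattice_connection` helper before handing off to `wadm_client`. As a minimal sketch (not part of this diff), a hypothetical handler reusing that helper might look like the following; the handler name and route are assumptions, while the calls mirror `delete_application` above and the `get_manifest` call used in `src/services.rs` below:

// Hypothetical handler, shown only to illustrate the new helper.
pub async fn get_application_manifest(
    Path((namespace, name)): Path<(String, String)>,
    AxumState(state): AxumState<State>,
) -> impl IntoResponse {
    let kube_client = match KubeClient::try_default().await {
        Ok(c) => c,
        Err(e) => return internal_error(anyhow!("unable to initialize kubernetes client: {}", e)),
    };
    let configs: Api<WasmCloudHostConfig> = Api::namespaced(kube_client, &namespace);
    let cfgs = match configs.list(&ListParams::default()).await {
        Ok(objs) => objs,
        Err(e) => return internal_error(anyhow!("unable to list cosmonic host configs: {}", e)),
    };
    // Same connection handoff as delete_application.
    let (nats_client, lattice_id) =
        match get_lattice_connection(cfgs.into_iter(), state, namespace).await {
            Ok(data) => data,
            Err(resp) => return resp,
        };
    let wadm_client = WadmClient::from_nats_client(&lattice_id, None, nats_client);
    // get_manifest(name, version) is the same call reconcile_services uses below.
    match wadm_client.get_manifest(&name, None).await {
        Ok(manifest) => Json(manifest).into_response(),
        Err(e) => internal_error(anyhow!("could not fetch manifest: {}", e)),
    }
}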

src/services.rs (new file, 794 lines)

@@ -0,0 +1,794 @@
use std::collections::{BTreeMap, HashMap, HashSet};
use std::net::SocketAddr;
use std::sync::Arc;
use anyhow::Result;
use async_nats::{
jetstream,
jetstream::{
consumer::{pull::Config, Consumer},
stream::{Config as StreamConfig, RetentionPolicy, Source, StorageType, SubjectTransform},
AckKind,
},
Client,
};
use cloudevents::{AttributesReader, Event as CloudEvent};
use futures::StreamExt;
use k8s_openapi::api::core::v1::{Pod, Service, ServicePort, ServiceSpec};
use k8s_openapi::api::discovery::v1::{Endpoint, EndpointConditions, EndpointPort, EndpointSlice};
use k8s_openapi::apimachinery::pkg::apis::meta::v1::OwnerReference;
use kube::{
api::{Api, DeleteParams, ListParams, Patch, PatchParams},
client::Client as KubeClient,
Resource,
};
use tokio::sync::{mpsc, RwLock};
use tokio_util::sync::CancellationToken;
use tracing::{debug, error, warn};
use wadm::events::{Event, ManifestPublished, ManifestUnpublished};
use wadm_client::Client as WadmClient;
use wadm_types::{api::ModelSummary, Component, Manifest, Properties, Trait, TraitProperty};
use wasmcloud_operator_types::v1alpha1::WasmCloudHostConfig;
use crate::controller::{
common_labels, CLUSTER_CONFIG_FINALIZER, SERVICE_FINALIZER,
WASMCLOUD_OPERATOR_HOST_LABEL_PREFIX, WASMCLOUD_OPERATOR_MANAGED_BY_LABEL_REQUIREMENT,
};
const CONSUMER_PREFIX: &str = "wasmcloud_operator_service";
// This should probably be exposed by wadm somewhere
const WADM_EVENT_STREAM_NAME: &str = "wadm_events";
const OPERATOR_STREAM_NAME: &str = "wasmcloud_operator_events";
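// The "*" token in the subject below stands in for the lattice id; the mirror's
// subject transform and each consumer's filter substitute it (see watch() below).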
const OPERATOR_STREAM_SUBJECT: &str = "wasmcloud_operator_events.*.>";
/// Commands that can be sent to the watcher to trigger an update or removal of a service.
#[derive(Clone, Debug)]
enum WatcherCommand {
UpsertService(ServiceParams),
RemoveService {
name: String,
namespaces: HashSet<String>,
},
RemoveServices {
namespaces: HashSet<String>,
},
}
/// Parameters for creating or updating a service in the cluster.
#[derive(Clone, Debug)]
pub struct ServiceParams {
name: String,
namespaces: HashSet<String>,
lattice_id: String,
port: u16,
host_labels: Option<HashMap<String, String>>,
}
/// Watches for new services to be created in the cluster for a particular lattice and creates or
/// updates them as necessary.
#[derive(Clone, Debug)]
pub struct Watcher {
namespaces: HashSet<String>,
lattice_id: String,
nats_client: Client,
shutdown: CancellationToken,
consumer: Consumer<Config>,
tx: mpsc::UnboundedSender<WatcherCommand>,
}
impl Drop for Watcher {
fn drop(&mut self) {
self.shutdown.cancel();
}
}
impl Watcher {
/// Creates a new watcher for a particular lattice.
fn new(
namespace: String,
lattice_id: String,
nats_client: Client,
consumer: Consumer<Config>,
tx: mpsc::UnboundedSender<WatcherCommand>,
) -> Self {
let watcher = Self {
namespaces: HashSet::from([namespace]),
nats_client,
lattice_id: lattice_id.clone(),
consumer,
shutdown: CancellationToken::new(),
tx,
};
// TODO is there a better way to handle this?
let watcher_dup = watcher.clone();
tokio::spawn(async move {
tokio::select! {
_ = watcher_dup.shutdown.cancelled() => {
debug!(%lattice_id, "Service watcher shutting down for lattice");
}
_ = watcher_dup.watch_events(&watcher_dup.consumer) => {
error!(%lattice_id, "Service watcher for lattice has stopped");
}
}
});
watcher
}
/// Watches for new events on the mirrored wadm_events stream and processes them.
async fn watch_events(&self, consumer: &Consumer<Config>) -> Result<()> {
let mut messages = consumer.stream().messages().await?;
while let Some(message) = messages.next().await {
if let Ok(message) = message {
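// Ack events we handled; Nak failures so JetStream redelivers them (up to max_deliver).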
match self.handle_event(message.clone()) {
Ok(_) => message
.ack()
.await
.map_err(|e| {
error!(err=%e, "Error acking message");
e
})
.ok(),
Err(_) => message
.ack_with(AckKind::Nak(None))
.await
.map_err(|e| {
error!(err=%e, "Error nacking message");
e
})
.ok(),
};
}
}
Ok(())
}
/// Handles a new event from the consumer.
fn handle_event(&self, message: async_nats::jetstream::Message) -> Result<()> {
let event = serde_json::from_slice::<CloudEvent>(&message.payload)
.map_err(|e| anyhow::anyhow!("Error parsing cloudevent: {}", e))?;
let evt = match Event::try_from(event.clone()) {
Ok(evt) => evt,
Err(e) => {
warn!(
err=%e,
event_type=%event.ty(),
"Error converting cloudevent to wadm event",
);
return Ok(());
}
};
match evt {
Event::ManifestPublished(mp) => {
let name = mp.manifest.metadata.name.clone();
self.handle_manifest_published(mp).map_err(|e| {
error!(lattice_id = %self.lattice_id, manifest = name, "Error handling manifest published event: {}", e);
e
})?;
}
Event::ManifestUnpublished(mu) => {
let name = mu.name.clone();
self.handle_manifest_unpublished(mu).map_err(|e| {
error!(lattice_id = %self.lattice_id, manifest = name, "Error handling manifest unpublished event: {}", e);
e
})?;
}
_ => {}
}
Ok(())
}
/// Handles a manifest published event.
fn handle_manifest_published(&self, mp: ManifestPublished) -> Result<()> {
debug!(manifest=?mp, "Handling manifest published event");
let manifest = mp.manifest;
if let Some(httpserver_service) = http_server_component(&manifest) {
if let Ok(addr) = httpserver_service.address.parse::<SocketAddr>() {
debug!(manifest = %manifest.metadata.name, "Upserting service for manifest");
self.tx
.send(WatcherCommand::UpsertService(ServiceParams {
name: manifest.metadata.name.clone(),
lattice_id: self.lattice_id.clone(),
port: addr.port(),
namespaces: self.namespaces.clone(),
host_labels: httpserver_service.labels,
}))
.map_err(|e| anyhow::anyhow!("Error sending command to watcher: {}", e))?;
} else {
error!(
address = httpserver_service.address,
"Invalid address in manifest"
);
}
}
Ok(())
}
/// Handles a manifest unpublished event.
fn handle_manifest_unpublished(&self, mu: ManifestUnpublished) -> Result<()> {
self.tx
.send(WatcherCommand::RemoveService {
name: mu.name,
namespaces: self.namespaces.clone(),
})
.map_err(|e| anyhow::anyhow!("Error sending command to watcher: {}", e))?;
Ok(())
}
}
/// Waits for commands to update or remove services based on manifest deploy/undeploy events in
/// underlying lattices.
/// Each lattice is managed by a [`Watcher`] which listens for events relayed by a NATS consumer and
/// issues commands to create or update services in the cluster.
pub struct ServiceWatcher {
watchers: Arc<RwLock<HashMap<String, Watcher>>>,
sender: mpsc::UnboundedSender<WatcherCommand>,
stream_replicas: u16,
}
impl ServiceWatcher {
/// Creates a new service watcher.
pub fn new(k8s_client: KubeClient, stream_replicas: u16) -> Self {
let (tx, mut rx) = mpsc::unbounded_channel::<WatcherCommand>();
let client = k8s_client.clone();
tokio::spawn(async move {
while let Some(cmd) = rx.recv().await {
match cmd {
WatcherCommand::UpsertService(params) => {
create_or_update_service(client.clone(), &params, None)
.await
.map_err(|e| error!(err=%e, "Error creating/updating service"))
.ok();
}
WatcherCommand::RemoveService { name, namespaces } => {
for namespace in namespaces {
delete_service(client.clone(), &namespace, name.as_str())
.await
.map_err(|e| error!(err=%e, %namespace, "Error deleting service"))
.ok();
}
}
WatcherCommand::RemoveServices { namespaces } => {
for namespace in namespaces {
delete_services(client.clone(), namespace.as_str())
.await
.map_err(|e| error!(err=%e, %namespace, "Error deleting service"))
.ok();
}
}
}
}
});
Self {
watchers: Arc::new(RwLock::new(HashMap::new())),
sender: tx,
stream_replicas,
}
}
/// Reconciles services for a set of apps in a lattice.
/// This is intended to be called by the controller whenever it reconciles state.
pub async fn reconcile_services(&self, apps: Vec<ModelSummary>, lattice_id: String) {
if let Some(watcher) = self.watchers.read().await.get(lattice_id.as_str()) {
let wadm_client =
WadmClient::from_nats_client(&lattice_id, None, watcher.nats_client.clone());
for app in apps {
if app.deployed_version.is_none() {
continue;
}
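// Re-run the deployed manifest through the published-event path so its
// service gets (re)created.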
match wadm_client
.get_manifest(app.name.as_str(), app.deployed_version.as_deref())
.await
{
Ok(manifest) => {
let _ = watcher.handle_manifest_published(ManifestPublished {
manifest,
}).map_err(|e| error!(err = %e, %lattice_id, app = %app.name, "failed to trigger service reconciliation for app"));
}
Err(e) => warn!(err=%e, "Unable to retrieve model"),
};
}
};
}
/// Create a new [`Watcher`] for a lattice.
/// It will return early if a [`Watcher`] already exists for the lattice.
pub async fn watch(&self, client: Client, namespace: String, lattice_id: String) -> Result<()> {
// If we're already watching this lattice then return early
// TODO is there an easy way to do this with a read lock?
let mut watchers = self.watchers.write().await;
if let Some(watcher) = watchers.get_mut(lattice_id.as_str()) {
watcher.namespaces.insert(namespace);
return Ok(());
}
let js = jetstream::new(client.clone());
// Should we also be doing this when we first create the ServiceWatcher?
let stream = js
.get_or_create_stream(StreamConfig {
name: OPERATOR_STREAM_NAME.to_string(),
description: Some(
"Stream for wadm events consumed by the wasmCloud K8s Operator".to_string(),
),
max_age: wadm::DEFAULT_EXPIRY_TIME,
retention: RetentionPolicy::WorkQueue,
storage: StorageType::File,
allow_rollup: false,
num_replicas: self.stream_replicas as usize,
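// Mirror wadm's event stream rather than consuming it directly, rewriting
// subjects so each lattice's events land under the operator's own subject space.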
mirror: Some(Source {
name: WADM_EVENT_STREAM_NAME.to_string(),
subject_transforms: vec![SubjectTransform {
source: wadm::DEFAULT_WADM_EVENTS_TOPIC.to_string(),
destination: OPERATOR_STREAM_SUBJECT.replacen('*', "{{wildcard(1)}}", 1),
}],
..Default::default()
}),
..Default::default()
})
.await?;
let consumer_name = format!("{CONSUMER_PREFIX}-{}", lattice_id.clone());
let consumer = stream
.get_or_create_consumer(
consumer_name.as_str(),
Config {
durable_name: Some(consumer_name.clone()),
description: Some("Consumer created by the wasmCloud K8s Operator to watch for new service endpoints in wadm manifests".to_string()),
ack_policy: jetstream::consumer::AckPolicy::Explicit,
ack_wait: std::time::Duration::from_secs(2),
max_deliver: 3,
deliver_policy: async_nats::jetstream::consumer::DeliverPolicy::All,
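// Only receive events for this watcher's lattice.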
filter_subject: OPERATOR_STREAM_SUBJECT.replacen('*', &lattice_id, 1),
..Default::default()
},
)
.await?;
let watcher = Watcher::new(
namespace,
lattice_id.clone(),
client.clone(),
consumer,
self.sender.clone(),
);
watchers.insert(lattice_id.clone(), watcher);
Ok(())
}
/// Stops watching a lattice by stopping the underlying [`Watcher`] if no namespaces require it.
pub async fn stop_watch(&self, lattice_id: String, namespace: String) -> Result<()> {
let mut watchers = self.watchers.write().await;
if let Some(watcher) = watchers.get_mut(lattice_id.as_str()) {
watcher.namespaces.remove(namespace.as_str());
if watcher.namespaces.is_empty() {
watchers.remove(lattice_id.as_str());
}
self.sender
.send(WatcherCommand::RemoveServices {
namespaces: HashSet::from([namespace]),
})
.map_err(|e| anyhow::anyhow!("Error sending command to watcher: {}", e))?;
}
Ok(())
}
}
/// Creates or updates a service in the cluster based on the provided parameters.
pub async fn create_or_update_service(
k8s_client: KubeClient,
params: &ServiceParams,
owner_ref: Option<OwnerReference>,
) -> Result<()> {
let mut labels = common_labels();
labels.extend(BTreeMap::from([(
"app.kubernetes.io/name".to_string(),
params.name.to_string(),
)]));
let mut selector = BTreeMap::new();
let mut create_endpoints = false;
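// Host labels let the Service select wasmCloud host pods directly; without
// them we fall back to publishing EndpointSlices by hand (see below).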
if let Some(host_labels) = &params.host_labels {
selector.insert(
"app.kubernetes.io/name".to_string(),
"wasmcloud".to_string(),
);
selector.extend(
host_labels
.iter()
.map(|(k, v)| (format_service_selector(k), v.clone())),
);
} else {
create_endpoints = true;
}
for namespace in params.namespaces.iter() {
let api = Api::<Service>::namespaced(k8s_client.clone(), namespace);
let mut svc = Service {
metadata: kube::api::ObjectMeta {
name: Some(params.name.clone()),
labels: Some(labels.clone()),
finalizers: Some(vec![SERVICE_FINALIZER.to_string()]),
namespace: Some(namespace.clone()),
..Default::default()
},
spec: Some(ServiceSpec {
selector: Some(selector.clone()),
ports: Some(vec![ServicePort {
name: Some("http".to_string()),
port: params.port as i32,
protocol: Some("TCP".to_string()),
..Default::default()
}]),
..Default::default()
}),
..Default::default()
};
if let Some(owner_ref) = &owner_ref {
svc.metadata.owner_references = Some(vec![owner_ref.clone()]);
}
debug!(service =? svc, %namespace, "Creating/updating service");
let svc = api
.patch(
params.name.as_str(),
&PatchParams::apply(SERVICE_FINALIZER),
&Patch::Apply(svc),
)
.await
.map_err(|e| {
error!(err = %e, "Error creating/updating service");
e
})?;
if create_endpoints {
let crds =
Api::<WasmCloudHostConfig>::namespaced(k8s_client.clone(), namespace.as_str());
let pods = Api::<Pod>::namespaced(k8s_client.clone(), namespace.as_str());
let endpoints =
Api::<EndpointSlice>::namespaced(k8s_client.clone(), namespace.as_str());
let configs = crds.list(&ListParams::default()).await?;
let mut ips = vec![];
for cfg in configs {
if cfg.spec.lattice == params.lattice_id {
let name = cfg.metadata.name.unwrap();
let pods = pods
.list(&ListParams {
label_selector: Some(format!(
"app.kubernetes.io/name=wasmcloud,app.kubernetes.io/instance={name}"
)),
..Default::default()
})
.await?;
let pod_ips = pods
.into_iter()
.filter_map(|pod| {
pod.status.and_then(|status| {
if status.phase == Some("Running".to_string()) {
status.pod_ips
} else {
None
}
})
})
.flatten();
ips.extend(pod_ips);
}
}
// Create an EndpointSlice if we're working with a daemonscaler without label requirements.
// This means we need to manually map the endpoints to each wasmCloud host belonging to the
// lattice in this namespace.
// TODO: This can actually span namespaces, same with the label requirements so should we
// be querying _all_ CRDs to find all available pods?
if !ips.is_empty() {
let mut labels = labels.clone();
labels.insert(
"kubernetes.io/service-name".to_string(),
params.name.clone(),
);
let endpoint_slice = EndpointSlice {
metadata: kube::api::ObjectMeta {
name: Some(params.name.clone()),
labels: Some(labels.clone()),
// SAFETY: This should be safe according to the kube.rs docs, which specify
// that anything created through the apiserver should have a populated field
// here.
owner_references: Some(vec![svc.controller_owner_ref(&()).unwrap()]),
..Default::default()
},
// TODO is there a way to figure this out automatically? Maybe based on the number
// of IPs that come back or what they are
address_type: "IPv4".to_string(),
endpoints: ips
.iter()
.filter_map(|ip| {
ip.ip.as_ref().map(|i| Endpoint {
addresses: vec![i.clone()],
conditions: Some(EndpointConditions {
ready: Some(true),
serving: Some(true),
terminating: None,
}),
hostname: None,
target_ref: None,
..Default::default()
})
})
.collect(),
ports: Some(vec![EndpointPort {
name: Some("http".to_string()),
port: Some(params.port as i32),
protocol: Some("TCP".to_string()),
app_protocol: None,
}]),
};
// TODO this should probably do the usual get/patch or get/replace bit since I don't
// think this is fully syncing endpoints when pods are deleted. Also we should update
// this based on pod status since we may end up having stale IPs
endpoints
.patch(
params.name.as_str(),
&PatchParams::apply(CLUSTER_CONFIG_FINALIZER),
&Patch::Apply(endpoint_slice),
)
.await
.map_err(|e| {
error!("Error creating endpoint slice: {}", e);
e
})?;
}
};
}
debug!("Created/updated service");
Ok(())
}
#[derive(Default)]
pub struct HttpServerComponent {
labels: Option<HashMap<String, String>>,
address: String,
}
/// Finds the httpserver component in a manifest and returns the details needed to create a service
fn http_server_component(manifest: &Manifest) -> Option<HttpServerComponent> {
let components: Vec<&Component> = manifest
.components()
// filter just for the wasmCloud httpserver for now. This should actually just filter for
// the http capability
.filter(|c| {
if let Properties::Capability { properties } = &c.properties {
if properties
.image
.starts_with("ghcr.io/wasmcloud/http-server")
{
return true;
}
}
false
})
.collect();
let scalers: Vec<&Trait> = components
.iter()
.filter_map(|c| {
if let Some(t) = &c.traits {
for trait_ in t {
if trait_.trait_type == "daemonscaler" {
return Some(trait_);
}
}
};
None
})
.collect();
// Right now we only support daemonscalers, so if we don't find any then we have nothing to do
if scalers.is_empty() {
return None;
}
let links: Vec<&Trait> = components
.iter()
.filter_map(|c| {
if let Some(t) = &c.traits {
for trait_ in t {
if trait_.trait_type == "link" {
return Some(trait_);
}
}
};
None
})
.collect();
let mut details = HttpServerComponent::default();
let mut should_create_service = false;
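// Pull the listen address from the wasi:http incoming-handler link config;
// any spread requirements recorded on the trait become host-label selectors.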
for l in links {
match &l.properties {
TraitProperty::Link(props) => {
if props.namespace == "wasi"
&& props.package == "http"
&& props.interfaces.contains(&"incoming-handler".to_string())
&& props.source.is_some()
{
let source = props.source.as_ref().unwrap();
for cp in source.config.iter() {
if let Some(addr) = cp.properties.as_ref().and_then(|p| p.get("address")) {
details.address.clone_from(addr);
should_create_service = true;
}
}
}
}
TraitProperty::SpreadScaler(scaler) => {
for spread in scaler.spread.iter() {
spread.requirements.iter().for_each(|(k, v)| {
details
.labels
.get_or_insert_with(HashMap::new)
.insert(k.clone(), v.clone());
});
}
}
_ => {}
}
}
if should_create_service {
return Some(details);
}
None
}
/// Deletes a service in the cluster.
async fn delete_service(k8s_client: KubeClient, namespace: &str, name: &str) -> Result<()> {
debug!(namespace = namespace, name = name, "Deleting service");
let api = Api::<Service>::namespaced(k8s_client.clone(), namespace);
// Remove the finalizer so that the service can be deleted
let mut svc = api.get(name).await?;
svc.metadata.finalizers = None;
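// managed_fields must be empty for a server-side apply, so clear them before patching.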
svc.metadata.managed_fields = None;
api.patch(
name,
&PatchParams::apply(SERVICE_FINALIZER).force(),
&Patch::Apply(svc),
)
.await
.map_err(|e| {
error!("Error removing finalizer from service: {}", e);
e
})?;
api.delete(name, &DeleteParams::default()).await?;
Ok(())
}
async fn delete_services(k8s_client: KubeClient, namespace: &str) -> Result<()> {
let api = Api::<Service>::namespaced(k8s_client.clone(), namespace);
let services = api
.list(&ListParams {
label_selector: Some(WASMCLOUD_OPERATOR_MANAGED_BY_LABEL_REQUIREMENT.to_string()),
..Default::default()
})
.await?;
for svc in services {
let name = svc.metadata.name.unwrap();
delete_service(k8s_client.clone(), namespace, name.as_str()).await?;
}
Ok(())
}
/// Formats a service selector for a given name.
fn format_service_selector(name: &str) -> String {
format!("{WASMCLOUD_OPERATOR_HOST_LABEL_PREFIX}/{}", name)
}
#[cfg(test)]
mod test {
use super::*;
#[test]
fn test_daemonscaler_should_return() {
let manifest = r#"
apiVersion: core.oam.dev/v1beta1
kind: Application
metadata:
name: rust-http-hello-world
annotations:
version: v0.0.1
description: "HTTP hello world demo in Rust, using the WebAssembly Component Model and WebAssembly Interface Types (WIT)"
experimental: "true"
spec:
components:
- name: http-hello-world
type: component
properties:
image: wasmcloud.azurecr.io/http-hello-world:0.1.0
id: helloworld
traits:
# Govern the spread/scheduling of the actor
- type: spreadscaler
properties:
replicas: 5000
# Add a capability provider that mediates HTTP access
- name: httpserver
type: capability
properties:
image: ghcr.io/wasmcloud/http-server:0.20.0
id: httpserver
traits:
# Link the HTTP server, and inform it to listen on port 8080
# on the local machine
- type: link
properties:
target: http-hello-world
namespace: wasi
package: http
interfaces: [incoming-handler]
source_config:
- name: default-http
properties:
address: 0.0.0.0:8080
- type: daemonscaler
properties:
replicas: 1
"#;
let m = serde_yaml::from_str::<Manifest>(manifest).unwrap();
let component = http_server_component(&m);
assert!(component.is_some());
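// Same manifest minus the daemonscaler trait: no service should be derived.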
let manifest = r#"
apiVersion: core.oam.dev/v1beta1
kind: Application
metadata:
name: rust-http-hello-world
annotations:
version: v0.0.1
description: "HTTP hello world demo in Rust, using the WebAssembly Component Model and WebAssembly Interface Types (WIT)"
experimental: "true"
spec:
components:
- name: http-hello-world
type: component
properties:
image: wasmcloud.azurecr.io/http-hello-world:0.1.0
id: helloworld
traits:
# Govern the spread/scheduling of the actor
- type: spreadscaler
properties:
replicas: 5000
# Add a capability provider that mediates HTTP access
- name: httpserver
type: capability
properties:
image: ghcr.io/wasmcloud/http-server:0.20.0
id: httpserver
traits:
# Link the HTTP server, and inform it to listen on port 8080
# on the local machine
- type: link
properties:
target: http-hello-world
namespace: wasi
package: http
interfaces: [incoming-handler]
source_config:
- name: default-http
properties:
address: 0.0.0.0:8080
"#;
let m = serde_yaml::from_str::<Manifest>(manifest).unwrap();
let component = http_server_component(&m);
assert!(component.is_none());
}
}
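
Taken together, the controller would own one `ServiceWatcher` and register each lattice it reconciles. A minimal wiring sketch under stated assumptions, not part of this diff: it presumes a connected async-nats client and kube client from the controller's startup path, and the namespace and lattice id literals are placeholders.

// Illustrative wiring only. ServiceWatcher::new spawns the command loop;
// watch() creates the mirrored stream/consumer and a Watcher for the lattice.
use async_nats::Client as NatsClient;

async fn wire_service_watcher(
    k8s_client: KubeClient,
    nats_client: NatsClient,
) -> Result<ServiceWatcher> {
    // One ServiceWatcher per operator; it manages one Watcher per lattice.
    let watcher = ServiceWatcher::new(k8s_client, 1);
    // Placeholder namespace/lattice id; the controller derives these from
    // each WasmCloudHostConfig it reconciles.
    watcher
        .watch(nats_client, "default".to_string(), "default".to_string())
        .await?;
    Ok(watcher)
}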