Compare commits

121 commits (SHA1):

a3f6886cb3 599aec9cbc 4b8134a5f8 9c3208ee3a c6d4e59028 cb051aac61 52007e2927 bc2d7bbea2
d4758e98f1 9cf6542828 8d495ec202 b7e4bf3761 f9daa35480 d74ad8bff7 4ccfb20b30 59552d2277
1b129d69c9 fff57db4fb 1c85b3937c 309b492dcd 0aaa277422 6f63289a11 788cbc089d 4ab407dc15
d90e46055e 5988e643e7 69dac08133 2906b3f56e ba909d170c f7d3ddef5f 0c024e7718 e6de0f9394
38e77cd643 226f9ab39e ee120eb5d1 26deb719ac c30e1f8953 97f0c12d03 7c709465c0 4f4bde877e
32fa6e40da 674ee31cb0 27e83fe24c d4a47a4d7c 0ebc5229bb 8853b526d4 d0f810cd6c bf8a74acfd
00bc95f293 b3cfb7da54 e7a5e9dc1f 845f78841a f83dbf9215 fa5594bab8 7dcb409bde b1d88813f4
de5026f4e0 43d087f1ad 280f2648e6 105e2c54dc 0952050708 5be39536f3 3aa081ebbe 2a9db6a87a
86d26fd8cd a0dfdff70d cfc139f36d 6df71d1b6f 45ada5a9b2 5d0b199ff7 178ee27fe9 8ccdbb784d
7a38f50847 6b0f38ebf1 35af8adb73 dd22a19034 77b8c86945 d2266002aa f190e50c6f fcf4a445e3
58b976c250 878a008a01 60723198aa 1aa3942b6e 4804546fd4 cd02677b16 a9a05905c1 82ab69b84f
a947d144cb 72f58558bd 692d78308b 1659819c23 9eb70ec23a 04607fdcb5 9a9a4a8f97 4d378f263f
8bcd13b1e1 0121602ee7 b93332029f bad704df22 fe4ee8af32 290db9919c 58a9799b0e e671b4f12b
8c1fa43765 50a2d67798 a4a0035aa2 88369e3cdd baee51eecc 3fa82d0857 df435d802b ca3527b6f3
d9111366a2 bdc43302b2 1726808a14 00adb860fc debadb3109 0756cdeeff c0e782dc1b b375604010
72fa8af4d6
@@ -0,0 +1,2 @@
# wasmCloud operator maintainers
* @wasmCloud/operator-maintainers
@@ -0,0 +1,10 @@
version: 2
updates:
  - package-ecosystem: "cargo"
    directory: "/"
    schedule:
      interval: "daily"
  - package-ecosystem: "github-actions"
    directory: "/"
    schedule:
      interval: "daily"
@@ -0,0 +1,9 @@
# For more details on the available options, see:
# https://github.com/actions/dependency-review-action?tab=readme-ov-file#configuration-options
fail-on-severity: critical

comment-summary-in-pr: always

show-openssf-scorecard: true

warn-on-openssf-scorecard-level: 3
@@ -16,7 +16,7 @@ jobs:
   check:
     runs-on: ubuntu-latest
     steps:
-      - uses: actions/checkout@v3
+      - uses: actions/checkout@v4
       - name: Lint
         run: |
           cargo clippy -- --no-deps
@@ -71,7 +71,7 @@ jobs:
         uses: docker/setup-buildx-action@v3

       - name: Log in to the Container registry
-        uses: docker/login-action@v2
+        uses: docker/login-action@v3
         with:
           registry: ${{ env.REGISTRY }}
           username: ${{ github.repository_owner }}
@@ -86,6 +86,15 @@ jobs:
             type=sha,prefix=
             type=semver,pattern={{version}}

+      - name: Extract metadata (tags, labels) for Docker
+        id: meta_wolfi
+        uses: docker/metadata-action@v5
+        with:
+          images: ${{ env.REGISTRY }}/${{ env.IMAGE_NAME }}
+          tags: |
+            type=sha,prefix=,suffix=-wolfi
+            type=semver,pattern={{version}},suffix=-wolfi
+
       - name: Load artifacts
         uses: actions/download-artifact@v4
         with:
@@ -98,7 +107,7 @@ jobs:
           chmod +x artifacts/wasmcloud-operator*

       - name: Build and push Docker image
-        uses: docker/build-push-action@v4
+        uses: docker/build-push-action@v6
         with:
           push: true
           context: .
@@ -106,3 +115,14 @@ jobs:
           labels: ${{ steps.meta.outputs.labels }}
           platforms: linux/amd64,linux/arm64
           build-args: "BIN_PATH=artifacts/wasmcloud-operator"
+
+      - name: Build and push Docker image (wolfi)
+        uses: docker/build-push-action@v6
+        with:
+          push: true
+          context: .
+          file: './Dockerfile.wolfi'
+          tags: ${{ steps.meta_wolfi.outputs.tags }}
+          labels: ${{ steps.meta_wolfi.outputs.labels }}
+          platforms: linux/amd64,linux/arm64
+          build-args: "BIN_PATH=artifacts/wasmcloud-operator"
@@ -0,0 +1,96 @@
name: chart

env:
  HELM_VERSION: v3.14.0

on:
  push:
    tags:
      - 'chart-v[0-9].[0-9]+.[0-9]+'
  pull_request:
    paths:
      - 'charts/**'
      - '.github/workflows/chart.yml'

jobs:
  validate:
    runs-on: ubuntu-22.04

    steps:
      - uses: actions/checkout@v4
        with:
          fetch-depth: 0

      - name: Fetch main branch for chart-testing
        run: |
          git fetch origin main:main

      - name: Set up Helm
        uses: azure/setup-helm@v4
        with:
          version: ${{ env.HELM_VERSION }}

      # Used by helm chart-testing below
      - name: Set up Python
        uses: actions/setup-python@v5.6.0
        with:
          python-version: '3.12.2'

      - name: Set up chart-testing
        uses: helm/chart-testing-action@v2.7.0
        with:
          version: v3.10.1
          yamllint_version: 1.35.1
          yamale_version: 5.0.0

      - name: Run chart-testing (lint)
        run: |
          ct lint --config charts/wasmcloud-operator/ct.yaml

      - name: Create kind cluster
        uses: helm/kind-action@v1.12.0
        with:
          version: "v0.22.0"

      - name: Run chart-testing (install)
        run: |
          ct install --config charts/wasmcloud-operator/ct.yaml

  publish:
    if: ${{ startsWith(github.ref, 'refs/tags/chart-v') }}
    runs-on: ubuntu-22.04
    needs: validate
    permissions:
      packages: write

    steps:
      - uses: actions/checkout@v4

      - name: Set up Helm
        uses: azure/setup-helm@v4
        with:
          version: ${{ env.HELM_VERSION }}

      - name: Package
        run: |
          helm package charts/wasmcloud-operator -d .helm-charts

      - name: Login to GHCR
        uses: docker/login-action@v3
        with:
          registry: ghcr.io
          username: ${{ github.repository_owner }}
          password: ${{ secrets.GITHUB_TOKEN }}

      - name: Lowercase the organization name for ghcr.io
        run: |
          echo "GHCR_REPO_NAMESPACE=${GITHUB_REPOSITORY_OWNER,,}" >>${GITHUB_ENV}

      - name: Publish
        run: |
          for chart in .helm-charts/*; do
            if [ -z "${chart:-}" ]; then
              break
            fi
            helm push "${chart}" "oci://ghcr.io/${{ env.GHCR_REPO_NAMESPACE }}/charts"
          done
@@ -0,0 +1,19 @@
name: 'Dependency Review'

on:
  pull_request:

permissions:
  contents: read
  pull-requests: write

jobs:
  dependency-review:
    runs-on: ubuntu-latest
    steps:
      - name: 'Checkout Repository'
        uses: actions/checkout@v4
      - name: Dependency Review
        uses: actions/dependency-review-action@v4
        with:
          config-file: './.github/dependency-review-config.yml'
(File diff suppressed because it is too large.)

Cargo.toml
@@ -1,6 +1,6 @@
 [package]
 name = "wasmcloud-operator"
-version = "0.1.0"
+version = "0.5.0"
 edition = "2021"

 [[bin]]
@@ -21,51 +21,64 @@ path = "src/lib.rs"
 edition = "2021"

 [dependencies]
-async-nats = {workspace = true}
-axum = {workspace = true}
-axum-server = {workspace = true}
-anyhow = {workspace = true}
-atty = {workspace = true}
-ctrlc = {workspace = true}
-futures = {workspace = true}
-handlebars = {workspace = true}
-json-patch = {workspace = true}
-k8s-openapi = {workspace = true, features = ["v1_28", "schemars"]}
-kube = {workspace = true, features = ["runtime", "derive", "default"]}
-opentelemetry = {workspace = true}
-opentelemetry-otlp = {workspace = true}
-rcgen = {workspace = true}
-schemars = {workspace = true}
-secrecy = {workspace = true}
-serde = {workspace = true}
-serde_json = {workspace = true}
-serde_yaml = {workspace = true}
-thiserror = {workspace = true}
-time = {workspace = true}
-tokio = {workspace = true}
-tracing = {workspace = true}
-tracing-opentelemetry = {workspace = true}
-tracing-subscriber = {workspace = true}
-utoipa = {workspace = true}
-uuid = {workspace = true}
-wadm = {workspace = true}
-wash-lib = {workspace = true}
-wasmcloud-operator-types = {workspace = true}
+async-nats = { workspace = true }
+axum = { workspace = true }
+axum-server = { workspace = true }
+anyhow = { workspace = true }
+ctrlc = { workspace = true }
+cloudevents-sdk = { workspace = true }
+config = { workspace = true }
+futures = { workspace = true }
+handlebars = { workspace = true }
+json-patch = { workspace = true }
+k8s-openapi = { workspace = true, features = ["v1_28", "schemars"] }
+kube = { workspace = true, features = ["runtime", "derive", "default"] }
+opentelemetry = { workspace = true }
+opentelemetry_sdk = { workspace = true }
+opentelemetry-otlp = { workspace = true }
+rcgen = { workspace = true }
+schemars = { workspace = true }
+secrecy = { workspace = true }
+serde = { workspace = true }
+serde_json = { workspace = true }
+serde_yaml = { workspace = true }
+thiserror = { workspace = true }
+time = { workspace = true }
+tokio = { workspace = true }
+tokio-util = { workspace = true }
+tracing = { workspace = true }
+tracing-opentelemetry = { workspace = true }
+tracing-subscriber = { workspace = true }
+utoipa = { workspace = true }
+uuid = { workspace = true }
+wadm = { workspace = true }
+wadm-client = { workspace = true }
+wadm-types = { workspace = true }
+wasmcloud-operator-types = { workspace = true }

 [workspace.dependencies]
 async-nats = "0.33"
 axum = { version = "0.6", features = ["headers"] }
 axum-server = { version = "0.4", features = ["tls-rustls"] }
 anyhow = "1"
-atty = "0.2"
+config = { version = "0.14", default-features = false, features = [
+  "convert-case",
+  "async",
+] }
+cloudevents-sdk = "0.7"
 ctrlc = "3"
 futures = "0.3"
 handlebars = "5.1"
-json-patch = "1.2.0"
-k8s-openapi = { version = "0.20", default-features = false}
-kube = { version = "0.87", default-features = false}
-opentelemetry = { version = "0.20", features = ["metrics", "trace", "rt-tokio"] }
-opentelemetry-otlp = { version = "0.13", features = ["tokio"] }
+json-patch = "1.4.0"
+k8s-openapi = { version = "0.20", default-features = false }
+kube = { version = "0.87", default-features = false }
+opentelemetry = { version = "0.21", default-features = false }
+opentelemetry_sdk = { version = "0.21", features = [
+  "metrics",
+  "trace",
+  "rt-tokio",
+] }
+opentelemetry-otlp = { version = "0.14", features = ["tokio"] }
 rcgen = "0.11"
 schemars = "0.8"
 secrecy = "0.8"
@@ -75,19 +88,19 @@ serde_yaml = "0.9"
 thiserror = "1"
 time = "0.3"
 tokio = { version = "1", features = ["full"] }
+tokio-util = { version = "0.7", features = ["rt"] }
 tracing = "0.1"
-tracing-opentelemetry = "0.20"
+tracing-opentelemetry = "0.22"
 tracing-subscriber = { version = "0.3", features = ["env-filter", "json"] }
 utoipa = { version = "4.1", features = ["axum_extras"] }
 uuid = { version = "1", features = ["v5"] }
-wadm = "0.10"
-wash-lib = "0.17"
-wasmcloud-operator-types = { version="*", path = "./crates/types" }
+wadm = "0.13.0"
+wadm-client = "0.2.0"
+wadm-types = "0.2.0"
+wasmcloud-operator-types = { version = "*", path = "./crates/types" }

 [workspace]
-members = [
-    "crates/*"
-]
+members = ["crates/*"]
+resolver = "2"

 [profile.release]
@@ -1,5 +1,5 @@
 # syntax=docker/dockerfile:1
-FROM rust:1.75-bookworm as builder
+FROM rust:1.77-bookworm as builder

 WORKDIR /app
 COPY . .
@@ -0,0 +1,7 @@
# syntax=docker/dockerfile:1
FROM chainguard/wolfi-base:latest
ARG BIN_PATH
ARG TARGETARCH

COPY ${BIN_PATH}-${TARGETARCH} /usr/local/bin/wasmcloud-operator
ENTRYPOINT ["/usr/local/bin/wasmcloud-operator"]
LICENSE
@@ -187,7 +187,7 @@
       same "printed page" as the copyright notice for easier
       identification within third-party archives.

-   Copyright [yyyy] [name of copyright owner]
+   Copyright 2024 wasmCloud Maintainers

    Licensed under the Apache License, Version 2.0 (the "License");
    you may not use this file except in compliance with the License.
@@ -0,0 +1,22 @@
# MAINTAINERS

The following individuals are responsible for reviewing code, managing issues, and ensuring the overall quality of `wasmcloud-operator`.

## @wasmCloud/operator-maintainers

Name: Joonas Bergius
GitHub: @joonas
Organization: Cosmonic

Name: Dan Norris
GitHub: @protochron
Organization: Cosmonic

Name: Taylor Thomas
GitHub: @thomastaylor312
Organization: Cosmonic

Name: Lucas Fontes
GitHub: @lxfontes
Organization: Cosmonic
README.md
@@ -1,10 +1,10 @@
 # wasmcloud-operator

-An operator for managing a set of wasmCloud hosts running on Kubernetes and
-manage wasmCloud appliations using WADM.
+An operator for managing a set of [wasmCloud hosts](https://github.com/wasmCloud/wasmCloud/) running on Kubernetes and
+manage [wasmCloud applications using wadm](https://github.com/wasmcloud/wadm).
 The goal is to easily be able to run WasmCloud hosts on a Kubernetes cluster.

-## CusterConfig Custom Resource Definition (CRD)
+## WasmCloudHostConfig Custom Resource Definition (CRD)

 The WasmCloudHostConfig CRD describes the desired state of a set of wasmCloud
 hosts connected to the same lattice.
@@ -17,18 +17,20 @@ metadata:
 spec:
   # The number of wasmCloud host pods to run
   hostReplicas: 2
   # The cluster issuers to use for each host
   issuers:
     - CDKF6OKPOBQKAX57UOXO7SCHURTOZWKWIVPC2HFJTGFXY5VJX44ECEHH
   # The lattice to connect the hosts to
-  lattice: 83a5b52e-17cf-4080-bac8-f844099f142e
+  lattice: default
   # Additional labels to apply to the host other than the defaults set in the operator
   hostLabels:
     some-label: value
   # The address to connect to nats
   natsAddress: nats://nats.default.svc.cluster.local
   # Which wasmCloud version to use
-  version: 0.81.0
-  # The name of a secret in the same namespace that provides the required secrets.
-  secretName: cluster-secrets
+  version: 1.0.4
+  # Enable the following to run the wasmCloud hosts as a DaemonSet
+  #daemonset: true
+  # The name of the image pull secret to use with wasmCloud hosts so that they
+  # can authenticate to a private registry to pull components.
+  # registryCredentialsSecret: my-registry-secret
 ```

 The CRD requires a Kubernetes Secret with the following keys:
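Once a `WasmCloudHostConfig` resource like the one above is applied, its reconciled state (including the "App Count" column the CRD defines) can be inspected with `kubectl`; a minimal sketch, assuming the resource name from the example:

```sh
kubectl describe wasmcloudhostconfig my-wasmcloud-cluster
```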
@@ -38,19 +40,54 @@ apiVersion: v1
 kind: Secret
 metadata:
   name: my-wasmcloud-cluster
-data:
-  # You can generate this with wash:
-  # wash keys gen cluster
-  WASMCLOUD_CLUSTER_SEED: <seed>
-  # Only required if using a NATS creds file
-  # nats.creds: <base64 encoded creds file>
-  # Only required if using OCI private registry
-  # OCI_REGISTRY_PASSWORD: <password>
+#data:
+  # Only required if using a NATS creds file
+  # nats.creds: <creds file>
 ```

 The operator will fail to provision the wasmCloud Deployment if any of these
 secrets are missing!

+#### Customizing the images used for wasmCloud host and NATS leaf
+
+If you would like to customize the registry or image that gets used to provision the wasmCloud hosts and the NATS leaf that runs alongside them, you can specify the following options in the above `WasmCloudHostConfig` CRD.
+
+For wasmCloud Host, use the `image` field:
+
+```yaml
+apiVersion: k8s.wasmcloud.dev/v1alpha1
+kind: WasmCloudHostConfig
+metadata:
+  name: my-wasmcloud-cluster
+spec:
+  # other config options omitted
+  image: registry.example.com/wasmcloud:1.0.2
+```
+
+For the NATS leaf, use the `natsLeafImage` field:
+
+```yaml
+apiVersion: k8s.wasmcloud.dev/v1alpha1
+kind: WasmCloudHostConfig
+metadata:
+  name: my-wasmcloud-cluster
+spec:
+  # other config options omitted
+  natsLeafImage: registry.example.com/nats:2.10.16
+```
+
+### Image Pull Secrets
+
+You can also specify an image pull secret to use use with the wasmCloud hosts
+so that they can pull components from a private registry. This secret needs to
+be in the same namespace as the WasmCloudHostConfig CRD and must be a
+`kubernetes.io/dockerconfigjson` type secret. See the [Kubernetes
+documentation](https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/#registry-secret-existing-credentials)
+for more information on how to provision that secret.
+
+Once it is created, you can reference it in the WasmCloudHostConfig CRD by
+setting the `registryCredentialsSecret` field to the name of the secret.
+
 ## Deploying the operator

 A wasmCloud cluster requires a few things to run:
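For reference, the `kubernetes.io/dockerconfigjson` secret referenced by `registryCredentialsSecret` can be created with standard `kubectl`; a sketch in which the server, username, and password values are placeholders:

```sh
# Create a pull secret in the same namespace as the WasmCloudHostConfig resource
kubectl create secret docker-registry my-registry-secret \
  --docker-server=registry.example.com \
  --docker-username=myuser \
  --docker-password=mypassword
```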
@@ -83,23 +120,45 @@ config:
 ```

 ```sh
-helm upgrade --install -f values.yaml nats-cluster nats/nats
+helm repo add nats https://nats-io.github.io/k8s/helm/charts/
+helm upgrade --install -f values.yaml nats nats/nats
 ```

-### Running WADM
+### Running Wadm

-WADM can be run as a standalone binary or as a container. The following
-command will start WADM as a Kubernetes deployment:
+You can run Wadm in your Kubernetes cluster using our Helm chart. For a minimal deployment using the
+NATS server deployed above, all you need in your `values.yaml` file is:
+
+```yaml
+wadm:
+  config:
+    nats:
+      server: "nats.default.svc.cluster.local:4222"
+```
+
+You can deploy Wadm using your values file and Helm:
+
+```sh
+helm install wadm -f wadm-values.yaml --version 0.2.0 oci://ghcr.io/wasmcloud/charts/wadm
+```

 ### Start the operator

 ```sh
-kubectl kustomize build deploy/local | kubectl apply -f -
+kubectl kustomize deploy/base | kubectl apply -f -
 ```

+## Automatically Syncing Kubernetes Services
+
+The operator automatically creates Kubernetes Services for wasmCloud
+applications. Right now this is limited only to applications that deploy the
+wasmCloud httpserver component using a `daemonscaler`, but additional support
+for `spreadscalers` will be added in the future.
+
+If you specify host label selectors on the `daemonscaler` then the operator
+will honor those labels and will only create a service for the pods that match
+those label selectors.
+
 ## Argo CD Health Check

 Argo CD provides a way to define a [custom health
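As a quick sanity check after the NATS, Wadm, and operator installs described above, each rollout can be verified; these commands mirror the validation steps in the quickstart example later in this changeset:

```sh
kubectl rollout status deploy,sts -l app.kubernetes.io/instance=nats
kubectl rollout status deploy -l app.kubernetes.io/instance=wadm
kubectl rollout status deploy -l app=wasmcloud-operator -n wasmcloud-operator
# The operator registers an OAM apiservice that should report as Available
kubectl get apiservices.apiregistration.k8s.io v1beta1.core.oam.dev
```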
@@ -157,7 +216,6 @@ data:
   include [Kind](https://kind.sigs.k8s.io/) or Docker Desktop.
 - `RUST_LOG=info cargo run`

-
 ## Types crate

 This repo stores the types for any CRDs used by the operator in a separate
@@ -0,0 +1,3 @@
# Reporting a security issue

Please refer to the [wasmCloud Security Process and Policy](https://github.com/wasmCloud/wasmCloud/blob/main/SECURITY.md) for details on how to report security issues and vulnerabilities.
@@ -0,0 +1,23 @@
# Patterns to ignore when building packages.
# This supports shell glob matching, relative path matching, and
# negation (prefixed with !). Only one pattern per line.
.DS_Store
# Common VCS dirs
.git/
.gitignore
.bzr/
.bzrignore
.hg/
.hgignore
.svn/
# Common backup files
*.swp
*.bak
*.tmp
*.orig
*~
# Various IDEs
.project
.idea/
*.tmproj
.vscode/
@@ -0,0 +1,16 @@
apiVersion: v2
name: wasmcloud-operator
description: A Helm chart for deploying the wasmcloud-operator on Kubernetes

type: application

# This is the chart version. This version number should be incremented each time you make changes
# to the chart and its templates, including the app version.
# Versions are expected to follow Semantic Versioning (https://semver.org/)
version: 0.1.6

# This is the version number of the application being deployed. This version number should be
# incremented each time you make changes to the application. Versions are not expected to
# follow Semantic Versioning. They should reflect the version the application is using.
# It is recommended to use it with quotes.
appVersion: "0.4.0"
@@ -0,0 +1,3 @@
validate-maintainers: false
target-branch: main # TODO: Remove this once chart-testing 3.10.1+ is released
helm-extra-args: --timeout 60s
@@ -0,0 +1,96 @@
{{/*
Expand the name of the chart.
*/}}
{{- define "wasmcloud-operator.name" -}}
{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" }}
{{- end }}

{{/*
Create a default fully qualified app name.
We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
If release name contains chart name it will be used as a full name.
*/}}
{{- define "wasmcloud-operator.fullname" -}}
{{- if .Values.fullnameOverride }}
{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" }}
{{- else }}
{{- $name := default .Chart.Name .Values.nameOverride }}
{{- if contains $name .Release.Name }}
{{- .Release.Name | trunc 63 | trimSuffix "-" }}
{{- else }}
{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" }}
{{- end }}
{{- end }}
{{- end }}

{{/*
Create chart name and version as used by the chart label.
*/}}
{{- define "wasmcloud-operator.chart" -}}
{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }}
{{- end }}

{{/*
Namespace that's used for setting up cluster role bindings and such
*/}}
{{- define "wasmcloud-operator.namespace" -}}
{{- default "default" .Release.Namespace }}
{{- end }}

{{/*
Common labels
*/}}
{{- define "wasmcloud-operator.labels" -}}
helm.sh/chart: {{ include "wasmcloud-operator.chart" . }}
{{ include "wasmcloud-operator.selectorLabels" . }}
app.kubernetes.io/component: operator
{{- if .Chart.AppVersion }}
app.kubernetes.io/version: {{ .Chart.AppVersion | quote }}
{{- end }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
app.kubernetes.io/part-of: wasmcloud-operator
{{- with .Values.additionalLabels }}
{{ . | toYaml }}
{{- end }}
{{- end }}

{{/*
Selector labels
*/}}
{{- define "wasmcloud-operator.selectorLabels" -}}
app.kubernetes.io/name: {{ include "wasmcloud-operator.name" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
{{- end }}

{{/*
Create the name of the service account to use
*/}}
{{- define "wasmcloud-operator.service-account" -}}
{{- if .Values.serviceAccount.create }}
{{- default (include "wasmcloud-operator.fullname" .) .Values.serviceAccount.name }}
{{- else }}
{{- default "default" .Values.serviceAccount.name }}
{{- end }}
{{- end }}

{{/*
Create the name of the cluster role to use
*/}}
{{- define "wasmcloud-operator.cluster-role" -}}
{{- if .Values.serviceAccount.create }}
{{- default (include "wasmcloud-operator.fullname" .) .Values.serviceAccount.name }}
{{- else }}
{{- default "default" .Values.serviceAccount.name }}
{{- end }}
{{- end }}

{{/*
Create the name of the cluster role binding to use
*/}}
{{- define "wasmcloud-operator.cluster-role-binding" -}}
{{- if .Values.serviceAccount.create }}
{{- default (include "wasmcloud-operator.fullname" .) .Values.serviceAccount.name }}
{{- else }}
{{- default "default" .Values.serviceAccount.name }}
{{- end }}
{{- end }}
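The helpers above only define named templates; to see how they expand into the manifests that follow, the chart can be rendered locally without installing it. A sketch, assuming a checkout of this repository and a release name of `my-release`:

```sh
helm template my-release charts/wasmcloud-operator
```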
@@ -0,0 +1,118 @@
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  name: {{ include "wasmcloud-operator.cluster-role" . }}
  labels:
    {{- include "wasmcloud-operator.labels" . | nindent 4 }}
rules:
  - apiGroups:
      - ""
    resources:
      - secrets
      - services
      - configmaps
      - serviceaccounts
      - pods
    verbs:
      - get
      - list
      - watch
      - create
      - delete
      - patch
      - update
  - apiGroups:
      - apps
    resources:
      - deployments
      - daemonsets
    verbs:
      - get
      - list
      - watch
      - create
      - delete
      - patch
      - update
  - apiGroups:
      - rbac.authorization.k8s.io
    resources:
      - rolebindings
      - roles
    verbs:
      - get
      - list
      - watch
      - create
      - delete
      - patch
  - apiGroups:
      - apiextensions.k8s.io
    resources:
      - customresourcedefinitions
    verbs:
      - get
      - list
      - watch
      - create
      - delete
      - patch
  - apiGroups:
      - apiregistration.k8s.io
    resources:
      - apiservices
    verbs:
      - create
      - delete
      - get
      - list
      - patch
      - update
  - apiGroups:
      - discovery.k8s.io
    resources:
      - endpointslices
    verbs:
      - create
      - delete
      - get
      - list
      - patch
      - update
  - apiGroups:
      - k8s.wasmcloud.dev
    resources:
      - wasmcloudhostconfigs
      - wasmcloudhostconfigs/status
    verbs:
      - "*"
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: {{ include "wasmcloud-operator.cluster-role-binding" . }}
  labels:
    {{- include "wasmcloud-operator.labels" . | nindent 4 }}
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: {{ include "wasmcloud-operator.cluster-role" . }}
subjects:
  - apiGroup: ""
    kind: ServiceAccount
    name: {{ include "wasmcloud-operator.service-account" . }}
    namespace: {{ include "wasmcloud-operator.namespace" . }}
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: {{ include "wasmcloud-operator.cluster-role-binding" . }}-delegator
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: system:auth-delegator
subjects:
  - apiGroup: ""
    kind: ServiceAccount
    name: {{ include "wasmcloud-operator.service-account" . }}
    namespace: {{ include "wasmcloud-operator.namespace" . }}
@@ -0,0 +1,61 @@
apiVersion: apps/v1
kind: Deployment
metadata:
  name: {{ include "wasmcloud-operator.fullname" . }}
  labels:
    {{- include "wasmcloud-operator.labels" . | nindent 4 }}
spec:
  replicas: 1
  selector:
    matchLabels:
      {{- include "wasmcloud-operator.selectorLabels" . | nindent 6 }}
  template:
    metadata:
      {{- with .Values.podAnnotations }}
      annotations:
        {{- toYaml . | nindent 8 }}
      {{- end }}
      labels:
        {{- include "wasmcloud-operator.labels" . | nindent 8 }}
        {{- with .Values.podLabels }}
        {{- toYaml . | nindent 8 }}
        {{- end }}
    spec:
      {{- with .Values.imagePullSecrets }}
      imagePullSecrets:
        {{- toYaml . | nindent 8 }}
      {{- end }}
      serviceAccountName: {{ include "wasmcloud-operator.service-account" . }}
      securityContext:
        {{- toYaml .Values.podSecurityContext | nindent 8 }}
      containers:
        - name: {{ .Chart.Name }}
          securityContext:
            {{- toYaml .Values.securityContext | nindent 12 }}
          image: "{{ .Values.image.repository }}:{{ .Values.image.tag | default .Chart.AppVersion }}"
          imagePullPolicy: {{ .Values.image.pullPolicy }}
          env:
            - name: RUST_LOG
              value: info,async_nats=error
            - name: POD_NAMESPACE
              valueFrom:
                fieldRef:
                  fieldPath: metadata.namespace
          ports:
            - name: https
              containerPort: {{ .Values.service.port }}
              protocol: TCP
          resources:
            {{- toYaml .Values.resources | nindent 12 }}
      {{- with .Values.nodeSelector }}
      nodeSelector:
        {{- toYaml . | nindent 8 }}
      {{- end }}
      {{- with .Values.affinity }}
      affinity:
        {{- toYaml . | nindent 8 }}
      {{- end }}
      {{- with .Values.tolerations }}
      tolerations:
        {{- toYaml . | nindent 8 }}
      {{- end }}
@@ -0,0 +1,15 @@
apiVersion: v1
kind: Service
metadata:
  name: {{ include "wasmcloud-operator.fullname" . }}
  labels:
    {{- include "wasmcloud-operator.labels" . | nindent 4 }}
spec:
  type: {{ .Values.service.type }}
  ports:
    - port: {{ .Values.service.port }}
      targetPort: https
      protocol: TCP
      name: https
  selector:
    {{- include "wasmcloud-operator.selectorLabels" . | nindent 4 }}
@@ -0,0 +1,13 @@
{{- if .Values.serviceAccount.create -}}
apiVersion: v1
kind: ServiceAccount
metadata:
  name: {{ include "wasmcloud-operator.service-account" . }}
  labels:
    {{- include "wasmcloud-operator.labels" . | nindent 4 }}
  {{- with .Values.serviceAccount.annotations }}
  annotations:
    {{- toYaml . | nindent 4 }}
  {{- end }}
automountServiceAccountToken: {{ .Values.serviceAccount.automount }}
{{- end }}
@@ -0,0 +1,15 @@
apiVersion: v1
kind: Pod
metadata:
  name: "{{ include "wasmcloud-operator.fullname" . }}-test-connection"
  labels:
    {{- include "wasmcloud-operator.labels" . | nindent 4 }}
  annotations:
    "helm.sh/hook": test
spec:
  containers:
    - name: wget
      image: alpine
      command: ['wget']
      args: ['--no-check-certificate', 'https://{{ include "wasmcloud-operator.fullname" . }}:{{ .Values.service.port }}/apis/core.oam.dev/v1beta1']
  restartPolicy: Never
@@ -0,0 +1,63 @@
# Default values for wasmcloud-operator.
# This is a YAML-formatted file.
# Declare variables to be passed into your templates.

image:
  repository: ghcr.io/wasmcloud/wasmcloud-operator
  pullPolicy: IfNotPresent
  # Overrides the image tag whose default is the chart appVersion.
  tag: ""

imagePullSecrets: []
nameOverride: ""
fullnameOverride: ""

additionalLabels: {}
  # app: wasmcloud-operator

serviceAccount:
  # Specifies whether a service account should be created
  create: true
  # Automatically mount a ServiceAccount's API credentials?
  automount: true
  # Annotations to add to the service account
  annotations: {}
  # The name of the service account to use.
  # If not set and create is true, a name is generated using the fullname template
  name: ""

podAnnotations: {}
podLabels: {}

podSecurityContext: {}
  # fsGroup: 2000

securityContext: {}
  # capabilities:
  #   drop:
  #     - ALL
  # readOnlyRootFilesystem: true
  # runAsNonRoot: true
  # runAsUser: 1000

service:
  type: ClusterIP
  port: 8443

resources: {}
  # We usually recommend not to specify default resources and to leave this as a conscious
  # choice for the user. This also increases chances charts run on environments with little
  # resources, such as Minikube. If you do want to specify resources, uncomment the following
  # lines, adjust them as necessary, and remove the curly braces after 'resources:'.
  # limits:
  #   cpu: 100m
  #   memory: 128Mi
  # requests:
  #   cpu: 100m
  #   memory: 128Mi

nodeSelector: {}

tolerations: []

affinity: {}
@@ -1,11 +1,16 @@
 [package]
 name = "wasmcloud-operator-types"
-version = "0.1.0"
+version = "0.1.9"
 edition = "2021"

+[package.metadata.cargo-machete]
+# NOTE: This exists because kube-derive needs it, and for reasons I don't
+# fully understand, it's not coming through kube-derive's own depedendencies.
+ignored = ["serde_json"]
+
 [dependencies]
-k8s-openapi = {workspace = true}
-kube = {workspace = true, features = ["derive"]}
-schemars = {workspace = true}
-serde = {workspace = true}
-serde_json = {workspace = true}
+k8s-openapi = { workspace = true }
+kube = { workspace = true, features = ["derive"] }
+schemars = { workspace = true }
+serde = { workspace = true }
+serde_json = { workspace = true }
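For context, the `[package.metadata.cargo-machete]` table above configures cargo-machete, a tool that flags unused dependencies; the `ignored` list suppresses its false positive for `serde_json`. A sketch of running it, assuming the tool is installed:

```sh
cargo install cargo-machete  # one-time install
cargo machete                # reports unused dependencies, honoring the ignored list
```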
@@ -1,8 +1,8 @@
-use k8s_openapi::api::core::v1::ResourceRequirements;
+use k8s_openapi::api::core::v1::{Container, PodSpec, ResourceRequirements, Volume};
 use kube::CustomResource;
-use schemars::JsonSchema;
+use schemars::{gen::SchemaGenerator, schema::Schema, JsonSchema};
 use serde::{Deserialize, Serialize};
-use std::collections::HashMap;
+use std::collections::{BTreeMap, BTreeSet};

 #[derive(CustomResource, Deserialize, Serialize, Clone, Debug, JsonSchema)]
 #[cfg_attr(test, derive(Default))]
@@ -10,33 +10,195 @@ use std::collections::HashMap;
     kind = "WasmCloudHostConfig",
     group = "k8s.wasmcloud.dev",
     version = "v1alpha1",
-    shortname = "chc",
+    shortname = "whc",
     namespaced,
     status = "WasmCloudHostConfigStatus",
     printcolumn = r#"{"name":"App Count", "type":"integer", "jsonPath":".status.app_count"}"#
 )]
 #[serde(rename_all = "camelCase")]
 pub struct WasmCloudHostConfigSpec {
+    /// The number of replicas to use for the wasmCloud host Deployment.
     #[serde(default = "default_host_replicas")]
     pub host_replicas: u32,
-    pub issuers: Vec<String>,
+    /// DEPRECATED: A list of cluster issuers to use when provisioning hosts. See
+    /// https://wasmcloud.com/docs/deployment/security/zero-trust-invocations for more information.
+    #[deprecated(since = "0.3.1", note = "Removed in wasmcloud 1.0.0")]
+    pub issuers: Option<Vec<String>>,
+    /// The lattice to use for these hosts.
     pub lattice: String,
-    pub host_labels: Option<HashMap<String, String>>,
+    /// An optional set of labels to apply to these hosts.
+    pub host_labels: Option<BTreeMap<String, String>>,
+    /// The version of the wasmCloud host to deploy.
     pub version: String,
-    pub secret_name: String,
+    /// The image to use for the wasmCloud host.
+    /// If not provided, the default image for the version will be used.
+    /// Also if provided, the version field will be ignored.
+    pub image: Option<String>,
+    /// The image to use for the NATS leaf that is deployed alongside the wasmCloud host.
+    /// If not provided, the default upstream image will be used.
+    /// If provided, it should be fully qualified by including the image tag.
+    pub nats_leaf_image: Option<String>,
+    /// Optional. The name of a secret containing a set of NATS credentials under 'nats.creds' key.
+    pub secret_name: Option<String>,
+    /// Enable structured logging for host logs.
     pub enable_structured_logging: Option<bool>,
+    /// Name of a secret containing the registry credentials
     pub registry_credentials_secret: Option<String>,
     pub resources: Option<WasmCloudHostConfigResources>,
+    /// The control topic prefix to use for the host.
     pub control_topic_prefix: Option<String>,
+    /// The leaf node domain to use for the NATS sidecar. Defaults to "leaf".
     #[serde(default = "default_leaf_node_domain")]
     pub leaf_node_domain: String,
+    /// Enable the config service for this host.
     #[serde(default)]
     pub config_service_enabled: bool,
+    /// The address of the NATS server to connect to. Defaults to "nats://nats.default.svc.cluster.local".
     #[serde(default = "default_nats_address")]
     pub nats_address: String,
+    /// The port of the NATS server to connect to. Defaults to 4222.
+    #[serde(default = "default_nats_port")]
+    pub nats_client_port: u16,
+    /// The port of the NATS server to connect to for leaf node connections. Defaults to 7422.
+    #[serde(default = "default_nats_leafnode_port")]
+    pub nats_leafnode_port: u16,
+    /// The Jetstream domain to use for the NATS sidecar. Defaults to "default".
     #[serde(default = "default_jetstream_domain")]
     pub jetstream_domain: String,
+    /// Allow the host to deploy using the latest tag on OCI components or providers
     #[serde(default)]
     pub allow_latest: bool,
+    /// Allow the host to pull artifacts from OCI registries insecurely
     #[serde(default)]
     pub allowed_insecure: Option<Vec<String>>,
+    /// The log level to use for the host. Defaults to "INFO".
     #[serde(default = "default_log_level")]
     pub log_level: String,
+    pub policy_service: Option<PolicyService>,
+    /// Kubernetes scheduling options for the wasmCloud host.
+    pub scheduling_options: Option<KubernetesSchedulingOptions>,
+    /// Observability options for configuring the OpenTelemetry integration
+    pub observability: Option<ObservabilityConfiguration>,
+    /// Certificates: Authorities, client certificates
+    pub certificates: Option<WasmCloudHostCertificates>,
+    /// wasmCloud secrets topic prefix, must not be empty if set.
+    pub secrets_topic_prefix: Option<String>,
+    /// Maximum memory in bytes that components can use.
+    pub max_linear_memory_bytes: Option<u32>,
 }

+#[derive(Serialize, Deserialize, Clone, Debug, JsonSchema)]
+#[serde(rename_all = "camelCase")]
+pub struct PolicyService {
+    pub topic: Option<String>,
+    pub timeout_ms: Option<u32>,
+    pub changes_topic: Option<String>,
+}
+
+#[derive(Serialize, Deserialize, Clone, Debug, JsonSchema)]
+#[serde(rename_all = "camelCase")]
+pub struct KubernetesSchedulingOptions {
+    /// Run hosts as a DaemonSet instead of a Deployment.
+    #[serde(default)]
+    pub daemonset: bool,
+    /// Kubernetes resources to allocate for the host. See
+    /// https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ for valid
+    /// values to use here.
+    pub resources: Option<WasmCloudHostConfigResources>,
+    /// Any other pod template spec options to set for the underlying wasmCloud host pods.
+    #[schemars(schema_with = "pod_schema")]
+    #[serde(default, skip_serializing_if = "Option::is_none")]
+    pub pod_template_additions: Option<PodSpec>,
+    /// Allow for customization of either the wasmcloud or nats leaf container inside of the wasmCloud host pod.
+    pub container_template_additions: Option<ContainerTemplateAdditions>,
+}
+
+#[derive(Serialize, Deserialize, Clone, Debug, JsonSchema)]
+#[serde(rename_all = "camelCase")]
+pub struct ContainerTemplateAdditions {
+    #[schemars(schema_with = "container_schema")]
+    #[serde(default, skip_serializing_if = "Option::is_none")]
+    pub nats: Option<Container>,
+    #[schemars(schema_with = "container_schema")]
+    #[serde(default, skip_serializing_if = "Option::is_none")]
+    pub wasmcloud: Option<Container>,
+}
+
+#[derive(Serialize, Deserialize, Clone, Debug, JsonSchema)]
+#[serde(rename_all = "camelCase")]
+pub struct ObservabilityConfiguration {
+    #[serde(default)]
+    pub enable: bool,
+    pub endpoint: String,
+    pub protocol: Option<OtelProtocol>,
+    pub logs: Option<OtelSignalConfiguration>,
+    pub metrics: Option<OtelSignalConfiguration>,
+    pub traces: Option<OtelSignalConfiguration>,
+}
+
+#[derive(Serialize, Deserialize, Debug, Clone, JsonSchema)]
+#[serde(rename_all = "camelCase")]
+pub enum OtelProtocol {
+    Grpc,
+    Http,
+}
+
+impl std::fmt::Display for OtelProtocol {
+    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+        write!(
+            f,
+            "{}",
+            match self {
+                OtelProtocol::Grpc => "grpc",
+                OtelProtocol::Http => "http",
+            }
+        )
+    }
+}
+
+#[derive(Serialize, Deserialize, Debug, Clone, JsonSchema)]
+#[serde(rename_all = "camelCase")]
+pub struct OtelSignalConfiguration {
+    pub enable: Option<bool>,
+    pub endpoint: Option<String>,
+}
+
+/// This is a workaround for the fact that we can't override the PodSpec schema to make containers
+/// an optional field. It generates the OpenAPI schema for the PodSpec type the same way that
+/// kube.rs does while dropping any required fields.
+fn pod_schema(_gen: &mut SchemaGenerator) -> Schema {
+    let gen = schemars::gen::SchemaSettings::openapi3()
+        .with(|s| {
+            s.inline_subschemas = true;
+            s.meta_schema = None;
+        })
+        .with_visitor(kube::core::schema::StructuralSchemaRewriter)
+        .into_generator();
+    let mut val = gen.into_root_schema_for::<PodSpec>();
+    // Drop `containers` as a required field, along with any others.
+    val.schema.object.as_mut().unwrap().required = BTreeSet::new();
+    val.schema.into()
+}
+
+/// This is a workaround for the fact that we can't override the Container schema to make name
+/// an optional field. It generates the OpenAPI schema for the Container type the same way that
+/// kube.rs does while dropping any required fields.
+fn container_schema(_gen: &mut SchemaGenerator) -> Schema {
+    let gen = schemars::gen::SchemaSettings::openapi3()
+        .with(|s| {
+            s.inline_subschemas = true;
+            s.meta_schema = None;
+        })
+        .with_visitor(kube::core::schema::StructuralSchemaRewriter)
+        .into_generator();
+    let mut val = gen.into_root_schema_for::<Container>();
+    // Drop `name` as a required field as it will be filled in from container
+    // definition coming the controller that this configuration gets merged into.
+    val.schema.object.as_mut().unwrap().required = BTreeSet::new();
+    val.schema.into()
+}
+
 fn default_host_replicas() -> u32 {
     1
 }

 fn default_jetstream_domain() -> String {
@@ -55,6 +217,19 @@ fn default_log_level() -> String {
     "INFO".to_string()
 }

+fn default_nats_port() -> u16 {
+    4222
+}
+
+fn default_nats_leafnode_port() -> u16 {
+    7422
+}
+
+#[derive(Deserialize, Serialize, Clone, Debug, JsonSchema)]
+pub struct WasmCloudHostCertificates {
+    pub authorities: Option<Vec<Volume>>,
+}
+
 #[derive(Serialize, Deserialize, Clone, Debug, JsonSchema)]
 pub struct WasmCloudHostConfigResources {
     pub nats: Option<ResourceRequirements>,
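Because nearly every field above is either `Option`-typed or carries a serde default, a minimal custom resource only needs the required `lattice` and `version` fields; this mirrors the small example later in this changeset:

```sh
# Apply a minimal WasmCloudHostConfig; everything else falls back to the defaults above
kubectl apply -f - <<EOF
apiVersion: k8s.wasmcloud.dev/v1alpha1
kind: WasmCloudHostConfig
metadata:
  name: wasmcloud-host
spec:
  lattice: default
  version: "1.0.4"
EOF
```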
@@ -15,7 +15,7 @@ spec:
     spec:
       serviceAccountName: wasmcloud-operator
       containers:
-        - image: ghcr.io/wasmcloud/wasmcloud-operator:latest
+        - image: ghcr.io/wasmcloud/wasmcloud-operator:0.5.0
           imagePullPolicy: Always
           name: wasmcloud-operator
           ports:
@@ -49,6 +49,7 @@ rules:
       - services
       - configmaps
       - serviceaccounts
+      - pods
     verbs:
       - get
       - list
@@ -61,6 +62,7 @@ rules:
       - apps
     resources:
      - deployments
+     - daemonsets
     verbs:
       - get
       - list
@@ -103,6 +105,17 @@ rules:
       - list
       - patch
       - update
+  - apiGroups:
+      - discovery.k8s.io
+    resources:
+      - endpointslices
+    verbs:
+      - create
+      - delete
+      - get
+      - list
+      - patch
+      - update
   - apiGroups:
       - k8s.wasmcloud.dev
     resources:
@@ -1,3 +1,8 @@
 - op: add
   path: /spec/template/spec/containers/0/image
   value: localhost:5001/wasmcloud-operator:latest
+- op: replace
+  path: /spec/template/spec/containers/0/env/0
+  value:
+    name: RUST_LOG
+    value: info,controller::services=debug,async_nats=warn,controller::controller=debug
@@ -0,0 +1,97 @@
apiVersion: k8s.wasmcloud.dev/v1alpha1
kind: WasmCloudHostConfig
metadata:
  name: my-wasmcloud-cluster
  namespace: default
spec:
  # Optional: Number of hosts (pods). Defaults to 1.
  hostReplicas: 1
  # Required: The lattice to connect the hosts to.
  lattice: default
  # Optional: Additional labels to apply to the host other than the defaults set in the controller.
  hostLabels:
    test: value
    cluster: kind
  # Required: Which wasmCloud version to use.
  version: "1.0.4"
  # Optional: The image to use for the wasmCloud host.
  # If provided, the 'version' field will be ignored.
  image: "registry/wasmcloud:tag"
  # Optional: The image to use for the NATS leaf that is deployed alongside the wasmCloud host.
  # If not provided, the default upstream image will be used.
  natsLeafImage: "registry/nats:tag"
  # Optional. The name of a secret containing a set of NATS credentials under 'nats.creds' key.
  secretName: "wasmcloud-host-nats-secret"
  # Optional: Enable structured logging for host logs. Defaults to "false".
  enableStructuredLogging: true
  # Optional: The name of a secret containing the registry credentials.
  # See https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/#create-a-secret-by-providing-credentials-on-the-command-line
  registryCredentialsSecret: "wasmcloud-pull-secret"
  # Optional: The control topic prefix to use for the host. Defaults to "wasmbus.ctl"
  controlTopicPrefix: "wasmbus.custom-ctl"
  # Optional: The leaf node domain to use for the NATS sidecar. Defaults to "leaf".
  leafNodeDomain: "custom-leaf"
  # Optional: Enable the config service for this host. Defaults to "false".
  # Makes wasmCloud host issue requests to a config service on startup.
  configServiceEnabled: true
  # Optional: The log level to use for the host. Defaults to "INFO".
  logLevel: INFO
  # Optional: The address of the NATS server to connect to. Defaults to "nats://nats.default.svc.cluster.local".
  natsAddress: nats://nats.default.svc.cluster.local
  # Optional: Allow the host to deploy using the latest tag on OCI components or providers. Defaults to "false".
  allowLatest: true
  # Optional: Allow the host to pull artifacts from OCI registries insecurely.
  allowedInsecure:
    - "localhost:5001"
    - "kind-registry:5000"
  # Optional: Policy service configuration.
  policyService:
    # If provided, enables policy checks on start actions and component invocations.
    topic: "wasmcloud.policy"
    # If provided, allows the host to subscribe to updates on past policy decisions. Requires 'topic' above to be set.
    changesTopic: "wasmcloud.policy.changes"
    # If provided, allows setting a custom timeout for requesting policy decisions. Defaults to 1000. Requires 'topic' to be set.
    timeoutMs: 10000
  # Optional: Observability options for configuring the OpenTelemetry integration.
  observability:
    # NOTE: Enables all signals (logs/metrics/traces) at once. Set it to 'false' and enable each signal individually in case you don't need all of them.
    enable: true
    endpoint: "otel-collector.svc"
    # Either 'grpc' or 'http'
    protocol: "http"
    logs:
      enable: false
      endpoint: "logs-specific-otel-collector.svc"
    metrics:
      enable: false
      endpoint: "metrics-specific-otel-collector.svc"
    traces:
      enable: false
      endpoint: "traces-specific-otel-collector.svc"
  # Optional: Subject prefix that will be used by the host to query for wasmCloud Secrets.
  # See https://wasmcloud.com/docs/concepts/secrets for more context
  secretsTopicPrefix: "wasmcloud.secrets"
  # Optional: The maximum amount of memory bytes that a component can allocate.
  maxLinearMemoryBytes: 20000000
  # Optional: Additional options to control how the underlying wasmCloud hosts are scheduled in Kubernetes.
  # This includes setting resource requirements for the nats and wasmCloud host
  # containers along with any additional pot template settings.
  schedulingOptions:
    # Optional: Enable the following to run the wasmCloud hosts as a DaemonSet. Defaults to "false".
    daemonset: true
    # Optional: Set the resource requirements for the nats and wasmCloud host containers.
    # See https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ for valid values
    resources:
      nats:
        requests:
          cpu: "1"
      wasmCloudHost:
        requests:
          cpu: "1"
    # Optional: Any additional pod template settings to apply to the wasmCloud host pods.
    # See https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.29/#podspec-v1-core for all valid options.
    # Note that you *cannot* set the `containers` field here as it is managed by the controller.
    podTemplateAdditions:
      spec:
        nodeSelector:
          kubernetes.io/os: linux
@@ -0,0 +1,87 @@
# Example setup

This example shows the bare minimum requirements to deploy applications on wasmCloud.

It relies on the Kubernetes `default` namespace for simplicity.

## Install [NATS](https://github.com/nats-io/nats-server)

```bash
helm repo add nats https://nats-io.github.io/k8s/helm/charts/
helm upgrade --install -f nats-values.yaml nats nats/nats
```

Validate installation with:

```bash
# make sure pods are ready
kubectl rollout status deploy,sts -l app.kubernetes.io/instance=nats
```

## Install wasmCloud Application Deployment Manager - [wadm](https://github.com/wasmCloud/wadm)

```sh
helm install wadm -f wadm-values.yaml oci://ghcr.io/wasmcloud/charts/wadm
```

Validate installation with:

```bash
# make sure pods are ready
kubectl rollout status deploy -l app.kubernetes.io/instance=wadm
```

## Install the operator

```sh
kubectl apply -k ../../deploy/base
```

Validate installation with:

```bash
# make sure pods are ready
kubectl rollout status deploy -l app=wasmcloud-operator -n wasmcloud-operator
# apiservice should be available
kubectl get apiservices.apiregistration.k8s.io v1beta1.core.oam.dev
```

## Create wasmcloud cluster

```bash
kubectl apply -f wasmcloud-host.yaml
```

Check wasmCloud host status with:

```bash
kubectl describe wasmcloudhostconfig wasmcloud-host
```

## Managing applications using kubectl

Install the rust hello world application:

```bash
kubectl apply -f hello-world-application.yaml
```

Check application status with:

```bash
kubectl get applications
```

## Managing applications with wash

Port forward into the NATS cluster. 4222 = NATS Service, 4223 = NATS Websockets

```bash
kubectl port-forward svc/nats 4222:4222 4223:4223
```

In another shell:

```bash
wash app list
```
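With the port-forward active, manifests can also be deployed over NATS rather than through the Kubernetes API; a sketch, assuming a local `wash` install and that `wash app deploy` (the counterpart of the `wash app list` command above) accepts the example manifest:

```sh
wash app deploy hello-world-application.yaml
```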
@ -0,0 +1,53 @@
|
|||
apiVersion: core.oam.dev/v1beta1
|
||||
kind: Application
|
||||
metadata:
|
||||
name: hello-world
|
||||
annotations:
|
||||
version: v0.0.1
|
||||
description: "HTTP hello world demo in Rust, using the WebAssembly Component Model and WebAssembly Interfaces Types (WIT)"
|
||||
wasmcloud.dev/authors: wasmCloud team
|
||||
wasmcloud.dev/source-url: https://github.com/wasmCloud/wasmCloud/blob/main/examples/rusg/components/http-hello-world/wadm.yaml
|
||||
wasmcloud.dev/readme-md-url: https://github.com/wasmCloud/wasmCloud/blob/main/examples/rusg/components/http-hello-world/README.md
|
||||
wasmcloud.dev/homepage: https://github.com/wasmCloud/wasmCloud/tree/main/examples/rusg/components/http-hello-world
|
||||
wasmcloud.dev/categories: |
|
||||
http,http-server,rust,hello-world,example
|
||||
spec:
|
||||
components:
|
||||
- name: http-component
|
||||
type: component
|
||||
properties:
|
||||
image: ghcr.io/wasmcloud/components/http-hello-world-rust:0.1.0
|
||||
traits:
|
||||
# Govern the spread/scheduling of the component
|
||||
- type: daemonscaler
|
||||
properties:
|
||||
replicas: 100
|
||||
|
||||
# Add a capability provider that enables HTTP access
|
||||
- name: httpserver
|
||||
type: capability
|
||||
properties:
|
||||
image: ghcr.io/wasmcloud/http-server:0.21.0
|
||||
traits:
|
||||
# Establish a unidirectional link from this http server provider (the "source")
|
||||
# to the `http-component` component (the "target") so the component can handle incoming HTTP requests.
|
||||
#
|
||||
# The source (this provider) is configured such that the HTTP server listens on 0.0.0.0:8000.
|
||||
# When running the application on Kubernetes with the wasmCloud operator, you can change the
|
||||
# port but the address must be 0.0.0.0.
|
||||
- type: link
|
||||
properties:
|
||||
target: http-component
|
||||
namespace: wasi
|
||||
package: http
|
||||
interfaces: [incoming-handler]
|
||||
source_config:
|
||||
- name: default-http
|
||||
properties:
|
||||
address: 0.0.0.0:8000
|
||||
# When running the application on Kubernetes with the wasmCloud operator,
|
||||
# the operator automatically creates a Kubernetes service for applications that use
|
||||
# the httpserver provider with a daemonscaler.
|
||||
- type: daemonscaler
|
||||
properties:
|
||||
replicas: 1
|
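Because the manifest pairs the httpserver provider with a daemonscaler, the operator creates a matching Kubernetes Service when this application is deployed. A quick smoke test, assuming the Service takes the application's name (`hello-world`) and exposes the port from `default-http`:

```bash
# Forward the operator-created Service and send a request to the component
kubectl port-forward svc/hello-world 8000:8000 &
curl http://localhost:8000
```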
@ -0,0 +1,16 @@
config:
  cluster:
    enabled: true
    replicas: 3
  leafnodes:
    enabled: true
  websocket:
    enabled: true
    port: 4223
  jetstream:
    enabled: true
    fileStore:
      pvc:
        size: 10Gi
    merge:
      domain: default
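These values are shaped for the official NATS Helm chart: a 3-node cluster, websockets on 4223, and JetStream with a 10Gi file store in the `default` JS domain. A sketch of applying them, assuming the file is saved as `nats-values.yaml` and the upstream chart repo is used:

```bash
# Install NATS with the values above
helm repo add nats https://nats-io.github.io/k8s/helm/charts/
helm upgrade --install nats nats/nats -f nats-values.yaml
```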
@ -0,0 +1,6 @@
wadm:
  image:
    tag: v0.12.2
  config:
    nats:
      server: "nats.default.svc.cluster.local:4222"
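These values pin the wadm image tag and point it at the NATS Service installed above. A sketch of installing wadm with them; the OCI chart reference is an assumption, so check the wadm release notes for the published chart location:

```bash
# Install wadm with the values above (saved as wadm-values.yaml)
helm install wadm -f wadm-values.yaml oci://ghcr.io/wasmcloud/charts/wadm
```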
@ -0,0 +1,7 @@
apiVersion: k8s.wasmcloud.dev/v1alpha1
kind: WasmCloudHostConfig
metadata:
  name: wasmcloud-host
spec:
  lattice: default
  version: "1.0.4"
@ -0,0 +1,64 @@
#!/bin/sh
set -o errexit

# 1. Create registry container unless it already exists
reg_name='kind-registry'
reg_port='5001'
if [ "$(docker inspect -f '{{.State.Running}}' "${reg_name}" 2>/dev/null || true)" != 'true' ]; then
  docker run \
    -d --restart=always -p "127.0.0.1:${reg_port}:5000" --name "${reg_name}" \
    registry:2
fi

# 2. Create kind cluster with containerd registry config dir enabled
# TODO: kind will eventually enable this by default and this patch will
# be unnecessary.
#
# See:
# https://github.com/kubernetes-sigs/kind/issues/2875
# https://github.com/containerd/containerd/blob/main/docs/cri/config.md#registry-configuration
# See: https://github.com/containerd/containerd/blob/main/docs/hosts.md
cat <<EOF | kind create cluster --image kindest/node:v1.29.4 --config=-
kind: Cluster
apiVersion: kind.x-k8s.io/v1alpha4
containerdConfigPatches:
- |-
  [plugins."io.containerd.grpc.v1.cri".registry]
    config_path = "/etc/containerd/certs.d"
EOF

# 3. Add the registry config to the nodes
#
# This is necessary because localhost resolves to loopback addresses that are
# network-namespace local.
# In other words: localhost in the container is not localhost on the host.
#
# We want a consistent name that works from both ends, so we tell containerd to
# alias localhost:${reg_port} to the registry container when pulling images
REGISTRY_DIR="/etc/containerd/certs.d/localhost:${reg_port}"
for node in $(kind get nodes); do
  docker exec "${node}" mkdir -p "${REGISTRY_DIR}"
  cat <<EOF | docker exec -i "${node}" cp /dev/stdin "${REGISTRY_DIR}/hosts.toml"
[host."http://${reg_name}:5000"]
EOF
done

# 4. Connect the registry to the cluster network if not already connected
# This allows kind to bootstrap the network but ensures they're on the same network
if [ "$(docker inspect -f='{{json .NetworkSettings.Networks.kind}}' "${reg_name}")" = 'null' ]; then
  docker network connect "kind" "${reg_name}"
fi

# 5. Document the local registry
# https://github.com/kubernetes/enhancements/tree/master/keps/sig-cluster-lifecycle/generic/1755-communicating-a-local-registry
cat <<EOF | kubectl apply -f -
apiVersion: v1
kind: ConfigMap
metadata:
  name: local-registry-hosting
  namespace: kube-public
data:
  localRegistryHosting.v1: |
    host: "localhost:${reg_port}"
    help: "https://kind.sigs.k8s.io/docs/user/local-registry/"
EOF
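After the script finishes, images pushed to `localhost:5001` on the host are pullable inside the cluster under the same name, thanks to the containerd alias configured in step 3. A sketch of verifying the wiring end to end (the image is just an illustration):

```bash
# Push an image through the host-side alias of the registry...
docker pull busybox:latest
docker tag busybox:latest localhost:5001/busybox:latest
docker push localhost:5001/busybox:latest

# ...then run it in the kind cluster, which resolves the same name
kubectl run registry-smoke-test --image=localhost:5001/busybox:latest --restart=Never -- sleep 5
```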
20 sample.yaml
@ -1,20 +0,0 @@
apiVersion: k8s.wasmcloud.dev/v1alpha1
kind: WasmCloudHostConfig
metadata:
  name: my-wasmcloud-cluster
  namespace: default
spec:
  hostReplicas: 2
  issuers:
    - CDKF6OKPOBQKAX57UOXO7SCHURTOZWKWIVPC2HFJTGFXY5VJX44ECEHH
  # The lattice to connect the hosts to
  lattice: 83a5b52e-17cf-4080-bac8-f844099f142e
  # Additional labels to apply to the host other than the defaults set in the controller
  hostLabels:
    test: value
  # Which wasmCloud version to use
  version: 0.81.0
  # The name of a secret in the same namespace that provides the required secrets.
  secretName: cluster-secrets
  logLevel: INFO
  natsAddress: nats://nats-cluster.default.svc.cluster.local
@ -0,0 +1,13 @@
use serde::{Deserialize, Serialize};

/// Configuration for the operator. If you are configuring the operator using environment variables
/// then all values need to be prefixed with "WASMCLOUD_OPERATOR".
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, Default)]
pub struct OperatorConfig {
    #[serde(default = "default_stream_replicas")]
    pub stream_replicas: u16,
}

fn default_stream_replicas() -> u16 {
    1
}
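With the `config` crate's `Environment::with_prefix("WASMCLOUD_OPERATOR")` source used in `main.rs`, the single field above maps to one environment variable. A sketch of overriding the default; the binary name here is an assumption based on the crate:

```bash
# Mirror the wadm events stream with 3 replicas, matching a 3-node JetStream cluster
WASMCLOUD_OPERATOR_STREAM_REPLICAS=3 ./wasmcloud-operator
```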
File diff suppressed because it is too large
@ -31,6 +31,9 @@ pub enum Error {
     #[error("Error retrieving secrets: {0}")]
     SecretError(String),
 
+    #[error("Certificate error: {0}")]
+    CertificateError(String),
+
     #[error("Error rendering template: {0}")]
     RenderError(#[from] RenderError),
 }

@ -44,6 +47,7 @@ impl IntoResponse for Error {
     }
 }
 
+pub mod config;
 pub mod controller;
 pub mod discovery;
 pub mod docker_secret;

@ -51,6 +55,7 @@ pub mod header;
 pub(crate) mod openapi;
 pub mod resources;
 pub mod router;
+pub(crate) mod services;
 pub(crate) mod table;
 
 pub use crate::controller::*;
38 src/main.rs
@ -1,7 +1,8 @@
 use anyhow::{anyhow, Result};
 use axum_server::{tls_rustls::RustlsConfig, Handle};
-use controller::{State, WasmcloudConfig};
+use controller::{config::OperatorConfig, State};
+
 use config::Config;
 use k8s_openapi::apiextensions_apiserver::pkg::apis::apiextensions::v1::CustomResourceDefinition;
 use k8s_openapi::apimachinery::pkg::apis::meta::v1::ObjectMeta;
 use k8s_openapi::kube_aggregator::pkg::apis::apiregistration::v1::{

@ -12,10 +13,12 @@ use kube::{
     client::Client,
     CustomResourceExt,
 };
-use opentelemetry::sdk::{
-    trace::{self, RandomIdGenerator, Sampler},
-    Resource as OTELResource,
+use opentelemetry::KeyValue;
+use opentelemetry_sdk::{
+    trace::{RandomIdGenerator, Sampler},
+    Resource,
 };
+use std::io::IsTerminal;
 use std::net::SocketAddr;
 use std::time::Duration;
 use tracing::{error, info};

@ -25,14 +28,27 @@ use wasmcloud_operator_types::v1alpha1::WasmCloudHostConfig;
 
 #[tokio::main]
 async fn main() -> Result<()> {
+    let args = std::env::args().collect::<Vec<_>>();
+    if args.iter().any(|arg| arg == "-V" || arg == "--version") {
+        let version = version();
+        println!("{} {version}", env!("CARGO_BIN_NAME"));
+        std::process::exit(0);
+    }
+
     let tracing_enabled = std::env::var("OTEL_EXPORTER_OTLP_ENDPOINT").is_ok();
     configure_tracing(tracing_enabled).map_err(|e| {
         error!("Failed to configure tracing: {}", e);
         e
     })?;
-    info!("Starting controller");
+    info!("Starting operator");
 
-    let config = WasmcloudConfig {};
+    let cfg = Config::builder()
+        .add_source(config::Environment::with_prefix("WASMCLOUD_OPERATOR"))
+        .build()
+        .map_err(|e| anyhow!("Failed to build config: {}", e))?;
+    let config: OperatorConfig = cfg
+        .try_deserialize()
+        .map_err(|e| anyhow!("Failed to parse config: {}", e))?;
 
     let client = Client::try_default().await?;
     install_crd(&client).await?;

@ -72,12 +88,12 @@ fn configure_tracing(enabled: bool) -> anyhow::Result<()> {
         .tracing()
         .with_exporter(opentelemetry_otlp::new_exporter().tonic())
         .with_trace_config(
-            trace::config()
+            opentelemetry_sdk::trace::config()
                 .with_sampler(Sampler::AlwaysOn)
                 .with_id_generator(RandomIdGenerator::default())
                 .with_max_attributes_per_span(32)
                 .with_max_events_per_span(32)
-                .with_resource(OTELResource::new(vec![opentelemetry::KeyValue::new(
+                .with_resource(Resource::new(vec![KeyValue::new(
                     "service.name",
                     "wasmcloud-operator",
                 )])),

@ -87,7 +103,7 @@ fn configure_tracing(enabled: bool) -> anyhow::Result<()> {
     let env_filter_layer = tracing_subscriber::EnvFilter::from_default_env();
     let log_layer = tracing_subscriber::fmt::layer()
         .with_writer(std::io::stderr)
-        .with_ansi(atty::is(atty::Stream::Stderr));
+        .with_ansi(std::io::stderr().is_terminal());
 
     let otel_layer = tracing_opentelemetry::layer().with_tracer(tracer);
 

@ -183,3 +199,7 @@ async fn install_crd(client: &Client) -> anyhow::Result<()> {
 
     Ok(())
 }
+
+fn version() -> &'static str {
+    option_env!("CARGO_VERSION_INFO").unwrap_or(env!("CARGO_PKG_VERSION"))
+}
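With `version()` wired into `main`, the binary now short-circuits on the version flag before any tracing or Kubernetes setup happens. A quick check (the binary name is assumed from `CARGO_BIN_NAME`):

```bash
# Prints "<binary name> <version>" and exits 0
wasmcloud-operator --version
```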
1109 src/openapi.rs
File diff suppressed because it is too large
@ -2,7 +2,7 @@ use std::collections::{BTreeMap, HashMap, HashSet};
 use std::sync::Arc;
 
 use anyhow::{anyhow, Error};
-use async_nats::ConnectOptions;
+use async_nats::{Client as NatsClient, ConnectError, ConnectOptions};
 use axum::{
     body::Bytes,
     extract::{Path, State as AxumState},

@ -12,21 +12,21 @@
 };
 use kube::{
     api::{Api, ListParams},
-    client::Client,
+    client::Client as KubeClient,
     core::{ListMeta, ObjectMeta},
 };
 use secrecy::{ExposeSecret, SecretString};
 use serde::Serialize;
-use serde_json::json;
+use serde_json::{json, Value};
 use tokio::sync::RwLock;
 use tracing::error;
 use uuid::Uuid;
-use wadm::{
-    model::Manifest,
-    server::{
-        DeleteResult, DeployResult, GetResult, ModelSummary, PutResult, StatusResult, StatusType,
-    },
-};
+use wadm_client::{error::ClientError, Client as WadmClient};
+use wadm_types::{
+    api::{ModelSummary, Status, StatusType},
+    Manifest,
+};
 
 use wasmcloud_operator_types::v1alpha1::WasmCloudHostConfig;
 
 use crate::{

@ -46,6 +46,8 @@
 */
 
 const GROUP_VERSION: &str = "core.oam.dev/v1beta1";
+const KUBECTL_LAST_APPLIED_CONFIG_ANNOTATION: &str =
+    "kubectl.kubernetes.io/last-applied-configuration";
 
 pub struct AppError(Error);
 

@ -237,20 +239,17 @@ impl From<Vec<ModelSummary>> for ApplicationTable {
     }
 }
 
-impl From<Vec<Manifest>> for ApplicationTable {
-    fn from(manifests: Vec<Manifest>) -> Self {
+impl From<Vec<CombinedManifest>> for ApplicationTable {
+    fn from(manifests: Vec<CombinedManifest>) -> Self {
         let mut table = Self::default();
         let rows = manifests
             .into_iter()
-            .map(|m| TableRow {
+            .map(|cm| TableRow {
                 cells: vec![
-                    m.metadata.name,
-                    "N/A".to_string(),
-                    match m.metadata.annotations.get("version") {
-                        Some(v) => v.to_owned(),
-                        None => "N/A".to_string(),
-                    },
-                    "N/A".to_string(),
+                    cm.name(),
+                    cm.deployed_version(),
+                    cm.latest_version(),
+                    cm.status(),
                 ],
             })
             .collect();

@ -260,6 +259,42 @@
     }
 }
 
+struct CombinedManifest {
+    manifest: Manifest,
+    status: Status,
+}
+
+impl CombinedManifest {
+    pub(crate) fn new(manifest: Manifest, status: Status) -> Self {
+        Self { manifest, status }
+    }
+
+    pub(crate) fn name(&self) -> String {
+        self.manifest.metadata.name.to_owned()
+    }
+
+    pub(crate) fn deployed_version(&self) -> String {
+        match self.manifest.metadata.annotations.get("version") {
+            Some(v) => v.to_owned(),
+            None => "N/A".to_string(),
+        }
+    }
+
+    pub(crate) fn latest_version(&self) -> String {
+        self.status.version.to_owned()
+    }
+
+    pub(crate) fn status(&self) -> String {
+        match self.status.info.status_type {
+            StatusType::Undeployed => "Undeployed",
+            StatusType::Reconciling => "Reconciling",
+            StatusType::Deployed => "Deployed",
+            StatusType::Failed => "Failed",
+        }
+        .to_string()
+    }
+}
+
 #[utoipa::path(
     post,
     path = "/apis/core.oam.dev/v1beta1/namespaces/{namespace}/applications"

@ -269,110 +304,37 @@ pub async fn create_application(
     AxumState(state): AxumState<State>,
     body: Bytes,
 ) -> impl IntoResponse {
-    let client = match Client::try_default().await {
+    let kube_client = match KubeClient::try_default().await {
         Ok(c) => c,
         Err(e) => return internal_error(anyhow!("unable to initialize kubernetes client: {}", e)),
     };
-    let configs: Api<WasmCloudHostConfig> = Api::namespaced(client, &namespace);
+    let configs: Api<WasmCloudHostConfig> = Api::namespaced(kube_client, &namespace);
     let cfgs = match configs.list(&ListParams::default()).await {
         Ok(objs) => objs,
         Err(e) => return internal_error(anyhow!("Unable to list cosmonic host configs: {}", e)),
     };
 
-    let mut cfgs = cfgs.iter();
-    // TODO(joonas): Remove this once we move to pulling NATS creds+secrets from lattice instead of hosts.
-    let mut lattice_id = String::new();
-    let nats_client = loop {
-        let Some(cfg) = cfgs.next() else {
-            return internal_error(anyhow!(
-                "unable to find Host Config to use for initializing nats client "
-            ));
-        };
-        let cluster_url = cfg.spec.nats_address.clone();
-        lattice_id = cfg.spec.lattice.clone();
-        let lattice_name = cfg.metadata.name.clone().unwrap();
-        let nst: NameNamespace = NameNamespace::new(lattice_name.clone(), namespace.clone());
-        match get_client(&cluster_url, state.nats_creds.clone(), nst).await {
-            Ok(c) => break Some(c),
-            Err(e) => {
-                error!("error connecting to nats: {}", e);
-                continue;
-            }
-        };
-    };
-
-    let Some(nats_client) = nats_client else {
-        return internal_error(anyhow!("unable to initialize nats client"));
-    };
+    let (nats_client, lattice_id) =
+        match get_lattice_connection(cfgs.into_iter(), state, namespace).await {
+            Ok(data) => data,
+            Err(resp) => return resp,
+        };
+    let wadm_client = WadmClient::from_nats_client(&lattice_id, None, nats_client);
 
-    let model: serde_json::Value = match serde_json::from_slice(&body) {
+    let manifest: Manifest = match serde_json::from_slice(&body) {
         Ok(v) => v,
         Err(e) => return internal_error(anyhow!("unable to decode the patch: {}", e)),
     };
 
-    let put =
-        match wash_lib::app::put_model(&nats_client, Some(lattice_id.clone()), &model.to_string())
-            .await
-        {
-            Ok(res) => res,
-            Err(e) => return internal_error(anyhow!("could not deploy app: {}", e)),
-        };
-
-    let model_name = match put.result {
-        PutResult::Created | PutResult::NewVersion => put.name,
-        _ => {
-            // TODO(joonas): Add handling for the case where the model version
-            // might already exist (from prior deploy or otherwise).
-            return internal_error(anyhow!(
-                "unexpected response from wadm: result={:?}, message={}",
-                put.result,
-                put.message
-            ));
-        }
-    };
-
-    let deploy = match wash_lib::app::deploy_model(
-        &nats_client,
-        Some(lattice_id.clone()),
-        &model_name,
-        Some(put.current_version.clone()),
-    )
-    .await
-    {
-        Ok(res) => res,
-        Err(e) => return internal_error(anyhow!("could not deploy app: {}", e)),
-    };
-
-    if deploy.result != DeployResult::Acknowledged {
-        return internal_error(anyhow!(
-            "unexpected response from wadm: result={:?}, message={}",
-            deploy.result,
-            deploy.message
-        ));
-    }
-
-    // Get model from WADM for displaying in Kubernetes
-    let get = match wash_lib::app::get_model_details(
-        &nats_client,
-        Some(lattice_id),
-        &model_name,
-        Some(put.current_version),
-    )
-    .await
-    {
-        Ok(res) => res,
-        Err(e) => return internal_error(anyhow!("error getting deployed app: {}", e)),
-    };
-
-    match get.result {
-        GetResult::Success => Json(Application::new(model_name)).into_response(),
-        // Either we received an error or could not find the deployed application, so return an error:
-        _ => internal_error(anyhow!(
-            "unexpected response from wadm: result={:?}, message={}",
-            get.result,
-            get.message
-        )),
-    }
+    let (application_name, _application_version) =
+        match wadm_client.put_and_deploy_manifest(manifest).await {
+            Ok(application_bits) => application_bits,
+            Err(e) => return internal_error(anyhow!("could not deploy app: {}", e)),
+        };
+
+    Json(Application::new(application_name)).into_response()
 }
 
 #[utoipa::path(get, path = "/apis/core.oam.dev/v1beta1/applications")]

@ -383,12 +345,12 @@ pub async fn list_all_applications(
     // TODO(joonas): Use lattices (or perhaps Controller specific/special creds) for instantiating NATS client.
     // TODO(joonas): Add watch support to stop Argo from spamming this endpoint every second.
 
-    let client = match Client::try_default().await {
+    let kube_client = match KubeClient::try_default().await {
         Ok(c) => c,
         Err(e) => return internal_error(anyhow!("unable to initialize kubernetes client: {}", e)),
     };
 
-    let configs: Api<WasmCloudHostConfig> = Api::all(client);
+    let configs: Api<WasmCloudHostConfig> = Api::all(kube_client);
     let cfgs = match configs.list(&ListParams::default()).await {
         Ok(objs) => objs,
         Err(e) => return internal_error(anyhow!("Unable to list cosmonic host configs: {}", e)),

@ -405,7 +367,14 @@
         let secret = map.get(&nst);
         // Prevent listing applications within a given lattice more than once
         if !lattices.contains(&lattice_id) {
-            let result = match list_apps(&cfg.spec.nats_address, secret, lattice_id.clone()).await {
+            let result = match list_apps(
+                &cfg.spec.nats_address,
+                &cfg.spec.nats_client_port,
+                secret,
+                lattice_id.clone(),
+            )
+            .await
+            {
                 Ok(apps) => apps,
                 Err(e) => return internal_error(anyhow!("unable to list applications: {}", e)),
             };

@ -439,11 +408,11 @@ pub async fn list_applications(
     Path(namespace): Path<String>,
     AxumState(state): AxumState<State>,
 ) -> impl IntoResponse {
-    let client = match Client::try_default().await {
+    let kube_client = match KubeClient::try_default().await {
         Ok(c) => c,
         Err(e) => return internal_error(anyhow!("unable to initialize kubernetes client: {}", e)),
     };
-    let configs: Api<WasmCloudHostConfig> = Api::namespaced(client, &namespace);
+    let configs: Api<WasmCloudHostConfig> = Api::namespaced(kube_client, &namespace);
     let cfgs = match configs.list(&ListParams::default()).await {
         Ok(objs) => objs,
         Err(e) => return internal_error(anyhow!("Unable to list cosmonic host configs: {}", e)),

@ -459,7 +428,14 @@
         let secret = map.get(&nst);
         // This is to check that we don't list a lattice more than once
         if !lattices.contains(&lattice_id) {
-            let result = match list_apps(&cfg.spec.nats_address, secret, lattice_id.clone()).await {
+            let result = match list_apps(
+                &cfg.spec.nats_address,
+                &cfg.spec.nats_client_port,
+                secret,
+                lattice_id.clone(),
+            )
+            .await
+            {
                 Ok(apps) => apps,
                 Err(e) => return internal_error(anyhow!("unable to list applications: {}", e)),
             };

@ -485,37 +461,40 @@
 
 pub async fn list_apps(
     cluster_url: &str,
+    port: &u16,
     creds: Option<&SecretString>,
     lattice_id: String,
 ) -> Result<Vec<ModelSummary>, Error> {
-    let client = match creds {
+    let addr = format!("{}:{}", cluster_url, port);
+    let nats_client = match creds {
         Some(creds) => {
             ConnectOptions::with_credentials(creds.expose_secret())?
-                .connect(cluster_url)
+                .connect(addr)
                 .await?
         }
-        None => ConnectOptions::new().connect(cluster_url).await?,
+        None => ConnectOptions::new().connect(addr).await?,
     };
-    let models = wash_lib::app::get_models(&client, Some(lattice_id)).await?;
-
-    Ok(models)
+    let wadm_client = WadmClient::from_nats_client(&lattice_id, None, nats_client);
+    Ok(wadm_client.list_manifests().await?)
 }
 
-async fn get_client(
+pub async fn get_nats_client(
     cluster_url: &str,
+    port: &u16,
     nats_creds: Arc<RwLock<HashMap<NameNamespace, SecretString>>>,
     namespace: NameNamespace,
-) -> Result<async_nats::Client, async_nats::ConnectError> {
+) -> Result<NatsClient, ConnectError> {
+    let addr = format!("{}:{}", cluster_url, port);
     let creds = nats_creds.read().await;
     match creds.get(&namespace) {
         Some(creds) => {
             let creds = creds.expose_secret();
             ConnectOptions::with_credentials(creds)
                 .expect("unable to create nats client")
-                .connect(cluster_url)
+                .connect(addr)
                 .await
         }
-        None => ConnectOptions::new().connect(cluster_url).await,
+        None => ConnectOptions::new().connect(addr).await,
     }
 }
 

@ -528,119 +507,77 @@ pub async fn get_application(
     Path((namespace, name)): Path<(String, String)>,
     AxumState(state): AxumState<State>,
 ) -> impl IntoResponse {
-    let client = match Client::try_default().await {
+    let kube_client = match KubeClient::try_default().await {
         Ok(c) => c,
         Err(e) => return internal_error(anyhow!("unable to initialize kubernetes client: {}", e)),
     };
 
-    let configs: Api<WasmCloudHostConfig> = Api::namespaced(client, &namespace);
+    let configs: Api<WasmCloudHostConfig> = Api::namespaced(kube_client, &namespace);
     let cfgs = match configs.list(&ListParams::default()).await {
         Ok(objs) => objs,
         Err(e) => return internal_error(anyhow!("unable to list cosmonic host configs: {}", e)),
     };
 
-    let mut cfgs = cfgs.iter();
-    // TODO(joonas): Remove this once we move to pulling NATS creds+secrets from lattice instead of hosts.
-    let mut lattice_id = String::new();
-    let nats_client = loop {
-        let Some(cfg) = cfgs.next() else {
-            return internal_error(anyhow!(
-                "unable to find Host Config to use for initializing nats client "
-            ));
-        };
-        let cluster_url = cfg.spec.nats_address.clone();
-        lattice_id = cfg.spec.lattice.clone();
-        let lattice_name = cfg.metadata.name.clone().unwrap();
-        let nst: NameNamespace = NameNamespace::new(lattice_name.clone(), namespace.clone());
-        match get_client(&cluster_url, state.nats_creds.clone(), nst).await {
-            Ok(c) => break Some(c),
-            Err(e) => {
-                error!("error connecting to nats: {}", e);
-                continue;
-            }
-        };
-    };
-
-    let Some(nats_client) = nats_client else {
-        return internal_error(anyhow!("unable to initialize nats client"));
-    };
-
-    let get =
-        match wash_lib::app::get_model_details(&nats_client, Some(lattice_id.clone()), &name, None)
-            .await
-        {
-            Ok(res) => res,
-            Err(e) => {
-                return internal_error(anyhow!("unable to request app from wadm: {}", e));
-            }
-        };
-
-    let status = match wash_lib::app::get_model_status(
-        &nats_client,
-        Some(lattice_id.clone()),
-        &name,
-    )
-    .await
-    {
-        Ok(res) => res,
-        Err(e) => {
-            return internal_error(anyhow!("unable to request app status from wadm: {}", e));
-        }
-    };
-
-    match status.result {
-        StatusResult::Ok => {}
-        StatusResult::NotFound => {
-            return not_found_error(anyhow!("applications \"{}\" not found", name));
-        }
-        StatusResult::Error => {
-            return internal_error(anyhow!(
-                "unexpected response from wadm: result={:?}, message={}",
-                status.result,
-                status.message
-            ));
-        }
-    };
-
-    if get.result == GetResult::Success {
-        if let Some(manifest) = get.manifest {
-            let response = match accept.into() {
-                As::Table => Json(ApplicationTable::from(vec![manifest])).into_response(),
-                As::NotSpecified => {
-                    // TODO(joonas): This is a terrible hack, but for now it's what we need to do to satisfy Argo/Kubernetes since WADM doesn't support this metadata.
-                    let mut manifest_value = serde_json::to_value(&manifest).unwrap();
-                    // TODO(joonas): We should add lattice id to this as well, but we need it in every place where the application is listed.
-                    let ns = format!("{}/{}", &name, &manifest.version());
-                    let uid = Uuid::new_v5(&Uuid::NAMESPACE_OID, ns.as_bytes());
-                    manifest_value["metadata"]["uid"] = json!(uid.to_string());
-                    manifest_value["metadata"]["resourceVersion"] = json!(uid.to_string());
-                    manifest_value["metadata"]["namespace"] = json!(namespace);
-                    manifest_value["metadata"]["labels"] = json!({
-                        "app.kubernetes.io/instance": &name
-                    });
-                    // TODO(joonas): refactor status and the metadata inputs into a struct we could just serialize
-                    // The custom health check we provide for Argo will handle the case where status is missing, so this is fine for now.
-                    if let Some(status) = status.status {
-                        let phase = match status.info.status_type {
-                            StatusType::Undeployed => "Undeployed",
-                            StatusType::Reconciling => "Reconciling",
-                            StatusType::Deployed => "Deployed",
-                            StatusType::Failed => "Failed",
-                        };
-                        manifest_value["status"] = json!({
-                            "phase": phase,
-                        });
-                    }
-                    Json(manifest_value).into_response()
-                }
-                // TODO(joonas): Add better error handling here
-                t => return internal_error(anyhow!("unknown type: {}", t)),
-            };
-
-            return response;
-        }
-    };
-
-    not_found_error(anyhow!("applications \"{}\" not found", name))
+    let (nats_client, lattice_id) =
+        match get_lattice_connection(cfgs.into_iter(), state, namespace.clone()).await {
+            Ok(data) => data,
+            Err(resp) => return resp,
+        };
+    let wadm_client = WadmClient::from_nats_client(&lattice_id, None, nats_client);
+
+    let manifest = match wadm_client.get_manifest(&name, None).await {
+        Ok(m) => m,
+        Err(e) => match e {
+            ClientError::NotFound(_) => {
+                return not_found_error(anyhow!("applications \"{}\" not found", name))
+            }
+            _ => return internal_error(anyhow!("unable to request app from wadm: {}", e)),
+        },
+    };
+
+    let status = match wadm_client.get_manifest_status(&name).await {
+        Ok(s) => s,
+        Err(e) => match e {
+            ClientError::NotFound(_) => {
+                return not_found_error(anyhow!("applications \"{}\" not found", name))
+            }
+            _ => return internal_error(anyhow!("unable to request app status from wadm: {}", e)),
+        },
+    };
+
+    match accept.into() {
+        As::Table => {
+            let combined_manifest = CombinedManifest::new(manifest, status);
+            Json(ApplicationTable::from(vec![combined_manifest])).into_response()
+        }
+        As::NotSpecified => {
+            // TODO(joonas): This is a terrible hack, but for now it's what we need to do to satisfy Argo/Kubernetes since WADM doesn't support this metadata.
+            let mut manifest_value = serde_json::to_value(&manifest).unwrap();
+            // TODO(joonas): We should add lattice id to this as well, but we need it in every place where the application is listed.
+            let ns = format!("{}/{}", &name, &manifest.version());
+            let uid = Uuid::new_v5(&Uuid::NAMESPACE_OID, ns.as_bytes());
+            manifest_value["metadata"]["uid"] = json!(uid.to_string());
+            manifest_value["metadata"]["resourceVersion"] = json!(uid.to_string());
+            manifest_value["metadata"]["namespace"] = json!(namespace);
+            manifest_value["metadata"]["labels"] = json!({
+                "app.kubernetes.io/instance": &name
+            });
+            // TODO(joonas): refactor status and the metadata inputs into a struct we could just serialize
+            // The custom health check we provide for Argo will handle the case where status is missing, so this is fine for now.
+            let phase = match status.info.status_type {
+                StatusType::Undeployed => "Undeployed",
+                StatusType::Reconciling => "Reconciling",
+                StatusType::Deployed => "Deployed",
+                StatusType::Failed => "Failed",
+            };
+            manifest_value["status"] = json!({
+                "phase": phase,
+            });
+            Json(manifest_value).into_response()
+        }
+        // TODO(joonas): Add better error handling here
+        t => internal_error(anyhow!("unknown type: {}", t)),
    }
 }
 
 #[utoipa::path(

@ -652,123 +589,102 @@ pub async fn patch_application(
     AxumState(state): AxumState<State>,
     body: Bytes,
 ) -> impl IntoResponse {
-    let client = match Client::try_default().await {
+    let kube_client = match KubeClient::try_default().await {
         Ok(c) => c,
         Err(e) => return internal_error(anyhow!("unable to initialize kubernetes client: {}", e)),
     };
-    let configs: Api<WasmCloudHostConfig> = Api::namespaced(client, &namespace);
+    let configs: Api<WasmCloudHostConfig> = Api::namespaced(kube_client, &namespace);
     let cfgs = match configs.list(&ListParams::default()).await {
         Ok(objs) => objs,
         Err(e) => return internal_error(anyhow!("unable to list cosmonic host configs: {}", e)),
     };
 
-    let mut cfgs = cfgs.iter();
-    // TODO(joonas): Remove this once we move to pulling NATS creds+secrets from lattice instead of hosts.
-    let mut lattice_id = String::new();
-    let nats_client = loop {
-        let Some(cfg) = cfgs.next() else {
-            return internal_error(anyhow!(
-                "unable to find Host Config to use for initializing nats client "
-            ));
-        };
-        let cluster_url = cfg.spec.nats_address.clone();
-        lattice_id = cfg.spec.lattice.clone();
-        let lattice_name = cfg.metadata.name.clone().unwrap();
-        let nst: NameNamespace = NameNamespace::new(lattice_name.clone(), namespace.clone());
-        match get_client(&cluster_url, state.nats_creds.clone(), nst).await {
-            Ok(c) => break Some(c),
-            Err(e) => {
-                error!("error connecting to nats: {}", e);
-                continue;
-            }
-        };
-    };
-
-    let Some(nats_client) = nats_client else {
-        return internal_error(anyhow!("unable to initialize nats client"));
-    };
-
-    // First, check if the model exists.
-    // TODO(joonas): Should this use model.status instead of model.get?
-    let get =
-        match wash_lib::app::get_model_details(&nats_client, Some(lattice_id.clone()), &name, None)
-            .await
-        {
-            Ok(res) => res,
-            Err(e) => {
-                return internal_error(anyhow!("unable to find app: {}", e));
-            }
-        };
-
-    if get.result != GetResult::Success {
-        return internal_error(anyhow!(
-            "unexpected response from wadm: result={:?}, reason={}",
-            get.result,
-            get.message
-        ));
-    }
-
-    // Prepare manifest for patching
-    let Some(manifest) = get.manifest else {
-        return internal_error(anyhow!("no manifest was found for app: {}", &name));
-    };
-
-    let mut model = serde_json::to_value(manifest).unwrap();
+    let (nats_client, lattice_id) =
+        match get_lattice_connection(cfgs.into_iter(), state, namespace).await {
+            Ok(data) => data,
+            Err(resp) => return resp,
+        };
+    let wadm_client = WadmClient::from_nats_client(&lattice_id, None, nats_client);
+    // TODO(joonas): we should likely fetch the version of the manifest that's running in Kubernetes
+    let current_manifest = match wadm_client.get_manifest(&name, None).await {
+        Ok(m) => m,
+        Err(e) => match e {
+            ClientError::NotFound(_) => {
+                return not_found_error(anyhow!("applications \"{}\" not found", name))
+            }
+            _ => return internal_error(anyhow!("unable to request app from wadm: {}", e)),
+        },
+    };
+
+    let mut current = serde_json::to_value(current_manifest).unwrap();
     // Parse the Kubernetes-provided RFC 7386 patch
-    let patch: serde_json::Value = match serde_json::from_slice(&body) {
+    let patch = match serde_json::from_slice::<Value>(&body) {
         Ok(p) => p,
         Err(e) => return internal_error(anyhow!("unable to decode the patch: {}", e)),
     };
 
-    // Attempt to patch the currently running version
-    json_patch::merge(&mut model, &patch);
-
-    let put =
-        match wash_lib::app::put_model(&nats_client, Some(lattice_id.clone()), &model.to_string())
-            .await
-        {
-            Ok(res) => res,
-            Err(e) => {
-                return internal_error(anyhow!("could not update manifest for deploy: {}", e))
-            }
-        };
-
-    match put.result {
-        PutResult::NewVersion => {}
-        _ => {
-            // For now we have to check the error message to see if we can continue,
-            // despite getting an error.
-            if !put.message.contains("already exists") {
-                return internal_error(anyhow!(
-                    "unexpected response from wadm: result={:?}, message={}",
-                    put.result,
-                    put.message
-                ));
-            }
-        }
-    };
-
-    let deploy = match wash_lib::app::deploy_model(
-        &nats_client,
-        Some(lattice_id),
-        &name,
-        Some(put.current_version),
-    )
-    .await
-    {
-        Ok(res) => res,
-        Err(e) => return internal_error(anyhow!("could not deploy app: {}", e)),
-    };
-
-    match deploy.result {
-        DeployResult::Acknowledged => Json(Application::new(name)).into_response(),
-        DeployResult::Error => internal_error(anyhow!(
-            "unexpected response from wadm: result={:?}, message={}",
-            deploy.result,
-            deploy.message
-        )),
-        DeployResult::NotFound => not_found_error(anyhow!("applications \"{}\" not found", &name)),
+    // Remove kubectl.kubernetes.io/last-applied-configuration annotation before
+    // we compare against the patch, otherwise we'll always end up creating a new version.
+    let last_applied_configuration = current
+        .get_mut("metadata")
+        .and_then(|metadata| metadata.get_mut("annotations"))
+        .and_then(|annotations| annotations.as_object_mut())
+        .and_then(|annotations| annotations.remove(KUBECTL_LAST_APPLIED_CONFIG_ANNOTATION));
+
+    // TODO(joonas): This doesn't quite work as intended at the moment,
+    // there are some differences in terms like replicas vs. instances:
+    // * Add(AddOperation { path: "/spec/components/0/traits/0/properties/replicas", value: Number(1) }),
+    // * Remove(RemoveOperation { path: "/spec/components/0/traits/0/properties/instances" }),
+    //
+    // which cause the server to always patch. Also, top-level entries such
+    // as apiVersion, kind and metadata are always removed.
+    //
+    // let diff = json_patch::diff(&current, &patch);
+    // if diff.is_empty() {
+    //     // If there's nothing to patch, return early.
+    //     return Json(()).into_response();
+    // };
+
+    // Remove current version so that either a new version is generated,
+    // or the one set in the incoming patch gets used.
+    if let Some(annotations) = current
+        .get_mut("metadata")
+        .and_then(|metadata| metadata.get_mut("annotations"))
+        .and_then(|annotations| annotations.as_object_mut())
+    {
+        annotations.remove("version");
+    }
+
+    // Attempt to patch the currently running version
+    json_patch::merge(&mut current, &patch);
+
+    // Re-insert "kubectl.kubernetes.io/last-applied-configuration" if one was set
+    if let Some(last_applied_config) = last_applied_configuration {
+        if let Some(annotations) = current
+            .get_mut("metadata")
+            .and_then(|metadata| metadata.get_mut("annotations"))
+            .and_then(|annotations| annotations.as_object_mut())
+        {
+            annotations.insert(
+                KUBECTL_LAST_APPLIED_CONFIG_ANNOTATION.to_string(),
+                last_applied_config,
+            );
+        }
+    }
+
+    let updated_manifest = match serde_json::from_value::<Manifest>(current) {
+        Ok(m) => m,
+        Err(e) => return internal_error(anyhow!("unable to patch the application: {}", e)),
+    };
+
+    match wadm_client.put_and_deploy_manifest(updated_manifest).await {
+        Ok((app_name, _)) => Json(Application::new(app_name)).into_response(),
+        Err(e) => match e {
+            ClientError::NotFound(_) => {
+                not_found_error(anyhow!("applications \"{}\" not found", &name))
+            }
+            _ => internal_error(anyhow!("could not update application: {}", e)),
+        },
    }
 }
 

@ -780,95 +696,60 @@ pub async fn delete_application(
     Path((namespace, name)): Path<(String, String)>,
     AxumState(state): AxumState<State>,
 ) -> impl IntoResponse {
-    let client = match Client::try_default().await {
+    let kube_client = match KubeClient::try_default().await {
         Ok(c) => c,
         Err(e) => return internal_error(anyhow!("unable to initialize kubernetes client: {}", e)),
     };
 
-    let configs: Api<WasmCloudHostConfig> = Api::namespaced(client, &namespace);
+    let configs: Api<WasmCloudHostConfig> = Api::namespaced(kube_client, &namespace);
     let cfgs = match configs.list(&ListParams::default()).await {
         Ok(objs) => objs,
         Err(e) => return internal_error(anyhow!("unable to list cosmonic host configs: {}", e)),
     };
 
-    let mut cfgs = cfgs.iter();
-    // TODO(joonas): Remove this once we move to pulling NATS creds+secrets from lattice instead of hosts.
-    let mut lattice_id = String::new();
-    let nats_client = loop {
-        let Some(cfg) = cfgs.next() else {
-            return internal_error(anyhow!(
-                "unable to find Host Config to use for initializing nats client "
-            ));
-        };
-        let cluster_url = cfg.spec.nats_address.clone();
-        lattice_id = cfg.spec.lattice.clone();
-        let lattice_name = cfg.metadata.name.clone().unwrap();
-        let nst: NameNamespace = NameNamespace::new(lattice_name.clone(), namespace.clone());
-        match get_client(&cluster_url, state.nats_creds.clone(), nst).await {
-            Ok(c) => break Some(c),
-            Err(e) => {
-                error!("error connecting to nats: {}", e);
-                continue;
-            }
-        };
-    };
-
-    let Some(nats_client) = nats_client else {
-        return internal_error(anyhow!("unable to initialize nats client"));
-    };
-
-    // First, check if the model exists.
-    // TODO(joonas): Replace this with wash_lib::app::get_model_status once
-    // https://github.com/wasmCloud/wasmCloud/pull/1151 ships.
-    let status = match wash_lib::app::get_model_status(
-        &nats_client,
-        Some(lattice_id.clone()),
-        &name,
-    )
-    .await
-    {
-        Ok(res) => res,
-        Err(e) => {
-            return internal_error(anyhow!("unable to request app status from wadm: {}", e));
-        }
-    };
-
-    match status.result {
-        StatusResult::Ok => {
-            // Proceed
-        }
-        StatusResult::NotFound => {
-            return not_found_error(anyhow!("apps \"{}\" not found", name));
-        }
-        StatusResult::Error => {
-            return internal_error(anyhow!(
-                "unexpected response from status command: result={:?}, message={}",
-                status.result,
-                status.message
-            ));
-        }
-    };
-
-    let delete = match wash_lib::app::delete_model_version(
-        &nats_client,
-        Some(lattice_id),
-        &name,
-        None,
-        true,
-    )
-    .await
-    {
-        Ok(res) => res,
-        Err(e) => return internal_error(anyhow!("error deleting app: {}", e)),
-    };
-
-    match delete.result {
-        // TODO(joonas): do we need to handle DeleteResult::Noop differently?
-        DeleteResult::Deleted | DeleteResult::Noop => Json(Application::new(name)).into_response(),
-        DeleteResult::Error => internal_error(anyhow!(
-            "unexpected response from wadm: result={:?}, message={}",
-            delete.result,
-            delete.message
-        )),
-    }
+    let (nats_client, lattice_id) =
+        match get_lattice_connection(cfgs.into_iter(), state, namespace).await {
+            Ok(data) => data,
+            Err(resp) => return resp,
+        };
+
+    let wadm_client = WadmClient::from_nats_client(&lattice_id, None, nats_client);
+    match wadm_client.delete_manifest(&name, None).await {
+        Ok(_) => Json(Application::new(name)).into_response(),
+        Err(e) => match e {
+            ClientError::NotFound(_) => not_found_error(anyhow!("apps \"{}\" not found", name)),
+            _ => internal_error(anyhow!("could not delete app: {}", e)),
+        },
+    }
+}
+
+async fn get_lattice_connection(
+    cfgs: impl Iterator<Item = WasmCloudHostConfig>,
+    state: State,
+    namespace: String,
+) -> Result<(NatsClient, String), Response> {
+    let connection_data =
+        cfgs.map(|cfg| (cfg, namespace.clone()))
+            .filter_map(|(cfg, namespace)| {
+                let cluster_url = cfg.spec.nats_address;
+                let lattice_id = cfg.spec.lattice;
+                let lattice_name = cfg.metadata.name?;
+                let nst: NameNamespace = NameNamespace::new(lattice_name, namespace);
+                let port = cfg.spec.nats_client_port;
+                Some((cluster_url, nst, lattice_id, port))
+            });
+
+    for (cluster_url, ns, lattice_id, port) in connection_data {
+        match get_nats_client(&cluster_url, &port, state.nats_creds.clone(), ns).await {
+            Ok(c) => return Ok((c, lattice_id)),
+            Err(e) => {
+                error!(err = %e, %lattice_id, "error connecting to nats");
+                continue;
+            }
+        };
+    }
+
+    // If we get here, we couldn't get a NATS client, so return an error
+    Err(internal_error(anyhow!("unable to initialize nats client")))
 }
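Since these handlers back the registered `v1beta1.core.oam.dev` APIService, they are reachable straight through the Kubernetes apiserver. A sketch of hitting them directly; the namespaced GET paths are inferred from the handlers above, and `hello-world` is illustrative:

```bash
# List applications in a namespace (served by list_applications)
kubectl get --raw /apis/core.oam.dev/v1beta1/namespaces/default/applications

# Fetch one application (served by get_application)
kubectl get --raw /apis/core.oam.dev/v1beta1/namespaces/default/applications/hello-world
```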
|
@ -0,0 +1,794 @@
|
|||
use std::collections::{BTreeMap, HashMap, HashSet};
|
||||
use std::net::SocketAddr;
|
||||
use std::sync::Arc;
|
||||
|
||||
use anyhow::Result;
|
||||
use async_nats::{
|
||||
jetstream,
|
||||
jetstream::{
|
||||
consumer::{pull::Config, Consumer},
|
||||
stream::{Config as StreamConfig, RetentionPolicy, Source, StorageType, SubjectTransform},
|
||||
AckKind,
|
||||
},
|
||||
Client,
|
||||
};
|
||||
use cloudevents::{AttributesReader, Event as CloudEvent};
|
||||
use futures::StreamExt;
|
||||
use k8s_openapi::api::core::v1::{Pod, Service, ServicePort, ServiceSpec};
|
||||
use k8s_openapi::api::discovery::v1::{Endpoint, EndpointConditions, EndpointPort, EndpointSlice};
|
||||
use k8s_openapi::apimachinery::pkg::apis::meta::v1::OwnerReference;
|
||||
use kube::{
|
||||
api::{Api, DeleteParams, ListParams, Patch, PatchParams},
|
||||
client::Client as KubeClient,
|
||||
Resource,
|
||||
};
|
||||
use tokio::sync::{mpsc, RwLock};
|
||||
use tokio_util::sync::CancellationToken;
|
||||
use tracing::{debug, error, warn};
|
||||
use wadm::events::{Event, ManifestPublished, ManifestUnpublished};
|
||||
use wadm_client::Client as WadmClient;
|
||||
use wadm_types::{api::ModelSummary, Component, Manifest, Properties, Trait, TraitProperty};
|
||||
use wasmcloud_operator_types::v1alpha1::WasmCloudHostConfig;
|
||||
|
||||
use crate::controller::{
|
||||
common_labels, CLUSTER_CONFIG_FINALIZER, SERVICE_FINALIZER,
|
||||
WASMCLOUD_OPERATOR_HOST_LABEL_PREFIX, WASMCLOUD_OPERATOR_MANAGED_BY_LABEL_REQUIREMENT,
|
||||
};
|
||||
|
||||
const CONSUMER_PREFIX: &str = "wasmcloud_operator_service";
|
||||
// This should probably be exposed by wadm somewhere
|
||||
const WADM_EVENT_STREAM_NAME: &str = "wadm_events";
|
||||
const OPERATOR_STREAM_NAME: &str = "wasmcloud_operator_events";
|
||||
const OPERATOR_STREAM_SUBJECT: &str = "wasmcloud_operator_events.*.>";
|
||||
|
||||
/// Commands that can be sent to the watcher to trigger an update or removal of a service.
|
||||
#[derive(Clone, Debug)]
|
||||
enum WatcherCommand {
|
||||
UpsertService(ServiceParams),
|
||||
RemoveService {
|
||||
name: String,
|
||||
namespaces: HashSet<String>,
|
||||
},
|
||||
RemoveServices {
|
||||
namespaces: HashSet<String>,
|
||||
},
|
||||
}
|
||||
|
||||
/// Parameters for creating or updating a service in the cluster.
|
||||
#[derive(Clone, Debug)]
|
||||
pub struct ServiceParams {
|
||||
name: String,
|
||||
namespaces: HashSet<String>,
|
||||
lattice_id: String,
|
||||
port: u16,
|
||||
host_labels: Option<HashMap<String, String>>,
|
||||
}
|
||||
|
||||
/// Watches for new services to be created in the cluster for a partcular lattice and creates or
|
||||
/// updates them as necessary.
|
||||
#[derive(Clone, Debug)]
|
||||
pub struct Watcher {
|
||||
namespaces: HashSet<String>,
|
||||
lattice_id: String,
|
||||
nats_client: Client,
|
||||
shutdown: CancellationToken,
|
||||
consumer: Consumer<Config>,
|
||||
tx: mpsc::UnboundedSender<WatcherCommand>,
|
||||
}
|
||||
|
||||
impl Drop for Watcher {
|
||||
fn drop(&mut self) {
|
||||
self.shutdown.cancel();
|
||||
}
|
||||
}
|
||||
|
||||
impl Watcher {
|
||||
/// Creates a new watcher for a particular lattice.
|
||||
fn new(
|
||||
namespace: String,
|
||||
lattice_id: String,
|
||||
nats_client: Client,
|
||||
consumer: Consumer<Config>,
|
||||
tx: mpsc::UnboundedSender<WatcherCommand>,
|
||||
) -> Self {
|
||||
let watcher = Self {
|
||||
namespaces: HashSet::from([namespace]),
|
||||
nats_client,
|
||||
lattice_id: lattice_id.clone(),
|
||||
consumer,
|
||||
shutdown: CancellationToken::new(),
|
||||
tx,
|
||||
};
|
||||
|
||||
// TODO is there a better way to handle this?
|
||||
let watcher_dup = watcher.clone();
|
||||
tokio::spawn(async move {
|
||||
tokio::select! {
|
||||
_ = watcher_dup.shutdown.cancelled() => {
|
||||
debug!(%lattice_id, "Service watcher shutting down for lattice");
|
||||
}
|
||||
_ = watcher_dup.watch_events(&watcher_dup.consumer) => {
|
||||
error!(%lattice_id, "Service watcher for lattice has stopped");
|
||||
}
|
||||
}
|
||||
});
|
||||
|
||||
watcher
|
||||
}
|
||||
|
||||
/// Watches for new events on the mirrored wadm_events stream and processes them.
|
||||
async fn watch_events(&self, consumer: &Consumer<Config>) -> Result<()> {
|
||||
let mut messages = consumer.stream().messages().await?;
|
||||
while let Some(message) = messages.next().await {
|
||||
if let Ok(message) = message {
|
||||
match self.handle_event(message.clone()) {
|
||||
Ok(_) => message
|
||||
.ack()
|
||||
.await
|
||||
.map_err(|e| {
|
||||
error!(err=%e, "Error acking message");
|
||||
e
|
||||
})
|
||||
.ok(),
|
||||
Err(_) => message
|
||||
.ack_with(AckKind::Nak(None))
|
||||
.await
|
||||
.map_err(|e| {
|
||||
error!(err=%e, "Error nacking message");
|
||||
e
|
||||
})
|
||||
.ok(),
|
||||
};
|
||||
}
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Handles a new event from the consumer.
|
||||
fn handle_event(&self, message: async_nats::jetstream::Message) -> Result<()> {
|
||||
let event = serde_json::from_slice::<CloudEvent>(&message.payload)
|
||||
.map_err(|e| anyhow::anyhow!("Error parsing cloudevent: {}", e))?;
|
||||
let evt = match Event::try_from(event.clone()) {
|
||||
Ok(evt) => evt,
|
||||
Err(e) => {
|
||||
warn!(
|
||||
err=%e,
|
||||
event_type=%event.ty(),
|
||||
"Error converting cloudevent to wadm event",
|
||||
);
|
||||
return Ok(());
|
||||
}
|
||||
};
|
||||
match evt {
|
||||
Event::ManifestPublished(mp) => {
|
||||
let name = mp.manifest.metadata.name.clone();
|
||||
self.handle_manifest_published(mp).map_err(|e| {
|
||||
error!(lattice_id = %self.lattice_id, manifest = name, "Error handling manifest published event: {}", e);
|
||||
e
|
||||
})?;
|
||||
}
|
||||
Event::ManifestUnpublished(mu) => {
|
||||
let name = mu.name.clone();
|
||||
self.handle_manifest_unpublished(mu).map_err(|e| {
|
||||
error!(lattice_id = %self.lattice_id, manifest = name, "Error handling manifest unpublished event: {}", e);
|
||||
e
|
||||
})?;
|
||||
}
|
||||
_ => {}
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Handles a manifest published event.
|
||||
fn handle_manifest_published(&self, mp: ManifestPublished) -> Result<()> {
|
||||
debug!(manifest=?mp, "Handling manifest published event");
|
||||
let manifest = mp.manifest;
|
||||
if let Some(httpserver_service) = http_server_component(&manifest) {
|
||||
if let Ok(addr) = httpserver_service.address.parse::<SocketAddr>() {
|
||||
debug!(manifest = %manifest.metadata.name, "Upserting service for manifest");
|
||||
self.tx
|
||||
.send(WatcherCommand::UpsertService(ServiceParams {
|
||||
name: manifest.metadata.name.clone(),
|
||||
lattice_id: self.lattice_id.clone(),
|
||||
port: addr.port(),
|
||||
namespaces: self.namespaces.clone(),
|
||||
host_labels: httpserver_service.labels,
|
||||
}))
|
||||
.map_err(|e| anyhow::anyhow!("Error sending command to watcher: {}", e))?;
|
||||
} else {
|
||||
error!(
|
||||
address = httpserver_service.address,
|
||||
"Invalid address in manifest"
|
||||
);
|
||||
}
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Handles a manifest unpublished event.
|
||||
fn handle_manifest_unpublished(&self, mu: ManifestUnpublished) -> Result<()> {
|
||||
self.tx
|
||||
.send(WatcherCommand::RemoveService {
|
||||
name: mu.name,
|
||||
namespaces: self.namespaces.clone(),
|
||||
})
|
||||
.map_err(|e| anyhow::anyhow!("Error sending command to watcher: {}", e))?;
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
/// Waits for commands to update or remove services based on manifest deploy/undeploy events in
|
||||
/// underlying lattices.
|
||||
/// Each lattice is managed by a [`Watcher`] which listens for events relayed by a NATS consumer and
|
||||
/// issues commands to create or update services in the cluster.
|
||||
pub struct ServiceWatcher {
|
||||
watchers: Arc<RwLock<HashMap<String, Watcher>>>,
|
||||
sender: mpsc::UnboundedSender<WatcherCommand>,
|
||||
stream_replicas: u16,
|
||||
}
|
||||
|
||||
impl ServiceWatcher {
|
||||
/// Creates a new service watcher.
|
||||
pub fn new(k8s_client: KubeClient, stream_replicas: u16) -> Self {
|
||||
let (tx, mut rx) = mpsc::unbounded_channel::<WatcherCommand>();
|
||||
|
||||
let client = k8s_client.clone();
|
||||
tokio::spawn(async move {
|
||||
while let Some(cmd) = rx.recv().await {
|
||||
match cmd {
|
||||
WatcherCommand::UpsertService(params) => {
|
||||
create_or_update_service(client.clone(), ¶ms, None)
|
||||
.await
|
||||
.map_err(|e| error!(err=%e, "Error creating/updating service"))
|
||||
.ok();
|
||||
}
|
||||
WatcherCommand::RemoveService { name, namespaces } => {
|
||||
for namespace in namespaces {
|
||||
delete_service(client.clone(), &namespace, name.as_str())
|
||||
.await
|
||||
.map_err(|e| error!(err=%e, %namespace, "Error deleting service"))
|
||||
.ok();
|
||||
}
|
||||
}
|
||||
WatcherCommand::RemoveServices { namespaces } => {
|
||||
for namespace in namespaces {
|
||||
delete_services(client.clone(), namespace.as_str())
|
||||
.await
|
||||
.map_err(|e| error!(err=%e, %namespace, "Error deleting service"))
|
||||
.ok();
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
});
|
||||
|
||||
Self {
|
||||
watchers: Arc::new(RwLock::new(HashMap::new())),
|
||||
sender: tx,
|
||||
stream_replicas,
|
||||
}
|
||||
}
|
||||
|
||||
/// Reconciles services for a set of apps in a lattice.
|
||||
/// This intended to be called by the controller whenever it reconciles state.
|
||||
pub async fn reconcile_services(&self, apps: Vec<ModelSummary>, lattice_id: String) {
|
||||
if let Some(watcher) = self.watchers.read().await.get(lattice_id.as_str()) {
|
||||
let wadm_client =
|
||||
WadmClient::from_nats_client(&lattice_id, None, watcher.nats_client.clone());
|
||||
for app in apps {
|
||||
if app.deployed_version.is_none() {
|
||||
continue;
|
||||
}
|
||||
match wadm_client
|
||||
.get_manifest(app.name.as_str(), app.deployed_version.as_deref())
|
||||
.await
|
||||
{
|
||||
Ok(manifest) => {
|
||||
let _ = watcher.handle_manifest_published(ManifestPublished {
|
||||
manifest,
|
||||
}).map_err(|e| error!(err = %e, %lattice_id, app = %app.name, "failed to trigger service reconciliation for app"));
|
||||
}
|
||||
Err(e) => warn!(err=%e, "Unable to retrieve model"),
|
||||
};
|
||||
}
|
||||
};
|
||||
}
|
||||
|
||||
/// Create a new [`Watcher`] for a lattice.
|
||||
/// It will return early if a [`Watcher`] already exists for the lattice.
|
||||
pub async fn watch(&self, client: Client, namespace: String, lattice_id: String) -> Result<()> {
|
||||
// If we're already watching this lattice then return early
|
||||
// TODO is there an easy way to do this with a read lock?
|
||||
let mut watchers = self.watchers.write().await;
|
||||
if let Some(watcher) = watchers.get_mut(lattice_id.as_str()) {
|
||||
watcher.namespaces.insert(namespace);
|
||||
return Ok(());
|
||||
}
|
||||
|
||||
let js = jetstream::new(client.clone());
|
||||
|
||||
// Should we also be doing this when we first create the ServiceWatcher?
|
||||
let stream = js
|
||||
.get_or_create_stream(StreamConfig {
|
||||
name: OPERATOR_STREAM_NAME.to_string(),
|
||||
description: Some(
|
||||
"Stream for wadm events consumed by the wasmCloud K8s Operator".to_string(),
|
||||
),
|
||||
max_age: wadm::DEFAULT_EXPIRY_TIME,
|
||||
retention: RetentionPolicy::WorkQueue,
|
||||
storage: StorageType::File,
|
||||
allow_rollup: false,
|
||||
num_replicas: self.stream_replicas as usize,
|
||||
mirror: Some(Source {
|
||||
name: WADM_EVENT_STREAM_NAME.to_string(),
|
||||
subject_transforms: vec![SubjectTransform {
|
||||
source: wadm::DEFAULT_WADM_EVENTS_TOPIC.to_string(),
|
||||
destination: OPERATOR_STREAM_SUBJECT.replacen('*', "{{wildcard(1)}}", 1),
|
||||
}],
|
||||
..Default::default()
|
||||
}),
|
||||
..Default::default()
|
||||
})
|
||||
.await?;
|
||||
|
||||
let consumer_name = format!("{CONSUMER_PREFIX}-{}", lattice_id.clone());
|
||||
let consumer = stream
|
||||
.get_or_create_consumer(
|
||||
consumer_name.as_str(),
|
||||
Config {
|
||||
durable_name: Some(consumer_name.clone()),
|
||||
description: Some("Consumer created by the wasmCloud K8s Operator to watch for new service endpoints in wadm manifests".to_string()),
|
||||
ack_policy: jetstream::consumer::AckPolicy::Explicit,
|
||||
ack_wait: std::time::Duration::from_secs(2),
|
||||
max_deliver: 3,
|
||||
deliver_policy: async_nats::jetstream::consumer::DeliverPolicy::All,
|
||||
filter_subject: OPERATOR_STREAM_SUBJECT.replacen('*', &lattice_id, 1),
|
||||
..Default::default()
|
||||
},
|
||||
)
|
||||
.await?;
|
||||
|
||||
let watcher = Watcher::new(
|
||||
namespace,
|
||||
lattice_id.clone(),
|
||||
client.clone(),
|
||||
consumer,
|
||||
self.sender.clone(),
|
||||
);
|
||||
watchers.insert(lattice_id.clone(), watcher);
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Stops watching a lattice by stopping the underlying [`Watcher`] if no namespaces require it.
|
||||
pub async fn stop_watch(&self, lattice_id: String, namespace: String) -> Result<()> {
|
||||
let mut watchers = self.watchers.write().await;
|
||||
if let Some(watcher) = watchers.get_mut(lattice_id.as_str()) {
|
||||
watcher.namespaces.remove(namespace.as_str());
|
||||
if watcher.namespaces.is_empty() {
|
||||
watchers.remove(lattice_id.as_str());
|
||||
}
|
||||
|
||||
self.sender
|
||||
.send(WatcherCommand::RemoveServices {
|
||||
namespaces: HashSet::from([namespace]),
|
||||
})
|
||||
.map_err(|e| anyhow::anyhow!("Error sending command to watcher: {}", e))?;
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
/// Creates or updates a service in the cluster based on the provided parameters.
pub async fn create_or_update_service(
    k8s_client: KubeClient,
    params: &ServiceParams,
    owner_ref: Option<OwnerReference>,
) -> Result<()> {
    let mut labels = common_labels();
    labels.extend(BTreeMap::from([(
        "app.kubernetes.io/name".to_string(),
        params.name.to_string(),
    )]));
    let mut selector = BTreeMap::new();
    let mut create_endpoints = false;
    if let Some(host_labels) = &params.host_labels {
        selector.insert(
            "app.kubernetes.io/name".to_string(),
            "wasmcloud".to_string(),
        );
        selector.extend(
            host_labels
                .iter()
                .map(|(k, v)| (format_service_selector(k), v.clone())),
        );
    } else {
        create_endpoints = true;
    }

    for namespace in params.namespaces.iter() {
        let api = Api::<Service>::namespaced(k8s_client.clone(), namespace);

        let mut svc = Service {
            metadata: kube::api::ObjectMeta {
                name: Some(params.name.clone()),
                labels: Some(labels.clone()),
                finalizers: Some(vec![SERVICE_FINALIZER.to_string()]),
                namespace: Some(namespace.clone()),
                ..Default::default()
            },
            spec: Some(ServiceSpec {
                selector: Some(selector.clone()),
                ports: Some(vec![ServicePort {
                    name: Some("http".to_string()),
                    port: params.port as i32,
                    protocol: Some("TCP".to_string()),
                    ..Default::default()
                }]),
                ..Default::default()
            }),
            ..Default::default()
        };

        if let Some(owner_ref) = &owner_ref {
            svc.metadata.owner_references = Some(vec![owner_ref.clone()]);
        }

        debug!(service =? svc, %namespace, "Creating/updating service");

        let svc = api
            .patch(
                params.name.as_str(),
                &PatchParams::apply(SERVICE_FINALIZER),
                &Patch::Apply(svc),
            )
            .await
            .map_err(|e| {
                error!(err = %e, "Error creating/updating service");
                e
            })?;

        if create_endpoints {
            let crds =
                Api::<WasmCloudHostConfig>::namespaced(k8s_client.clone(), namespace.as_str());
            let pods = Api::<Pod>::namespaced(k8s_client.clone(), namespace.as_str());
            let endpoints =
                Api::<EndpointSlice>::namespaced(k8s_client.clone(), namespace.as_str());

            let configs = crds.list(&ListParams::default()).await?;
            let mut ips = vec![];
            for cfg in configs {
                if cfg.spec.lattice == params.lattice_id {
                    let name = cfg.metadata.name.unwrap();
                    let pods = pods
                        .list(&ListParams {
                            label_selector: Some(format!(
                                "app.kubernetes.io/name=wasmcloud,app.kubernetes.io/instance={name}"
                            )),
                            ..Default::default()
                        })
                        .await?;
                    let pod_ips = pods
                        .into_iter()
                        .filter_map(|pod| {
                            pod.status.and_then(|status| {
                                if status.phase == Some("Running".to_string()) {
                                    status.pod_ips
                                } else {
                                    None
                                }
                            })
                        })
                        .flatten();
                    ips.extend(pod_ips);
                }
            }

            // Create an EndpointSlice if we're working with a daemonscaler without label
            // requirements. This means we need to manually map the endpoints to each wasmCloud
            // host belonging to the lattice in this namespace.
            // TODO: This can actually span namespaces, as can the label requirements, so should
            // we be querying _all_ CRDs to find all available pods?
            if !ips.is_empty() {
                let mut labels = labels.clone();
                labels.insert(
                    "kubernetes.io/service-name".to_string(),
                    params.name.clone(),
                );
                let endpoint_slice = EndpointSlice {
                    metadata: kube::api::ObjectMeta {
                        name: Some(params.name.clone()),
                        labels: Some(labels.clone()),
                        // SAFETY: This should be safe according to the kube.rs docs, which
                        // specify that anything created through the apiserver should have a
                        // populated field here.
                        owner_references: Some(vec![svc.controller_owner_ref(&()).unwrap()]),
                        ..Default::default()
                    },
                    // TODO is there a way to figure this out automatically? Maybe based on the
                    // number of IPs that come back or what they are
                    address_type: "IPv4".to_string(),
                    endpoints: ips
                        .iter()
                        .filter_map(|ip| {
                            ip.ip.as_ref().map(|i| Endpoint {
                                addresses: vec![i.clone()],
                                conditions: Some(EndpointConditions {
                                    ready: Some(true),
                                    serving: Some(true),
                                    terminating: None,
                                }),
                                hostname: None,
                                target_ref: None,
                                ..Default::default()
                            })
                        })
                        .collect(),
                    ports: Some(vec![EndpointPort {
                        name: Some("http".to_string()),
                        port: Some(params.port as i32),
                        protocol: Some("TCP".to_string()),
                        app_protocol: None,
                    }]),
                };
                // TODO this should probably do the usual get/patch or get/replace bit since I
                // don't think this is fully syncing endpoints when pods are deleted. Also we
                // should update this based on pod status since we may end up having stale IPs.
                endpoints
                    .patch(
                        params.name.as_str(),
                        &PatchParams::apply(CLUSTER_CONFIG_FINALIZER),
                        &Patch::Apply(endpoint_slice),
                    )
                    .await
                    .map_err(|e| {
                        error!("Error creating endpoint slice: {}", e);
                        e
                    })?;
            }
        };
    }

    debug!("Created/updated service");
    Ok(())
}

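// A minimal sketch of a call site for `create_or_update_service` (hypothetical
// values; only the fields this function actually reads -- `name`, `namespaces`,
// `lattice_id`, `port`, and `host_labels` -- are shown, and any other fields on
// `ServiceParams` are an assumption):
//
//     let params = ServiceParams {
//         name: "hello-world".to_string(),
//         namespaces: HashSet::from(["default".to_string()]),
//         lattice_id: "default".to_string(),
//         port: 8080,
//         host_labels: None, // no label requirements, so endpoints are built by hand
//     };
//     create_or_update_service(k8s_client.clone(), &params, None).await?;
//
// With `host_labels: None` the Service is applied with an empty selector, which is
// why the function then lists running wasmCloud host pods itself and publishes
// their IPs through a manually managed EndpointSlice.
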
#[derive(Default)]
pub struct HttpServerComponent {
    labels: Option<HashMap<String, String>>,
    address: String,
}

/// Finds the httpserver component in a manifest and returns the details needed to create a service
fn http_server_component(manifest: &Manifest) -> Option<HttpServerComponent> {
    let components: Vec<&Component> = manifest
        .components()
        // filter just for the wasmCloud httpserver for now. This should actually just filter for
        // the http capability
        .filter(|c| {
            if let Properties::Capability { properties } = &c.properties {
                if properties
                    .image
                    .starts_with("ghcr.io/wasmcloud/http-server")
                {
                    return true;
                }
            }
            false
        })
        .collect();

    let scalers: Vec<&Trait> = components
        .iter()
        .filter_map(|c| {
            if let Some(t) = &c.traits {
                for trait_ in t {
                    if trait_.trait_type == "daemonscaler" {
                        return Some(trait_);
                    }
                }
            };
            None
        })
        .collect();

    // Right now we only support daemonscalers, so if we don't find any then we have nothing to do
    if scalers.is_empty() {
        return None;
    }

    let links: Vec<&Trait> = components
        .iter()
        .filter_map(|c| {
            if let Some(t) = &c.traits {
                for trait_ in t {
                    if trait_.trait_type == "link" {
                        return Some(trait_);
                    }
                }
            };
            None
        })
        .collect();

    let mut details = HttpServerComponent::default();
    let mut should_create_service = false;
    for l in links {
        match &l.properties {
            TraitProperty::Link(props) => {
                if props.namespace == "wasi"
                    && props.package == "http"
                    && props.interfaces.contains(&"incoming-handler".to_string())
                    && props.source.is_some()
                {
                    let source = props.source.as_ref().unwrap();
                    for cp in source.config.iter() {
                        if let Some(addr) = cp.properties.as_ref().and_then(|p| p.get("address")) {
                            details.address.clone_from(addr);
                            should_create_service = true;
                        }
                    }
                }
            }
            TraitProperty::SpreadScaler(scaler) => {
                for spread in scaler.spread.iter() {
                    spread.requirements.iter().for_each(|(k, v)| {
                        details
                            .labels
                            .get_or_insert_with(HashMap::new)
                            .insert(k.clone(), v.clone());
                    });
                }
            }
            _ => {}
        }
    }

    if should_create_service {
        return Some(details);
    }
    None
}

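// For reference, the shape of a link trait that satisfies the checks above looks
// like this in a wadm manifest (a sketch distilled from the test manifests below,
// not an additional supported form):
//
//     - type: link
//       properties:
//         target: http-hello-world
//         namespace: wasi
//         package: http
//         interfaces: [incoming-handler]
//         source_config:
//           - name: default-http
//             properties:
//               address: 0.0.0.0:8080
//
// The `address` value is what ends up in `HttpServerComponent::address`, while the
// `labels` field is meant to carry spreadscaler placement requirements when a
// matching trait provides them.
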
/// Deletes a service in the cluster.
async fn delete_service(k8s_client: KubeClient, namespace: &str, name: &str) -> Result<()> {
    debug!(namespace = namespace, name = name, "Deleting service");
    let api = Api::<Service>::namespaced(k8s_client.clone(), namespace);
    // Remove the finalizer so that the service can be deleted
    let mut svc = api.get(name).await?;
    svc.metadata.finalizers = None;
    svc.metadata.managed_fields = None;
    api.patch(
        name,
        &PatchParams::apply(SERVICE_FINALIZER).force(),
        &Patch::Apply(svc),
    )
    .await
    .map_err(|e| {
        error!("Error removing finalizer from service: {}", e);
        e
    })?;

    api.delete(name, &DeleteParams::default()).await?;
    Ok(())
}

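// Why the two-step dance above: the operator attaches SERVICE_FINALIZER when it
// applies the Service, so a plain delete would leave the object stuck in
// `Terminating`. The forced server-side apply first rewrites the object with no
// finalizers (`managed_fields` is cleared because the apiserver rejects an apply
// patch that still carries them), and only then does the delete go through.
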
async fn delete_services(k8s_client: KubeClient, namespace: &str) -> Result<()> {
    let api = Api::<Service>::namespaced(k8s_client.clone(), namespace);
    let services = api
        .list(&ListParams {
            label_selector: Some(WASMCLOUD_OPERATOR_MANAGED_BY_LABEL_REQUIREMENT.to_string()),
            ..Default::default()
        })
        .await?;
    for svc in services {
        let name = svc.metadata.name.unwrap();
        delete_service(k8s_client.clone(), namespace, name.as_str()).await?;
    }
    Ok(())
}

/// Formats a service selector for a given name.
fn format_service_selector(name: &str) -> String {
    format!("{WASMCLOUD_OPERATOR_HOST_LABEL_PREFIX}/{}", name)
}

#[cfg(test)]
mod test {
    use super::*;

    #[test]
    fn test_daemonscaler_should_return() {
        let manifest = r#"
apiVersion: core.oam.dev/v1beta1
kind: Application
metadata:
  name: rust-http-hello-world
  annotations:
    version: v0.0.1
    description: "HTTP hello world demo in Rust, using the WebAssembly Component Model and WebAssembly Interfaces Types (WIT)"
    experimental: "true"
spec:
  components:
    - name: http-hello-world
      type: component
      properties:
        image: wasmcloud.azurecr.io/http-hello-world:0.1.0
        id: helloworld
      traits:
        # Govern the spread/scheduling of the actor
        - type: spreadscaler
          properties:
            replicas: 5000
    # Add a capability provider that mediates HTTP access
    - name: httpserver
      type: capability
      properties:
        image: ghcr.io/wasmcloud/http-server:0.20.0
        id: httpserver
      traits:
        # Link the HTTP server, and inform it to listen on port 8080
        # on the local machine
        - type: link
          properties:
            target: http-hello-world
            namespace: wasi
            package: http
            interfaces: [incoming-handler]
            source_config:
              - name: default-http
                properties:
                  address: 0.0.0.0:8080
        - type: daemonscaler
          properties:
            replicas: 1
"#;
        let m = serde_yaml::from_str::<Manifest>(manifest).unwrap();
        let component = http_server_component(&m);
        assert!(component.is_some());

        let manifest = r#"
apiVersion: core.oam.dev/v1beta1
kind: Application
metadata:
  name: rust-http-hello-world
  annotations:
    version: v0.0.1
    description: "HTTP hello world demo in Rust, using the WebAssembly Component Model and WebAssembly Interfaces Types (WIT)"
    experimental: "true"
spec:
  components:
    - name: http-hello-world
      type: component
      properties:
        image: wasmcloud.azurecr.io/http-hello-world:0.1.0
        id: helloworld
      traits:
        # Govern the spread/scheduling of the actor
        - type: spreadscaler
          properties:
            replicas: 5000
    # Add a capability provider that mediates HTTP access
    - name: httpserver
      type: capability
      properties:
        image: ghcr.io/wasmcloud/http-server:0.20.0
        id: httpserver
      traits:
        # Link the HTTP server, and inform it to listen on port 8080
        # on the local machine
        - type: link
          properties:
            target: http-hello-world
            namespace: wasi
            package: http
            interfaces: [incoming-handler]
            source_config:
              - name: default-http
                properties:
                  address: 0.0.0.0:8080
"#;
        let m = serde_yaml::from_str::<Manifest>(manifest).unwrap();
        let component = http_server_component(&m);
        assert!(component.is_none());
    }
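
    // A small, self-contained check for the selector helper above (an addition for
    // illustration; it only restates the format the helper already encodes):
    #[test]
    fn test_format_service_selector() {
        let key = format_service_selector("zone");
        assert_eq!(key, format!("{WASMCLOUD_OPERATOR_HOST_LABEL_PREFIX}/zone"));
    }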
}