add zh-v1.0 dir

This commit is contained in:
guoxudong 2021-06-03 16:26:33 +08:00
parent 0aeb824627
commit a4af24b542
146 changed files with 10531 additions and 9 deletions

View File

@ -4,11 +4,11 @@
"description": "The label for version v1.0"
},
"sidebar.docs.category.Overview": {
"message": "Overview",
"message": "概述",
"description": "The label for category Overview in sidebar docs"
},
"sidebar.docs.category.Getting Started": {
"message": "Getting Started",
"message": "快速开始",
"description": "The label for category Getting Started in sidebar docs"
},
"sidebar.docs.category.Application Deployment": {
@ -16,7 +16,7 @@
"description": "The label for category Application Deployment in sidebar docs"
},
"sidebar.docs.category.More Operations": {
"message": "More Operations",
"message": "更多操作",
"description": "The label for category More Operations in sidebar docs"
},
"sidebar.docs.category.Platform Operation Guide": {
@ -24,7 +24,7 @@
"description": "The label for category Platform Operation Guide in sidebar docs"
},
"sidebar.docs.category.Defining Components": {
"message": "Defining Components",
"message": "定义 Components",
"description": "The label for category Defining Components in sidebar docs"
},
"sidebar.docs.category.CUE": {
@ -40,11 +40,11 @@
"description": "The label for category Raw Template in sidebar docs"
},
"sidebar.docs.category.Defining Cloud Service": {
"message": "Defining Cloud Service",
"message": "定义 Cloud Service",
"description": "The label for category Defining Cloud Service in sidebar docs"
},
"sidebar.docs.category.Defining Traits": {
"message": "Defining Traits",
"message": "定义 Traits",
"description": "The label for category Defining Traits in sidebar docs"
},
"sidebar.docs.category.Hands-on Lab": {
@ -52,7 +52,7 @@
"description": "The label for category Hands-on Lab in sidebar docs"
},
"sidebar.docs.category.Using KubeVela CLI": {
"message": "Using KubeVela CLI",
"message": "使用 KubeVela CLI",
"description": "The label for category Using KubeVela CLI in sidebar docs"
},
"sidebar.docs.category.Appfile": {
@ -60,11 +60,11 @@
"description": "The label for category Appfile in sidebar docs"
},
"sidebar.docs.category.Managing Applications": {
"message": "Managing Applications",
"message": "管理 Applications",
"description": "The label for category Managing Applications in sidebar docs"
},
"sidebar.docs.category.References": {
"message": "References",
"message": "参考",
"description": "The label for category References in sidebar docs"
},
"sidebar.docs.category.CLI": {

View File

@ -0,0 +1,27 @@
![alt](resources/KubeVela-03.png)
*Make shipping applications more enjoyable.*
# KubeVela
KubeVela is a modern application engine that adapts to your application's needs, not the other way around.
## Community
- Slack: [CNCF Slack](https://slack.cncf.io/) #kubevela channel
- Gitter: [Discussion](https://gitter.im/oam-dev/community)
- Bi-weekly Community Call: [Meeting Notes](https://docs.google.com/document/d/1nqdFEyULekyksFHtFvgvFAYE-0AMHKoS3RMnaKsarjs)
## Installation
The installation guide is available in [this section](./install).
## Quick Start
The quick start is available in [this section](./quick-start).
## Contributing
Check out [CONTRIBUTING](https://github.com/oam-dev/kubevela/blob/master/CONTRIBUTING.md) to see how to develop with KubeVela.
## Code of Conduct
KubeVela adopts the [CNCF Code of Conduct](https://github.com/cncf/foundation/blob/master/code-of-conduct.md).

View File

@ -0,0 +1,119 @@
---
title: Install (Advanced)
---
## Install KubeVela with cert-manager
KubeVela can use cert-manager to generate certificates for your applications, but you need to install cert-manager beforehand.
```shell script
helm repo add jetstack https://charts.jetstack.io
helm repo update
helm install cert-manager jetstack/cert-manager --namespace cert-manager --version v1.2.0 --create-namespace --set installCRDs=true
```
Install KubeVela with cert-manager enabled:
```shell script
helm install --create-namespace -n vela-system --set admissionWebhooks.certManager.enabled=true kubevela kubevela/vela-core
```
## Install a pre-release
Add the `--devel` flag to your `helm search` command to search for pre-release versions. Pre-release version numbers have the format `<next_version>-rc-master`, e.g. `0.4.0-rc-master`, which denotes a release candidate built from the `master` branch.
```shell script
helm search repo kubevela/vela-core -l --devel
```
```console
NAME CHART VERSION APP VERSION DESCRIPTION
kubevela/vela-core 0.4.0-rc-master 0.4.0-rc-master A Helm chart for KubeVela core
kubevela/vela-core 0.3.2 0.3.2 A Helm chart for KubeVela core
kubevela/vela-core 0.3.1 0.3.1 A Helm chart for KubeVela core
```
Then install a pre-release version with a command like the following.
```shell script
helm install --create-namespace -n vela-system kubevela kubevela/vela-core --version <next_version>-rc-master
```
```console
NAME: kubevela
LAST DEPLOYED: Thu Apr 1 19:41:30 2021
NAMESPACE: vela-system
STATUS: deployed
REVISION: 1
NOTES:
Welcome to use the KubeVela! Enjoy your shipping application journey!
```
## Upgrade
### Step 1: Update the Helm repository
Fetch the most recently released KubeVela chart:
```shell
helm repo update
helm search repo kubevela/vela-core -l
```
### Step 2: Upgrade the KubeVela CRDs
```shell
kubectl apply -f https://raw.githubusercontent.com/oam-dev/kubevela/master/charts/vela-core/crds/core.oam.dev_componentdefinitions.yaml
kubectl apply -f https://raw.githubusercontent.com/oam-dev/kubevela/master/charts/vela-core/crds/core.oam.dev_workloaddefinitions.yaml
kubectl apply -f https://raw.githubusercontent.com/oam-dev/kubevela/master/charts/vela-core/crds/core.oam.dev_traitdefinitions.yaml
kubectl apply -f https://raw.githubusercontent.com/oam-dev/kubevela/master/charts/vela-core/crds/core.oam.dev_applications.yaml
kubectl apply -f https://raw.githubusercontent.com/oam-dev/kubevela/master/charts/vela-core/crds/core.oam.dev_approllouts.yaml
kubectl apply -f https://raw.githubusercontent.com/oam-dev/kubevela/master/charts/vela-core/crds/core.oam.dev_applicationrevisions.yaml
kubectl apply -f https://raw.githubusercontent.com/oam-dev/kubevela/master/charts/vela-core/crds/core.oam.dev_scopedefinitions.yaml
kubectl apply -f https://raw.githubusercontent.com/oam-dev/kubevela/master/charts/vela-core/crds/core.oam.dev_appdeployments.yaml
kubectl apply -f https://raw.githubusercontent.com/oam-dev/kubevela/master/charts/vela-core/crds/core.oam.dev_applicationcontexts.yaml
```
> Tip: if you see an error like `* is invalid: spec.scope: Invalid value: "Namespaced": field is immutable`, delete the offending CRDs and reinstall them:
```shell
kubectl delete crd \
scopedefinitions.core.oam.dev \
traitdefinitions.core.oam.dev \
workloaddefinitions.core.oam.dev
```
### Step 3: Upgrade the KubeVela Helm chart
```shell
helm upgrade --install --create-namespace --namespace vela-system kubevela kubevela/vela-core --version <the_new_version>
```
## Uninstall
Run:
```shell script
helm uninstall -n vela-system kubevela
rm -r ~/.vela
```
This uninstalls the KubeVela server component and its dependencies, and also cleans up the local CLI cache.
Then clean up the CRDs (by default, helm does not remove CRDs):
```shell script
kubectl delete crd \
appdeployments.core.oam.dev \
applicationconfigurations.core.oam.dev \
applicationcontexts.core.oam.dev \
applicationrevisions.core.oam.dev \
applications.core.oam.dev \
approllouts.core.oam.dev \
componentdefinitions.core.oam.dev \
components.core.oam.dev \
containerizedworkloads.core.oam.dev \
healthscopes.core.oam.dev \
manualscalertraits.core.oam.dev \
podspecworkloads.standard.oam.dev \
scopedefinitions.core.oam.dev \
traitdefinitions.core.oam.dev \
workloaddefinitions.core.oam.dev
```
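As an optional sanity check (a sketch, assuming `kubectl` access to the cluster), verify that no KubeVela CRDs remain:
```shell script
# The output should be empty once all KubeVela CRDs have been removed
kubectl get crd | grep oam.dev
```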

View File

@ -0,0 +1,235 @@
---
title: Application CRD
---
This section walks through how to use the `Application` object to define your application and operate it in a declarative way.
## Example
The sample application below declares a `backend` component with the *Worker* workload type and a `frontend` component with the *Web Service* workload type.
In addition, the `frontend` component declares `sidecar` and `autoscaler` traits, which means the workload will automatically get a `fluentd` sidecar injected and can scale between 1 and 10 replicas based on CPU usage.
```yaml
apiVersion: core.oam.dev/v1beta1
kind: Application
metadata:
name: website
spec:
components:
- name: backend
type: worker
properties:
image: busybox
cmd:
- sleep
- '1000'
- name: frontend
type: webservice
properties:
image: nginx
traits:
- type: autoscaler
properties:
min: 1
max: 10
cpuPercent: 60
- type: sidecar
properties:
name: "sidecar-test"
image: "fluentd"
```
### Deploying the application
Deploy the application YAML above, and the application starts up:
```shell
$ kubectl get application -o yaml
apiVersion: core.oam.dev/v1beta1
kind: Application
metadata:
name: website
....
status:
components:
- apiVersion: core.oam.dev/v1alpha2
kind: Component
name: backend
- apiVersion: core.oam.dev/v1alpha2
kind: Component
name: frontend
....
status: running
```
You can see that a Deployment named `frontend` is running, with the `fluentd` container injected.
```shell
$ kubectl get deploy frontend
NAME READY UP-TO-DATE AVAILABLE AGE
frontend 1/1 1 1 100m
```
Another Deployment named `backend` is also running.
```shell
$ kubectl get deploy backend
NAME READY UP-TO-DATE AVAILABLE AGE
backend 1/1 1 1 100m
```
The `autoscaler` trait has also created an HPA.
```shell
$ kubectl get HorizontalPodAutoscaler frontend
NAME REFERENCE TARGETS MINPODS MAXPODS REPLICAS AGE
frontend Deployment/frontend <unknown>/50% 1 10 1 101m
```
## Under the hood
In the example above, `type: worker` means that the content of this component's fields (i.e. the `properties` section below) conforms to the specification defined in the `ComponentDefinition` object named `worker`, shown below:
```yaml
apiVersion: core.oam.dev/v1beta1
kind: ComponentDefinition
metadata:
name: worker
annotations:
definition.oam.dev/description: "Describes long-running, scalable, containerized services that running at backend. They do NOT have network endpoint to receive external network traffic."
spec:
workload:
definition:
apiVersion: apps/v1
kind: Deployment
schematic:
cue:
template: |
output: {
apiVersion: "apps/v1"
kind: "Deployment"
spec: {
selector: matchLabels: {
"app.oam.dev/component": context.name
}
template: {
metadata: labels: {
"app.oam.dev/component": context.name
}
spec: {
containers: [{
name: context.name
image: parameter.image
if parameter["cmd"] != _|_ {
command: parameter.cmd
}
}]
}
}
}
}
parameter: {
image: string
cmd?: [...string]
}
```
Hence, the `properties` section of `backend` only supports two parameters: `image` and `cmd`. This is enforced by the `parameter` list in the `.spec.template` field of the definition.
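One way to verify this locally before deploying (a sketch using the `vela system dry-run` command from the CLI reference; the file name is hypothetical):
```shell
# Render the Application to raw Kubernetes manifests on stdout without deploying;
# a property not declared in the worker definition's `parameter` list would fail rendering.
vela system dry-run -f website.yaml
```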
The same extensible abstraction mechanism applies to traits as well.
For example, `type: autoscaler` in `frontend` means that the trait's field specification (i.e. the trait's `properties` section) is enforced by a `TraitDefinition` object named `autoscaler`, shown below:
```yaml
apiVersion: core.oam.dev/v1beta1
kind: TraitDefinition
metadata:
annotations:
definition.oam.dev/description: "configure k8s HPA for Deployment"
name: hpa
spec:
appliesToWorkloads:
- webservice
- worker
schematic:
cue:
template: |
outputs: hpa: {
apiVersion: "autoscaling/v2beta2"
kind: "HorizontalPodAutoscaler"
metadata: name: context.name
spec: {
scaleTargetRef: {
apiVersion: "apps/v1"
kind: "Deployment"
name: context.name
}
minReplicas: parameter.min
maxReplicas: parameter.max
metrics: [{
type: "Resource"
resource: {
name: "cpu"
target: {
type: "Utilization"
averageUtilization: parameter.cpuUtil
}
}
}]
}
}
parameter: {
min: *1 | int
max: *10 | int
cpuUtil: *50 | int
}
```
The application also uses a `sidecar` trait:
```yaml
apiVersion: core.oam.dev/v1beta1
kind: TraitDefinition
metadata:
annotations:
definition.oam.dev/description: "add sidecar to the app"
name: sidecar
spec:
appliesToWorkloads:
- webservice
- worker
schematic:
cue:
template: |-
patch: {
// +patchKey=name
spec: template: spec: containers: [parameter]
}
parameter: {
name: string
image: string
command?: [...string]
}
```
We assume that all the definition objects (Definition Objects) have been declared and installed by the platform team before end users start working, so end users only need to focus on the `Application` itself.
Note that KubeVela's end users (application developers) do not need to understand the definition objects; they only need to learn how to use the capabilities already installed on the platform, which can usually be rendered as visual forms (or integrated in other ways via JSON schema). See [Generate Forms from Definitions](/docs/platform-engineers/openapi-v3-json-schema) to learn how this is done.
### Conventions and "Standard Contract"
After an application (the `Application` resource) is deployed to a Kubernetes cluster, the KubeVela runtime follows the "standard contract" and conventions below to generate and manage the underlying resource instances.
| Label | Description |
| :--: | :---------: |
|`workload.oam.dev/type=<component definition name>` | The name of its corresponding `ComponentDefinition` |
|`trait.oam.dev/type=<trait definition name>` | The name of its corresponding `TraitDefinition` |
|`app.oam.dev/name=<app name>` | The name of the application it belongs to |
|`app.oam.dev/component=<component name>` | The name of the component it belongs to |
|`trait.oam.dev/resource=<name of trait resource instance>` | The name of the trait resource instance |
|`app.oam.dev/appRevision=<name of app revision>` | The name of the application revision it belongs to |
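For example, the Deployment generated for the `frontend` component above would carry labels along the following lines (a sketch; the revision name is illustrative):
```yaml
# Labels stamped by the KubeVela runtime on the generated Deployment
metadata:
  labels:
    workload.oam.dev/type: webservice
    app.oam.dev/name: website
    app.oam.dev/component: frontend
    app.oam.dev/appRevision: website-v1 # illustrative revision name
```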

View File

@ -0,0 +1,41 @@
---
title: vela
---
```
vela [flags]
```
### Options
```
-e, --env string specify environment name for application
-h, --help help for vela
```
### SEE ALSO
* [vela cap](vela_cap) - Manage capability centers and installing/uninstalling capabilities
* [vela completion](vela_completion) - Output shell completion code for the specified shell (bash or zsh)
* [vela config](vela_config) - Manage configurations
* [vela delete](vela_delete) - Delete an application
* [vela env](vela_env) - Manage environments
* [vela exec](vela_exec) - Execute command in a container
* [vela export](vela_export) - Export deploy manifests from appfile
* [vela help](vela_help) - Help about any command
* [vela init](vela_init) - Create scaffold for an application
* [vela logs](vela_logs) - Tail logs for application
* [vela ls](vela_ls) - List applications
* [vela port-forward](vela_port-forward) - Forward local ports to services in an application
* [vela show](vela_show) - Show the reference doc for a workload type or trait
* [vela status](vela_status) - Show status of an application
* [vela system](vela_system) - System management utilities
* [vela template](vela_template) - Manage templates
* [vela traits](vela_traits) - List traits
* [vela up](vela_up) - Apply an appfile
* [vela version](vela_version) - Prints out build version information
* [vela workloads](vela_workloads) - List workloads
###### Auto generated by spf13/cobra on 20-Mar-2021

View File

@ -0,0 +1,31 @@
---
title: vela cap
---
Manage capability centers and installing/uninstalling capabilities
### Synopsis
Manage capability centers and installing/uninstalling capabilities
### Options
```
-h, --help help for cap
```
### Options inherited from parent commands
```
-e, --env string specify environment name for application
```
### SEE ALSO
* [vela](vela) -
* [vela cap center](vela_cap_center) - Manage Capability Center
* [vela cap install](vela_cap_install) - Install capability into cluster
* [vela cap ls](vela_cap_ls) - List capabilities from cap-center
* [vela cap uninstall](vela_cap_uninstall) - Uninstall capability from cluster
###### Auto generated by spf13/cobra on 20-Mar-2021

View File

@ -0,0 +1,31 @@
---
title: vela cap center
---
Manage Capability Center
### Synopsis
Manage Capability Center with config, sync, list
### Options
```
-h, --help help for center
```
### Options inherited from parent commands
```
-e, --env string specify environment name for application
```
### SEE ALSO
* [vela cap](vela_cap) - Manage capability centers and installing/uninstalling capabilities
* [vela cap center config](vela_cap_center_config) - Configure (add if not exist) a capability center, default is local (built-in capabilities)
* [vela cap center ls](vela_cap_center_ls) - List all capability centers
* [vela cap center remove](vela_cap_center_remove) - Remove specified capability center
* [vela cap center sync](vela_cap_center_sync) - Sync capabilities from remote center, default to sync all centers
###### Auto generated by spf13/cobra on 20-Mar-2021

View File

@ -0,0 +1,38 @@
---
title: vela cap center config
---
Configure (add if not exist) a capability center, default is local (built-in capabilities)
### Synopsis
Configure (add if not exist) a capability center, default is local (built-in capabilities)
```
vela cap center config <centerName> <centerURL> [flags]
```
### Examples
```
vela cap center config mycenter https://github.com/oam-dev/catalog/cap-center
```
### Options
```
-h, --help help for config
-t, --token string Github Repo token
```
### Options inherited from parent commands
```
-e, --env string specify environment name for application
```
### SEE ALSO
* [vela cap center](vela_cap_center) - Manage Capability Center
###### Auto generated by spf13/cobra on 20-Mar-2021

View File

@ -0,0 +1,37 @@
---
title: vela cap center ls
---
List all capability centers
### Synopsis
List all configured capability centers
```
vela cap center ls [flags]
```
### Examples
```
vela cap center ls
```
### Options
```
-h, --help help for ls
```
### Options inherited from parent commands
```
-e, --env string specify environment name for application
```
### SEE ALSO
* [vela cap center](vela_cap_center) - Manage Capability Center
###### Auto generated by spf13/cobra on 20-Mar-2021

View File

@ -0,0 +1,37 @@
---
title: vela cap center remove
---
Remove specified capability center
### Synopsis
Remove specified capability center
```
vela cap center remove <centerName> [flags]
```
### Examples
```
vela cap center remove mycenter
```
### Options
```
-h, --help help for remove
```
### Options inherited from parent commands
```
-e, --env string specify environment name for application
```
### SEE ALSO
* [vela cap center](vela_cap_center) - Manage Capability Center
###### Auto generated by spf13/cobra on 20-Mar-2021

View File

@ -0,0 +1,37 @@
---
title: vela cap center sync
---
Sync capabilities from remote center, default to sync all centers
### Synopsis
Sync capabilities from remote center, default to sync all centers
```
vela cap center sync [centerName] [flags]
```
### Examples
```
vela cap center sync mycenter
```
### Options
```
-h, --help help for sync
```
### Options inherited from parent commands
```
-e, --env string specify environment name for application
```
### SEE ALSO
* [vela cap center](vela_cap_center) - Manage Capability Center
###### Auto generated by spf13/cobra on 20-Mar-2021

View File

@ -0,0 +1,38 @@
---
title: vela cap install
---
Install capability into cluster
### Synopsis
Install capability into cluster
```
vela cap install <center>/<name> [flags]
```
### Examples
```
vela cap install mycenter/route
```
### Options
```
-h, --help help for install
-t, --token string Github Repo token
```
### Options inherited from parent commands
```
-e, --env string specify environment name for application
```
### SEE ALSO
* [vela cap](vela_cap) - Manage capability centers and installing/uninstalling capabilities
###### Auto generated by spf13/cobra on 20-Mar-2021

View File

@ -0,0 +1,37 @@
---
title: vela cap ls
---
List capabilities from cap-center
### Synopsis
List capabilities from cap-center
```
vela cap ls [cap-center] [flags]
```
### Examples
```
vela cap ls
```
### Options
```
-h, --help help for ls
```
### Options inherited from parent commands
```
-e, --env string specify environment name for application
```
### SEE ALSO
* [vela cap](vela_cap) - Manage capability centers and installing/uninstalling capabilities
###### Auto generated by spf13/cobra on 20-Mar-2021

View File

@ -0,0 +1,38 @@
---
title: vela cap uninstall
---
Uninstall capability from cluster
### Synopsis
Uninstall capability from cluster
```
vela cap uninstall <name> [flags]
```
### Examples
```
vela cap uninstall route
```
### Options
```
-h, --help help for uninstall
-t, --token string Github Repo token
```
### Options inherited from parent commands
```
-e, --env string specify environment name for application
```
### SEE ALSO
* [vela cap](vela_cap) - Manage capability centers and installing/uninstalling capabilities
###### Auto generated by spf13/cobra on 20-Mar-2021

View File

@ -0,0 +1,32 @@
---
title: vela completion
---
Output shell completion code for the specified shell (bash or zsh)
### Synopsis
Output shell completion code for the specified shell (bash or zsh).
The shell code must be evaluated to provide interactive completion
of vela commands.
### Options
```
-h, --help help for completion
```
### Options inherited from parent commands
```
-e, --env string specify environment name for application
```
### SEE ALSO
* [vela](vela) -
* [vela completion bash](vela_completion_bash) - generate autocompletions script for bash
* [vela completion zsh](vela_completion_zsh) - generate autocompletions script for zsh
###### Auto generated by spf13/cobra on 20-Mar-2021

View File

@ -0,0 +1,41 @@
---
title: vela completion bash
---
generate autocompletions script for bash
### Synopsis
Generate the autocompletion script for Vela for the bash shell.
To load completions in your current shell session:
$ source <(vela completion bash)
To load completions for every new session, execute once:
Linux:
$ vela completion bash > /etc/bash_completion.d/vela
MacOS:
$ vela completion bash > /usr/local/etc/bash_completion.d/vela
```
vela completion bash
```
### Options
```
-h, --help help for bash
```
### Options inherited from parent commands
```
-e, --env string specify environment name for application
```
### SEE ALSO
* [vela completion](vela_completion) - Output shell completion code for the specified shell (bash or zsh)
###### Auto generated by spf13/cobra on 20-Mar-2021

View File

@ -0,0 +1,38 @@
---
title: vela completion zsh
---
generate autocompletions script for zsh
### Synopsis
Generate the autocompletion script for Vela for the zsh shell.
To load completions in your current shell session:
$ source <(vela completion zsh)
To load completions for every new session, execute once:
$ vela completion zsh > "${fpath[1]}/_vela"
```
vela completion zsh
```
### Options
```
-h, --help help for zsh
```
### Options inherited from parent commands
```
-e, --env string specify environment name for application
```
### SEE ALSO
* [vela completion](vela_completion) - Output shell completion code for the specified shell (bash or zsh)
###### Auto generated by spf13/cobra on 20-Mar-2021

View File

@ -0,0 +1,37 @@
---
title: vela components
---
List components
### Synopsis
List components
```
vela components
```
### Examples
```
vela components
```
### Options
```
-h, --help help for workloads
```
### Options inherited from parent commands
```
-e, --env string specify environment name for application
```
### SEE ALSO
* [vela](vela) -
###### Auto generated by spf13/cobra on 20-Mar-2021

View File

@ -0,0 +1,31 @@
---
title: vela config
---
Manage configurations
### Synopsis
Manage configurations
### Options
```
-h, --help help for config
```
### Options inherited from parent commands
```
-e, --env string specify environment name for application
```
### SEE ALSO
* [vela](vela) -
* [vela config del](vela_config_del) - Delete config
* [vela config get](vela_config_get) - Get data for a config
* [vela config ls](vela_config_ls) - List configs
* [vela config set](vela_config_set) - Set data for a config
###### Auto generated by spf13/cobra on 20-Mar-2021

View File

@ -0,0 +1,37 @@
---
title: vela config del
---
Delete config
### Synopsis
Delete config
```
vela config del
```
### Examples
```
vela config del <config-name>
```
### Options
```
-h, --help help for del
```
### Options inherited from parent commands
```
-e, --env string specify environment name for application
```
### SEE ALSO
* [vela config](vela_config) - Manage configurations
###### Auto generated by spf13/cobra on 20-Mar-2021

View File

@ -0,0 +1,37 @@
---
title: vela config get
---
Get data for a config
### Synopsis
Get data for a config
```
vela config get
```
### Examples
```
vela config get <config-name>
```
### Options
```
-h, --help help for get
```
### Options inherited from parent commands
```
-e, --env string specify environment name for application
```
### SEE ALSO
* [vela config](vela_config) - Manage configurations
###### Auto generated by spf13/cobra on 20-Mar-2021

View File

@ -0,0 +1,37 @@
---
title: vela config ls
---
List configs
### Synopsis
List all configs
```
vela config ls
```
### Examples
```
vela config ls
```
### Options
```
-h, --help help for ls
```
### Options inherited from parent commands
```
-e, --env string specify environment name for application
```
### SEE ALSO
* [vela config](vela_config) - Manage configurations
###### Auto generated by spf13/cobra on 20-Mar-2021

View File

@ -0,0 +1,37 @@
---
title: vela config set
---
Set data for a config
### Synopsis
Set data for a config
```
vela config set
```
### Examples
```
vela config set <config-name> KEY=VALUE K2=V2
```
### Options
```
-h, --help help for set
```
### Options inherited from parent commands
```
-e, --env string specify environment name for application
```
### SEE ALSO
* [vela config](vela_config) - Manage configurations
###### Auto generated by spf13/cobra on 20-Mar-2021

View File

@ -0,0 +1,38 @@
---
title: vela delete
---
Delete an application
### Synopsis
Delete an application
```
vela delete APP_NAME
```
### Examples
```
vela delete frontend
```
### Options
```
-h, --help help for delete
--svc string delete only the specified service in this app
```
### Options inherited from parent commands
```
-e, --env string specify environment name for application
```
### SEE ALSO
* [vela](vela) -
###### Auto generated by spf13/cobra on 20-Mar-2021

View File

@ -0,0 +1,31 @@
---
title: vela env
---
Manage environments
### Synopsis
Manage environments
### Options
```
-h, --help help for env
```
### Options inherited from parent commands
```
-e, --env string specify environment name for application
```
### SEE ALSO
* [vela](vela) -
* [vela env delete](vela_env_delete) - Delete environment
* [vela env init](vela_env_init) - Create environments
* [vela env ls](vela_env_ls) - List environments
* [vela env set](vela_env_set) - Set an environment
###### Auto generated by spf13/cobra on 20-Mar-2021

View File

@ -0,0 +1,37 @@
---
title: vela env delete
---
Delete environment
### Synopsis
Delete environment
```
vela env delete
```
### Examples
```
vela env delete test
```
### Options
```
-h, --help help for delete
```
### Options inherited from parent commands
```
-e, --env string specify environment name for application
```
### SEE ALSO
* [vela env](vela_env) - Manage environments
###### Auto generated by spf13/cobra on 20-Mar-2021

View File

@ -0,0 +1,40 @@
---
title: vela env init
---
Create environments
### Synopsis
Create environment and set the currently using environment
```
vela env init <envName>
```
### Examples
```
vela env init test --namespace test --email my@email.com
```
### Options
```
--domain string specify domain your applications
--email string specify email for production TLS Certificate notification
-h, --help help for init
--namespace string specify K8s namespace for env
```
### Options inherited from parent commands
```
-e, --env string specify environment name for application
```
### SEE ALSO
* [vela env](vela_env) - Manage environments
###### Auto generated by spf13/cobra on 20-Mar-2021

View File

@ -0,0 +1,37 @@
---
title: vela env ls
---
List environments
### Synopsis
List all environments
```
vela env ls
```
### Examples
```
vela env ls [env-name]
```
### Options
```
-h, --help help for ls
```
### Options inherited from parent commands
```
-e, --env string specify environment name for application
```
### SEE ALSO
* [vela env](vela_env) - Manage environments
###### Auto generated by spf13/cobra on 20-Mar-2021

View File

@ -0,0 +1,37 @@
---
title: vela env set
---
Set an environment
### Synopsis
Set an environment as the current using one
```
vela env set
```
### Examples
```
vela env set test
```
### Options
```
-h, --help help for set
```
### Options inherited from parent commands
```
-e, --env string specify environment name for application
```
### SEE ALSO
* [vela env](vela_env) - Manage environments
###### Auto generated by spf13/cobra on 20-Mar-2021

View File

@ -0,0 +1,35 @@
---
title: vela exec
---
Execute command in a container
### Synopsis
Execute command in a container
```
vela exec [flags] APP_NAME -- COMMAND [args...]
```
### Options
```
-h, --help help for exec
--pod-running-timeout duration The length of time (like 5s, 2m, or 3h, higher than zero) to wait until at least one pod is running (default 1m0s)
-i, --stdin Pass stdin to the container (default true)
-s, --svc string service name
-t, --tty Stdin is a TTY (default true)
```
### Options inherited from parent commands
```
-e, --env string specify environment name for application
```
### SEE ALSO
* [vela](vela) -
###### Auto generated by spf13/cobra on 20-Mar-2021

View File

@ -0,0 +1,32 @@
---
title: vela export
---
Export deploy manifests from appfile
### Synopsis
Export deploy manifests from appfile
```
vela export
```
### Options
```
-f, -- string specify file path for appfile
-h, --help help for export
```
### Options inherited from parent commands
```
-e, --env string specify environment name for application
```
### SEE ALSO
* [vela](vela) -
###### Auto generated by spf13/cobra on 20-Mar-2021

View File

@ -0,0 +1,27 @@
---
title: vela help
---
Help about any command
```
vela help [command]
```
### Options
```
-h, --help help for help
```
### Options inherited from parent commands
```
-e, --env string specify environment name for application
```
### SEE ALSO
* [vela](vela) -
###### Auto generated by spf13/cobra on 20-Mar-2021

View File

@ -0,0 +1,38 @@
---
title: vela init
---
Create scaffold for an application
### Synopsis
Create scaffold for an application
```
vela init
```
### Examples
```
vela init
```
### Options
```
-h, --help help for init
--render-only Rendering vela.yaml in current dir and do not deploy
```
### Options inherited from parent commands
```
-e, --env string specify environment name for application
```
### SEE ALSO
* [vela](vela) -
###### Auto generated by spf13/cobra on 20-Mar-2021

View File

@ -0,0 +1,32 @@
---
title: vela logs
---
Tail logs for application
### Synopsis
Tail logs for application
```
vela logs [flags]
```
### Options
```
-h, --help help for logs
-o, --output string output format for logs, support: [default, raw, json] (default "default")
```
### Options inherited from parent commands
```
-e, --env string specify environment name for application
```
### SEE ALSO
* [vela](vela) -
###### Auto generated by spf13/cobra on 20-Mar-2021

View File

@ -0,0 +1,38 @@
---
title: vela ls
---
List applications
### Synopsis
List all applications in cluster
```
vela ls
```
### Examples
```
vela ls
```
### Options
```
-h, --help help for ls
-n, --namespace string specify the namespace the application want to list, default is the current env namespace
```
### Options inherited from parent commands
```
-e, --env string specify environment name for application
```
### SEE ALSO
* [vela](vela) -
###### Auto generated by spf13/cobra on 20-Mar-2021

View File

@ -0,0 +1,40 @@
---
title: vela port-forward
---
Forward local ports to services in an application
### Synopsis
Forward local ports to services in an application
```
vela port-forward APP_NAME [flags]
```
### Examples
```
port-forward APP_NAME [options] [LOCAL_PORT:]REMOTE_PORT [...[LOCAL_PORT_N:]REMOTE_PORT_N]
```
### Options
```
--address strings Addresses to listen on (comma separated). Only accepts IP addresses or localhost as a value. When localhost is supplied, vela will try to bind on both 127.0.0.1 and ::1 and will fail if neither of these addresses are available to bind. (default [localhost])
-h, --help help for port-forward
--pod-running-timeout duration The length of time (like 5s, 2m, or 3h, higher than zero) to wait until at least one pod is running (default 1m0s)
--route forward ports from route trait service
```
### Options inherited from parent commands
```
-e, --env string specify environment name for application
```
### SEE ALSO
* [vela](vela) -
###### Auto generated by spf13/cobra on 20-Mar-2021

View File

@ -0,0 +1,38 @@
---
title: vela show
---
Show the reference doc for a workload type or trait
### Synopsis
Show the reference doc for a workload type or trait
```
vela show [flags]
```
### Examples
```
show webservice
```
### Options
```
-h, --help help for show
--web start web doc site
```
### Options inherited from parent commands
```
-e, --env string specify environment name for application
```
### SEE ALSO
* [vela](vela) -
###### Auto generated by spf13/cobra on 20-Mar-2021

View File

@ -0,0 +1,38 @@
---
title: vela status
---
Show status of an application
### Synopsis
Show status of an application, including workloads and traits of each service.
```
vela status APP_NAME [flags]
```
### Examples
```
vela status APP_NAME
```
### Options
```
-h, --help help for status
-s, --svc string service name
```
### Options inherited from parent commands
```
-e, --env string specify environment name for application
```
### SEE ALSO
* [vela](vela) -
###### Auto generated by spf13/cobra on 20-Mar-2021

View File

@ -0,0 +1,29 @@
---
title: vela system
---
System management utilities
### Synopsis
System management utilities
### Options
```
-h, --help help for system
```
### Options inherited from parent commands
```
-e, --env string specify environment name for application
```
### SEE ALSO
* [vela](vela) -
* [vela system dry-run](vela_system_dry-run) - Dry Run an application, and output the conversion result to stdout
* [vela system info](vela_system_info) - Show vela client and cluster chartPath
###### Auto generated by spf13/cobra on 20-Mar-2021

View File

@ -0,0 +1,38 @@
---
title: vela system dry-run
---
Dry Run an application, and output the conversion result to stdout
### Synopsis
Dry Run an application, and output the conversion result to stdout
```
vela system dry-run
```
### Examples
```
vela dry-run
```
### Options
```
-f, --file string application file name (default "./app.yaml")
-h, --help help for dry-run
```
### Options inherited from parent commands
```
-e, --env string specify environment name for application
```
### SEE ALSO
* [vela system](vela_system) - System management utilities
###### Auto generated by spf13/cobra on 20-Mar-2021

View File

@ -0,0 +1,31 @@
---
title: vela system info
---
Show vela client and cluster chartPath
### Synopsis
Show vela client and cluster chartPath
```
vela system info [flags]
```
### Options
```
-h, --help help for info
```
### Options inherited from parent commands
```
-e, --env string specify environment name for application
```
### SEE ALSO
* [vela system](vela_system) - System management utilities
###### Auto generated by spf13/cobra on 20-Mar-2021

View File

@ -0,0 +1,28 @@
---
title: vela template
---
Manage templates
### Synopsis
Manage templates
### Options
```
-h, --help help for template
```
### Options inherited from parent commands
```
-e, --env string specify environment name for application
```
### SEE ALSO
* [vela](vela) -
* [vela template context](vela_template_context) - Show context parameters
###### Auto generated by spf13/cobra on 20-Mar-2021

View File

@ -0,0 +1,37 @@
---
title: vela template context
---
Show context parameters
### Synopsis
Show context parameter
```
vela template context
```
### Examples
```
vela template context
```
### Options
```
-h, --help help for context
```
### Options inherited from parent commands
```
-e, --env string specify environment name for application
```
### SEE ALSO
* [vela template](vela_template) - Manage templates
###### Auto generated by spf13/cobra on 20-Mar-2021

View File

@ -0,0 +1,37 @@
---
title: vela traits
---
List traits
### Synopsis
List traits
```
vela traits
```
### Examples
```
vela traits
```
### Options
```
-h, --help help for traits
```
### Options inherited from parent commands
```
-e, --env string specify environment name for application
```
### SEE ALSO
* [vela](vela) -
###### Auto generated by spf13/cobra on 20-Mar-2021

View File

@ -0,0 +1,32 @@
---
title: vela up
---
Apply an appfile
### Synopsis
Apply an appfile
```
vela up
```
### Options
```
-f, -- string specify file path for appfile
-h, --help help for up
```
### Options inherited from parent commands
```
-e, --env string specify environment name for application
```
### SEE ALSO
* [vela](vela) -
###### Auto generated by spf13/cobra on 20-Mar-2021

View File

@ -0,0 +1,31 @@
---
title: vela version
---
Prints out build version information
### Synopsis
Prints out build version information
```
vela version [flags]
```
### Options
```
-h, --help help for version
```
### Options inherited from parent commands
```
-e, --env string specify environment name for application
```
### SEE ALSO
* [vela](vela) -
###### Auto generated by spf13/cobra on 20-Mar-2021

View File

@ -0,0 +1,37 @@
---
title: vela workloads
---
List workloads
### Synopsis
List workloads
```
vela workloads
```
### Examples
```
vela workloads
```
### Options
```
-h, --help help for workloads
```
### Options inherited from parent commands
```
-e, --env string specify environment name for application
```
### SEE ALSO
* [vela](vela) -
###### Auto generated by spf13/cobra on 20-Mar-2021

View File

@ -0,0 +1,105 @@
---
title: Core Concepts
---
*"KubeVela 是一个面向平台构建者基于 Kubernetes 构建易用可拓展的云原生应用平台的引擎。"*
在本部分中,我们会对 KubeVela 的核心思想进行详细解释,并进一步阐清一些在本项目中被广泛使用的技术术语。
## 综述
首先KubeVela 引入了下面所述的带有关注点分离思想的工作流:
- **Platform team**
  - Build the platform by creating templates for deployment environments and reusable capability modules, and register them into the cluster.
- **End users**
  - Choose a deployment environment, a model, and the available modules to assemble an application, and deploy it to the target environment.
The workflow is shown in the figure below:
![alt](resources/how-it-works.png)
This template-based workflow enables the platform team to guide users to follow the best practices and deployment experience they have built on top of a set of Kubernetes CRDs, and naturally provides end users with a PaaS-like experience (e.g. "application-centric", "higher-level abstractions", "self-service operations", and so on).
![alt](resources/what-is-kubevela.png)
The core concepts of KubeVela are introduced below.
## `Application`
An *Application* is KubeVela's core API. It enables application developers to build a complete application, based on a single artifact, with a few simple primitives.
In an application delivery platform, having an *Application* concept is important: it greatly simplifies operational tasks, and it serves as an anchor that avoids configuration drift during operations. It also provides a simpler path for introducing Kubernetes capabilities into the application delivery process without depending on low-level details. For example, a developer can model a web service without defining a detailed Kubernetes Deployment + Service combination every time, and can express an autoscaling requirement without relying on the underlying KEDA ScaleObject.
### Example
A `website` application that needs two components (e.g. `frontend` and `backend`) can be modeled as follows:
```yaml
apiVersion: core.oam.dev/v1beta1
kind: Application
metadata:
name: website
spec:
components:
- name: backend
type: worker
properties:
image: busybox
cmd:
- sleep
- '1000'
- name: frontend
type: webservice
properties:
image: nginx
traits:
- type: autoscaler
properties:
min: 1
max: 10
- type: sidecar
properties:
name: "sidecar-test"
image: "fluentd"
```
## Building the abstraction
Unlike most higher-level abstractions, the `Application` resource in KubeVela is a building-block-style object, and it doesn't even have a fixed schema. Instead, it is composed of building blocks such as app components and traits, which allow developers to integrate the platform's capabilities into this application definition through their own abstractions.
The building blocks for defining abstractions and modeling platform capabilities are `ComponentDefinition` and `TraitDefinition`.
### ComponentDefinition
A `ComponentDefinition` is a pre-defined *template* for a deployable workload. It carries template parameters and workload characteristics as a declarative API resource.
Hence, the `Application` abstraction essentially defines how the user wants to **instantiate** a given component definition in the target cluster. Specifically, the `.type` field references the name of an installed `ComponentDefinition`, and the `.properties` field carries the user-provided values that instantiate it.
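Concretely, the `backend` component in the `website` example above instantiates the installed `worker` definition:
```yaml
# Fragment of the website Application shown earlier
- name: backend
  type: worker        # references the installed ComponentDefinition named "worker"
  properties:         # user-provided values that instantiate the template
    image: busybox
    cmd: ["sleep", "1000"]
```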
Typical component definitions include a long-running web service, a one-off task, and a Redis database. All component definitions are expected to be pre-installed on the platform, or provided by component providers such as third-party software vendors.
### TraitDefinition
Optionally, each component has a `.traits` section, which augments the component instance with operational behaviors such as load-balancing policies, network ingress routing, autoscaling policies, and upgrade strategies.
A *trait* is an operational feature provided by the platform. To attach a trait to a component instance, the user declares a `.type` field that references a specific `TraitDefinition`, and a `.properties` field that sets the attribute values of that trait. Similarly, `TraitDefinition` also allows users to define *templates* for these operational features.
In KubeVela, we also refer to component definitions and trait definitions as *"capability definitions"*.
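A quick way to see which capability definitions are installed on your platform (using the `vela components` and `vela traits` commands from the CLI reference):
```shell
vela components   # list installed component definitions
vela traits       # list installed trait definitions
```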
## Environment
Before releasing an application to production, it is important to test the code in testing/staging workspaces. In KubeVela, we describe these workspaces as "deployment environments", or "environments" for short. Each environment has its own configuration (e.g. domain, Kubernetes cluster, namespace, configuration data, and access-control policies) that lets users create different deployment environments such as "test" and "production".
Currently, a KubeVela `environment` maps to exactly one Kubernetes namespace. Cluster-level environments are under development.
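For example, creating `test` and `prod` environments backed by separate namespaces might look like this (a sketch based on the `vela env` commands documented elsewhere in these docs):
```shell
vela env init test --namespace test   # workspace for testing
vela env init prod --namespace prod   # workspace for production
vela env set test                     # switch the currently used environment
```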
### Summary
The main concepts of KubeVela are shown in the figure below:
![alt](resources/concepts.png)
## Architecture
The overall architecture of KubeVela is shown in the figure below:
![alt](resources/arch.png)
Specifically, the application controller is responsible for the application abstraction and encapsulation (i.e. the controller for `Application` and `Definition`). The rollout controller handles progressive rollout strategies with the whole application as the unit. The multi-cluster deployment engine, backed by traffic-shifting and rollout features, is responsible for deploying the application across multiple clusters and environments.

View File

@ -0,0 +1,133 @@
---
title: Managing Capabilities
---
In KubeVela, developers can install more capabilities (e.g. new component types or traits) from any GitHub repository that contains OAM abstraction files. We call these GitHub repositories _Capability Centers_.
KubeVela can automatically discover the OAM abstraction files in these repositories and sync the capabilities into your KubeVela platform.
## Add a capability center
Add and sync a capability center in KubeVela:
```bash
$ vela cap center config my-center https://github.com/oam-dev/catalog/tree/master/registry
successfully sync 1/1 from my-center remote center
Successfully configured capability center my-center and sync from remote
$ vela cap center sync my-center
successfully sync 1/1 from my-center remote center
sync finished
```
The capability center `my-center` is now ready to use.
## List capability centers
You can list the configured capability centers or add more.
```bash
$ vela cap center ls
NAME ADDRESS
my-center https://github.com/oam-dev/catalog/tree/master/registry
```
## [Optional] Remove a capability center
Remove one:
```bash
$ vela cap center remove my-center
```
## List all available capabilities in a capability center
List all available capabilities in a given center.
```bash
$ vela cap ls my-center
NAME CENTER TYPE DEFINITION STATUS APPLIES-TO
clonesetservice my-center componentDefinition clonesets.apps.kruise.io uninstalled []
```
## Install a capability from a capability center
Let's install the new component `clonesetservice` from `my-center` into your KubeVela platform.
Install OpenKruise first:
```shell
helm install kruise https://github.com/openkruise/kruise/releases/download/v0.7.0/kruise-chart.tgz
```
`my-center` 中安装 `clonesetservice` component 。
```bash
$ vela cap install my-center/clonesetservice
Installing component capability clonesetservice
Successfully installed capability clonesetservice from my-center
```
## Use the newly installed capability
Let's first check whether the `clonesetservice` component has been installed on the platform:
```bash
$ vela components
NAME NAMESPACE WORKLOAD DESCRIPTION
clonesetservice vela-system clonesets.apps.kruise.io Describes long-running, scalable, containerized services
that have a stable network endpoint to receive external
network traffic from customers. If workload type is skipped
for any service defined in Appfile, it will be defaulted to
`webservice` type.
```
Great! Now let's deploy an application with an Appfile:
```bash
$ cat << EOF > vela.yaml
name: testapp
services:
testsvc:
type: clonesetservice
image: crccheck/hello-world
port: 8000
EOF
```
```bash
$ vela up
Parsing vela appfile ...
Load Template ...
Rendering configs for service (testsvc)...
Writing deploy config to (.vela/deploy.yaml)
Applying application ...
Checking if app has been deployed...
App has not been deployed, creating a new deployment...
Updating: core.oam.dev/v1alpha2, Kind=HealthScope in default
✅ App has been deployed 🚀🚀🚀
Port forward: vela port-forward testapp
SSH: vela exec testapp
Logging: vela logs testapp
App status: vela status testapp
Service status: vela status testapp --svc testsvc
```
The cloneset has now been deployed to your environment:
```shell
$ kubectl get clonesets.apps.kruise.io
NAME DESIRED UPDATED UPDATED_READY READY TOTAL AGE
testsvc 1 1 1 1 1 46s
```
## Uninstall a capability
> Note: make sure no application is still using the capability before uninstalling it.
```bash
$ vela cap uninstall my-center/clonesetservice
Successfully uninstalled capability clonesetservice
```

View File

@ -0,0 +1,9 @@
---
title: Check Application Logs
---
```bash
$ vela logs testapp
```
The command above shows the logs of the specified container of testapp. If the application has only one container, that container's logs are shown by default.
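To get machine-readable output instead, you can switch the format with the `-o` flag documented in the CLI reference:
```bash
# Tail the same logs as JSON ("raw" and the default format are also supported)
$ vela logs testapp -o json
```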

View File

@ -0,0 +1,103 @@
---
title: The Reference Documentation Guide of Capabilities
---
In this documentation, we will show how to check the detailed schema of a given capability (i.e. workload type or trait).
This may sound challenging because every capability is a "plug-in" in KubeVela (even the built-in ones). Also, it's by design that KubeVela allows platform administrators to modify the capability templates at any time. In this case, do we need to manually write documentation for every newly installed capability? And how can we ensure that documentation stays up to date?
## Using Browser
Actually, as an important part of its "extensibility" design, KubeVela will always **automatically generate** reference documentation for every workload type or trait registered in your Kubernetes cluster, based on its template in the definition. This feature works for any capability: either the built-in ones or your own workload types/traits.
Thus, as an end user, the only thing you need to do is:
```console
$ vela show WORKLOAD_TYPE or TRAIT --web
```
This command will automatically open the reference documentation for the given workload type or trait in your default browser.
### For Workload Types
Let's take `$ vela show webservice --web` as an example. The detailed schema documentation for the `Web Service` workload type will show up immediately, as below:
![](../resources/vela_show_webservice.jpg)
Note that in the section named `Specification`, it even provides you with a full usage sample of this workload type, under the fake name `my-service-name`.
### For Traits
Similarly, we can also do `$ vela show autoscale --web`:
![](../resources/vela_show_autoscale.jpg)
With this auto-generated reference documentation, we can easily complete an application description by simple copy-and-paste, for example:
```yaml
name: helloworld
services:
backend: # copy-paste from the webservice ref doc above
image: oamdev/testapp:v1
cmd: ["node", "server.js"]
port: 8080
cpu: "0.1"
autoscale: # copy-paste and modify from autoscaler ref doc above
min: 1
max: 8
cron:
startAt: "19:00"
duration: "2h"
days: "Friday"
replicas: 4
timezone: "America/Los_Angeles"
```
## Using Terminal
This reference doc feature also works in terminal-only environments. For example:
```shell
$ vela show webservice
# Properties
+-------+----------------------------------------------------------------------------------+---------------+----------+---------+
| NAME | DESCRIPTION | TYPE | REQUIRED | DEFAULT |
+-------+----------------------------------------------------------------------------------+---------------+----------+---------+
| cmd | Commands to run in the container | []string | false | |
| env | Define arguments by using environment variables | [[]env](#env) | false | |
| image | Which image would you like to use for your service | string | true | |
| port | Which port do you want customer traffic sent to | int | true | 80 |
| cpu | Number of CPU units for the service, like `0.5` (0.5 CPU core), `1` (1 CPU core) | string | false | |
+-------+----------------------------------------------------------------------------------+---------------+----------+---------+
## env
+-----------+-----------------------------------------------------------+-------------------------+----------+---------+
| NAME | DESCRIPTION | TYPE | REQUIRED | DEFAULT |
+-----------+-----------------------------------------------------------+-------------------------+----------+---------+
| name | Environment variable name | string | true | |
| value | The value of the environment variable | string | false | |
| valueFrom | Specifies a source the value of this var should come from | [valueFrom](#valueFrom) | false | |
+-----------+-----------------------------------------------------------+-------------------------+----------+---------+
### valueFrom
+--------------+--------------------------------------------------+-------------------------------+----------+---------+
| NAME | DESCRIPTION | TYPE | REQUIRED | DEFAULT |
+--------------+--------------------------------------------------+-------------------------------+----------+---------+
| secretKeyRef | Selects a key of a secret in the pod's namespace | [secretKeyRef](#secretKeyRef) | true | |
+--------------+--------------------------------------------------+-------------------------------+----------+---------+
#### secretKeyRef
+------+------------------------------------------------------------------+--------+----------+---------+
| NAME | DESCRIPTION | TYPE | REQUIRED | DEFAULT |
+------+------------------------------------------------------------------+--------+----------+---------+
| name | The name of the secret in the pod's namespace to select from | string | true | |
| key | The key of the secret to select from. Must be a valid secret key | string | true | |
+------+------------------------------------------------------------------+--------+----------+---------+
```
> Note that for all the built-in capabilities, we already published their reference docs [here](https://kubevela.io/#/en/developers/references/) based on the same doc generation mechanism.

View File

@ -0,0 +1,85 @@
---
title: Configure Data or Environments in an Application
---
`vela` provides a `config` command to manage configuration data.
## `vela config set`
```bash
$ vela config set test a=b c=d
reading existing config data and merging with user input
config data saved successfully ✅
```
## `vela config get`
```bash
$ vela config get test
Data:
a: b
c: d
```
## `vela config del`
```bash
$ vela config del test
config (test) deleted successfully
```
## `vela config ls`
```bash
$ vela config set test a=b
$ vela config set test2 c=d
$ vela config ls
NAME
test
test2
```
## Configure environment variables in an application
Configuration data can be set as environment variables in an application.
```bash
$ vela config set demo DEMO_HELLO=helloworld
```
Save the following as `vela.yaml` in the current directory:
```yaml
name: testapp
services:
env-config-demo:
image: heroku/nodejs-hello-world
config: demo
```
Then run:
```bash
$ vela up
Parsing vela.yaml ...
Loading templates ...
Rendering configs for service (env-config-demo)...
Writing deploy config to (.vela/deploy.yaml)
Applying deploy configs ...
Checking if app has been deployed...
App has not been deployed, creating a new deployment...
✅ App has been deployed 🚀🚀🚀
Port forward: vela port-forward testapp
SSH: vela exec testapp
Logging: vela logs testapp
App status: vela status testapp
Service status: vela status testapp --svc env-config-demo
```
Check the environment variables:
```
$ vela exec testapp -- printenv | grep DEMO_HELLO
DEMO_HELLO=helloworld
```

View File

@ -0,0 +1,89 @@
---
title: Set Up Deployment Environments
---
A deployment environment configures the global workspace, email, and domain for your applications. Deployment environments are typically `test`, `staging`, `prod`, and so on.
## Create an environment
```bash
$ vela env init demo --email my@email.com
environment demo created, Namespace: default, Email: my@email.com
```
## Check deployment environment metadata
```bash
$ vela env ls
NAME CURRENT NAMESPACE EMAIL DOMAIN
default default
demo * default my@email.com
```
By default, the environment is created in `default`, the default Kubernetes namespace.
## Update the configuration
You can change the environment configuration by running the command again.
```bash
$ vela env init demo --namespace demo
environment demo created, Namespace: demo, Email: my@email.com
```
```bash
$ vela env ls
NAME CURRENT NAMESPACE EMAIL DOMAIN
default default
demo * demo my@email.com
```
**Note: a deployment environment takes effect only for newly created applications; previously created applications are not affected.**
## [Optional] Configure a domain (requires a public IP)
If you are using a Kubernetes service provided by a cloud vendor and have configured a public IP for ingress, you can configure a domain in the environment. You can then access your applications through that domain, with mTLS mutual authentication enabled automatically.
For example, you can get the public IP of the ingress service as follows:
```bash
$ kubectl get svc -A | grep LoadBalancer
NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE
nginx-ingress-lb LoadBalancer 172.21.2.174 123.57.10.233 80:32740/TCP,443:32086/TCP 41d
```
The value in the `EXTERNAL-IP` column of the output, 123.57.10.233, is the public IP. Add an `A` record to your DNS:
```
*.your.domain => 123.57.10.233
```
If you don't have a custom domain, you can use something like `123.57.10.233.xip.io` as the domain, where `xip.io` automatically routes to the preceding IP `123.57.10.233`:
```bash
$ vela env init demo --domain 123.57.10.233.xip.io
environment demo updated, Namespace: demo, Email: my@email.com
```
### Use the domain in an Appfile
Since the global domain is already configured in the deployment environment, you don't need to specify a domain in the route configuration:
```yaml
# in demo environment
services:
express-server:
...
route:
rules:
- path: /testapp
rewriteTarget: /
```
```
$ curl http://123.57.10.233.xip.io/testapp
Hello World
```

View File

@ -0,0 +1,10 @@
---
title: Run Commands in a Container
---
Run the following command:
```
$ vela exec testapp -- /bin/sh
```
This opens a shell session into the testapp container.
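You can also run a one-off command instead of an interactive shell, as in the environment-variable check used elsewhere in these docs:
```
$ vela exec testapp -- printenv
```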

View File

@ -0,0 +1,238 @@
---
title: Automatically scale workloads by resource utilization metrics and cron
---
## Prerequisite
Make sure the auto-scaler trait controller is installed in your cluster.
Install the auto-scaler trait controller with Helm:
1. Add the helm chart repo for the autoscaler trait
```shell script
helm repo add oam.catalog http://oam.dev/catalog/
```
2. Update the chart repo
```shell script
helm repo update
```
3. Install autoscaler trait controller
```shell script
helm install --create-namespace -n vela-system autoscalertrait oam.catalog/autoscalertrait
```
Autoscale depends on the metrics server, so please [enable it in your Kubernetes cluster](../references/devex/faq#autoscale-how-to-enable-metrics-server-in-various-kubernetes-clusters) first.
> Note: autoscale is one of the extension capabilities [installed from cap center](../cap-center),
> please install it if you can't find it in `vela traits`.
## Setting cron auto-scaling policy
This section introduces how to automatically scale workloads on a cron schedule.
1. Prepare Appfile
```yaml
name: testapp
services:
express-server:
# this image will be used in both build and deploy steps
image: oamdev/testapp:v1
cmd: ["node", "server.js"]
port: 8080
autoscale:
min: 1
max: 4
cron:
startAt: "14:00"
duration: "2h"
days: "Monday, Thursday"
replicas: 2
timezone: "America/Los_Angeles"
```
> The full specification of `autoscale` can be shown with `$ vela show autoscale`.
2. Deploy an application
```
$ vela up
Parsing vela.yaml ...
Loading templates ...
Rendering configs for service (express-server)...
Writing deploy config to (.vela/deploy.yaml)
Applying deploy configs ...
Checking if app has been deployed...
App has not been deployed, creating a new deployment...
✅ App has been deployed 🚀🚀🚀
Port forward: vela port-forward testapp
SSH: vela exec testapp
Logging: vela logs testapp
App status: vela status testapp
Service status: vela status testapp --svc express-server
```
3. Check the replicas and wait for the scaling to take effect
Check the replicas of the application; there is one replica.
```
$ vela status testapp
About:
Name: testapp
Namespace: default
Created at: 2020-11-05 17:09:02.426632 +0800 CST
Updated at: 2020-11-05 17:09:02.426632 +0800 CST
Services:
- Name: express-server
Type: webservice
HEALTHY Ready: 1/1
Traits:
- ✅ autoscale: type: cron replicas(min/max/current): 1/4/1
Last Deployment:
Created at: 2020-11-05 17:09:03 +0800 CST
Updated at: 2020-11-05T17:09:02+08:00
```
Wait until the time reaches `startAt`, then check again. The replica count becomes two, as specified by
`replicas` in `vela.yaml`.
```
$ vela status testapp
About:
Name: testapp
Namespace: default
Created at: 2020-11-10 10:18:59.498079 +0800 CST
Updated at: 2020-11-10 10:18:59.49808 +0800 CST
Services:
- Name: express-server
Type: webservice
HEALTHY Ready: 2/2
Traits:
- ✅ autoscale: type: cron replicas(min/max/current): 1/4/2
Last Deployment:
Created at: 2020-11-10 10:18:59 +0800 CST
Updated at: 2020-11-10T10:18:59+08:00
```
After the period ends, the replica count will eventually return to one.
## Setting auto-scaling policy of CPU resource utilization
This section introduces how to automatically scale workloads by CPU utilization.
1. Prepare Appfile
Modify `vela.yaml` as below. We add the field `services.express-server.cpu` and change the auto-scaling policy
from cron to CPU utilization by updating the field `services.express-server.autoscale`.
```yaml
name: testapp
services:
express-server:
image: oamdev/testapp:v1
cmd: ["node", "server.js"]
port: 8080
cpu: "0.01"
autoscale:
min: 1
max: 5
cpuPercent: 10
```
2. Deploy an application
```bash
$ vela up
```
3. Expose the service entrypoint of the application
```
$ vela port-forward helloworld 80
Forwarding from 127.0.0.1:80 -> 80
Forwarding from [::1]:80 -> 80
Forward successfully! Opening browser ...
Handling connection for 80
Handling connection for 80
Handling connection for 80
Handling connection for 80
```
On macOS, you might need to prefix the command with `sudo`.
4. Monitor the replicas changing
Continue to monitor the replica count as the application becomes overloaded. You can use the Apache HTTP server
benchmarking tool `ab` to send many requests to the application.
```
$ ab -n 10000 -c 200 http://127.0.0.1/
This is ApacheBench, Version 2.3 <$Revision: 1843412 $>
Copyright 1996 Adam Twiss, Zeus Technology Ltd, http://www.zeustech.net/
Licensed to The Apache Software Foundation, http://www.apache.org/
Benchmarking 127.0.0.1 (be patient)
Completed 1000 requests
```
The replicas gradually increase from one to four.
```
$ vela status helloworld --svc frontend
About:
Name: helloworld
Namespace: default
Created at: 2020-11-05 20:07:21.830118 +0800 CST
Updated at: 2020-11-05 20:50:42.664725 +0800 CST
Services:
- Name: frontend
Type: webservice
HEALTHY Ready: 1/1
Traits:
- ✅ autoscale: type: cpu cpu-utilization(target/current): 5%/10% replicas(min/max/current): 1/5/2
Last Deployment:
Created at: 2020-11-05 20:07:23 +0800 CST
Updated at: 2020-11-05T20:50:42+08:00
```
```
$ vela status helloworld --svc frontend
About:
Name: helloworld
Namespace: default
Created at: 2020-11-05 20:07:21.830118 +0800 CST
Updated at: 2020-11-05 20:50:42.664725 +0800 CST
Services:
- Name: frontend
Type: webservice
HEALTHY Ready: 1/1
Traits:
- ✅ autoscale: type: cpu cpu-utilization(target/current): 5%/14% replicas(min/max/current): 1/5/4
Last Deployment:
Created at: 2020-11-05 20:07:23 +0800 CST
Updated at: 2020-11-05T20:50:42+08:00
```
Stop the `ab` tool, and the replica count will eventually decrease to one.
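To observe the scale-down without re-running the status command by hand, the standard `watch` utility is convenient (a sketch):
```
$ watch -n 10 vela status helloworld --svc frontend
```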

View File

@ -0,0 +1,107 @@
---
title: Monitoring Application
---
If your application has exposed metrics, you can easily tell the platform how to collect the metrics data from your app with `metrics` capability.
## Prerequisite
Make sure the metrics trait controller is installed in your cluster.
Install the metrics trait controller with Helm:
1. Add the helm chart repo for the metrics trait
```shell script
helm repo add oam.catalog http://oam.dev/catalog/
```
2. Update the chart repo
```shell script
helm repo update
```
3. Install metrics trait controller
```shell script
helm install --create-namespace -n vela-system metricstrait oam.catalog/metricstrait
```
> Note: metrics is one of the extension capabilities [installed from cap center](../cap-center),
> please install it if you can't find it in `vela traits`.
## Setting metrics policy
Let's run [`christianhxc/gorandom:1.0`](https://github.com/christianhxc/prometheus-tutorial) as an example app.
The app will emit random latencies as metrics.
1. Prepare Appfile:
```bash
$ cat <<EOF > vela.yaml
name: metricapp
services:
metricapp:
type: webservice
image: christianhxc/gorandom:1.0
port: 8080
metrics:
enabled: true
format: prometheus
path: /metrics
port: 0
scheme: http
EOF
```
> The full specification of `metrics` can be shown by `$ vela show metrics`.
2. Deploy the application:
```bash
$ vela up
```
3. Check status:
```bash
$ vela status metricapp
About:
Name: metricapp
Namespace: default
Created at: 2020-11-11 17:00:59.436347573 -0800 PST
Updated at: 2020-11-11 17:01:06.511064661 -0800 PST
Services:
- Name: metricapp
Type: webservice
HEALTHY Ready: 1/1
Traits:
- ✅ metrics: Monitoring port: 8080, path: /metrics, format: prometheus, schema: http.
Last Deployment:
Created at: 2020-11-11 17:00:59 -0800 PST
Updated at: 2020-11-11T17:01:06-08:00
```
The metrics trait will automatically discover the port and label to monitor if no parameters are specified.
If more than one port is found, it will choose the first one by default.
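If auto-discovery picks the wrong port, you can pin it explicitly with the same fields shown in the Appfile above — a minimal sketch (the `8080` value is illustrative):
```yaml
metrics:
  enabled: true
  format: prometheus
  path: /metrics
  port: 8080    # set explicitly instead of relying on auto-discovery
  scheme: http
```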
**(Optional) Verify that the metrics are collected on Prometheus**
<details>
Expose the port of Prometheus dashboard:
```bash
kubectl --namespace monitoring port-forward `kubectl -n monitoring get pods -l prometheus=oam -o name` 9090
```
Then access the Prometheus dashboard via http://localhost:9090/targets
![Prometheus Dashboard](../../resources/metrics.jpg)
</details>


@ -0,0 +1,163 @@
---
title: Setting Rollout Strategy
---
> Note: rollout is one of the extension capabilities [installed from cap center](../cap-center),
> please install it if you can't find it in `vela traits`.
The `rollout` section is used to configure Canary strategy to release your app.
Add rollout config under `express-server` along with a `route`.
```yaml
name: testapp
services:
express-server:
type: webservice
image: oamdev/testapp:rolling01
port: 80
rollout:
replicas: 5
stepWeight: 20
interval: "30s"
route:
domain: "example.com"
```
> The full specification of `rollout` can be shown by `$ vela show rollout`.
Apply this `appfile.yaml`:
```bash
$ vela up
```
You could check the status by:
```bash
$ vela status testapp
About:
Name: testapp
Namespace: myenv
Created at: 2020-11-09 17:34:38.064006 +0800 CST
Updated at: 2020-11-10 17:05:53.903168 +0800 CST
Services:
- Name: testapp
Type: webservice
HEALTHY Ready: 5/5
Traits:
- ✅ rollout: interval=5s
replicas=5
stepWeight=20
- ✅ route: Visiting URL: http://example.com IP: <your-ingress-IP-address>
Last Deployment:
Created at: 2020-11-09 17:34:38 +0800 CST
Updated at: 2020-11-10T17:05:53+08:00
```
Visit this app by:
```bash
$ curl -H "Host:example.com" http://<your-ingress-IP-address>/
Hello World -- Rolling 01
```
On day 2, assume we have made some changes to our app and built a new image, tagged `oamdev/testapp:rolling02`.
Let's update the Appfile:
```yaml
name: testapp
services:
express-server:
type: webservice
- image: oamdev/testapp:rolling01
+ image: oamdev/testapp:rolling02
port: 80
rollout:
replicas: 5
stepWeight: 20
interval: "30s"
route:
domain: example.com
```
Apply this `appfile.yaml` again:
```bash
$ vela up
```
You could run `vela status` several times to see the instance rolling:
```shell script
$ vela status testapp
About:
Name: testapp
Namespace: myenv
Created at: 2020-11-12 19:02:40.353693 +0800 CST
Updated at: 2020-11-12 19:02:40.353693 +0800 CST
Services:
- Name: express-server
Type: webservice
HEALTHY express-server-v2:Ready: 1/1 express-server-v1:Ready: 4/4
Traits:
- ✅ rollout: interval=30s
replicas=5
stepWeight=20
- ✅ route: Visiting by using 'vela port-forward testapp --route'
Last Deployment:
Created at: 2020-11-12 17:20:46 +0800 CST
Updated at: 2020-11-12T19:02:40+08:00
```
You could then try to `curl` your app multiple times and see how the app is rolled out following the Canary strategy:
```bash
$ curl -H "Host:example.com" http://<your-ingress-ip-address>/
Hello World -- This is rolling 02
$ curl -H "Host:example.com" http://<your-ingress-ip-address>/
Hello World -- Rolling 01
$ curl -H "Host:example.com" http://<your-ingress-ip-address>/
Hello World -- Rolling 01
$ curl -H "Host:example.com" http://<your-ingress-ip-address>/
Hello World -- This is rolling 02
$ curl -H "Host:example.com" http://<your-ingress-ip-address>/
Hello World -- Rolling 01
$ curl -H "Host:example.com" http://<your-ingress-ip-address>/
Hello World -- This is rolling 02
```
**How does `Rollout` work?**
<details>
The `Rollout` trait implements a progressive release process to roll out your app following the [Canary strategy](https://martinfowler.com/bliki/CanaryRelease.html).
In detail, the `Rollout` controller will create a canary of your app, and then gradually shift traffic to the canary while measuring key performance indicators like the HTTP request success rate.
![alt](../../resources/traffic-shifting-analysis.png)
In this sample, every `10s`, `5%` of the traffic will be shifted to the canary from the primary, until the traffic on the canary reaches `50%`. Meanwhile, the number of canary instances will automatically scale to `replicas: 2`, as configured in the Appfile.
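As a sketch, that behavior corresponds to a rollout section like the one below, using only the fields shown earlier (the 50% ceiling comes from the controller's promotion logic and is not expressed by these fields):
```yaml
rollout:
  replicas: 2        # canary instance count during analysis
  stepWeight: 5      # shift 5% of traffic per step
  interval: "10s"    # wait 10s between steps
```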
Based on the analysis of the KPIs during this traffic shifting, the canary will be promoted, or aborted if the analysis fails. When promoting, the primary will be upgraded from v1 to v2, and traffic will be fully shifted back to the primary instances. As a result, the canary instances will be deleted after the promotion finishes.
![alt](../../resources/promotion.png)
> Note: KubeVela's `Rollout` trait is implemented with [Weaveworks Flagger](https://flagger.app/) operator.
</details>


@ -0,0 +1,82 @@
---
title: Setting Routes
---
The `route` section is used to configure the access to your app.
## Prerequisite
Make sure the route trait controller is installed in your cluster.
Install the route trait controller with Helm:
1. Add helm chart repo for route trait
```shell script
helm repo add oam.catalog http://oam.dev/catalog/
```
2. Update the chart repo
```shell script
helm repo update
```
3. Install route trait controller
```shell script
helm install --create-namespace -n vela-system routetrait oam.catalog/routetrait
```
> Note: route is one of the extension capabilities [installed from cap center](../cap-center),
> please install it if you can't find it in `vela traits`.
## Setting route policy
Add routing config under `express-server`:
```yaml
services:
express-server:
...
route:
domain: example.com
rules:
- path: /testapp
rewriteTarget: /
```
> The full specification of `route` can be shown by `$ vela show route`.
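For reference, `rules` accepts multiple entries — a sketch with two paths under one domain (the `/api` path is hypothetical; the walkthrough below continues with the single-rule config above):
```yaml
route:
  domain: example.com
  rules:
    - path: /testapp
      rewriteTarget: /
    - path: /api          # hypothetical second path
      rewriteTarget: /
```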
Apply again:
```bash
$ vela up
```
Check the status until we see route is ready:
```bash
$ vela status testapp
About:
Name: testapp
Namespace: default
Created at: 2020-11-04 16:34:43.762730145 -0800 PST
Updated at: 2020-11-11 16:21:37.761158941 -0800 PST
Services:
- Name: express-server
Type: webservice
HEALTHY Ready: 1/1
Last Deployment:
Created at: 2020-11-11 16:21:37 -0800 PST
Updated at: 2020-11-11T16:21:37-08:00
Routes:
- route: Visiting URL: http://example.com IP: <ingress-IP-address>
```
**In [kind cluster setup](../../install#kind)**, you can visit the service via localhost:
> If not in kind cluster, replace 'localhost' with ingress address
```
$ curl -H "Host:example.com" http://localhost/testapp
Hello World
```


@ -0,0 +1,249 @@
---
title: 学习使用 Appfile
---
A sample `appfile` is as below:
```yaml
name: testapp
services:
frontend: # 1st service
image: oamdev/testapp:v1
build:
docker:
file: Dockerfile
context: .
cmd: ["node", "server.js"]
port: 8080
route: # trait
domain: example.com
rules:
- path: /testapp
rewriteTarget: /
backend: # 2nd service
type: task # workload type
image: perl
cmd: ["perl", "-Mbignum=bpi", "-wle", "print bpi(2000)"]
```
Under the hood, `Appfile` builds the image from the source code, and then creates an `application` resource with the image name.
## Schema
> Before learning the detailed schema of Appfile, we recommend you get familiar with the [core concepts](../concepts) of KubeVela first.
```yaml
name: _app-name_
services:
_service-name_:
# If `build` section exists, this field will be used as the name to build image. Otherwise, KubeVela will try to pull the image with given name directly.
image: oamdev/testapp:v1
build:
docker:
file: _Dockerfile_path_ # relative path is supported, e.g. "./Dockerfile"
context: _build_context_path_ # relative path is supported, e.g. "."
push:
local: kind # optionally push to local KinD cluster instead of remote registry
type: webservice (default) | worker | task
# detailed configurations of workload
... properties of the specified workload ...
_trait_1_:
# properties of trait 1
_trait_2_:
# properties of trait 2
... more traits and their properties ...
_another_service_name_: # more services can be defined
...
```
> To learn how to set a specific type of workload or trait, please check the [reference documentation](./check-ref-doc).
## Example Workflow
In the following workflow, we will build and deploy an example NodeJS app. The source files of the app are [here](https://github.com/oam-dev/kubevela/tree/master/docs/examples/testapp).
### Prerequisites
- [Docker](https://docs.docker.com/get-docker/) installed on the host
- [KubeVela](../install) installed and configured
### 1. Download the source code of the test app
git clone and then enter the testapp directory:
```bash
$ git clone https://github.com/oam-dev/kubevela.git
$ cd kubevela/docs/examples/testapp
```
This example contains the source code of the NodeJS app and the Dockerfile used to build the app image.
### 2. Deploy the app with one command
We will use the [vela.yaml](https://github.com/oam-dev/kubevela/tree/master/docs/examples/testapp/vela.yaml) file in the directory to build and deploy the app.
> Note: please change `oamdev` to your own registered account, or try the `local testing approach` below instead.
```yaml
image: oamdev/testapp:v1 # change this to your image
```
Run the following command:
```bash
$ vela up
Parsing vela.yaml ...
Loading templates ...
Building service (express-server)...
Sending build context to Docker daemon 71.68kB
Step 1/10 : FROM mhart/alpine-node:12
---> 9d88359808c3
...
pushing image (oamdev/testapp:v1)...
...
Rendering configs for service (express-server)...
Writing deploy config to (.vela/deploy.yaml)
Applying deploy configs ...
Checking if app has been deployed...
App has not been deployed, creating a new deployment...
✅ App has been deployed 🚀🚀🚀
Port forward: vela port-forward testapp
SSH: vela exec testapp
Logging: vela logs testapp
App status: vela status testapp
Service status: vela status testapp --svc express-server
```
Check the status of the service:
```bash
$ vela status testapp
About:
Name: testapp
Namespace: default
Created at: 2020-11-02 11:08:32.138484 +0800 CST
Updated at: 2020-11-02 11:08:32.138485 +0800 CST
Services:
- Name: express-server
Type: webservice
HEALTHY Ready: 1/1
Last Deployment:
Created at: 2020-11-02 11:08:33 +0800 CST
Updated at: 2020-11-02T11:08:32+08:00
Routes:
```
#### Local testing approach
If you have a local [kind](../install) cluster running, you can try pushing the image locally. This approach requires no remote container registry.
Add the local option to `build`:
```yaml
build:
# push image into local kind cluster without remote transfer
push:
local: kind
docker:
file: Dockerfile
context: .
```
Then deploy it to kind:
```bash
$ vela up
```
<details><summary>(Advanced) Check the rendered manifests</summary>
By default, Vela renders the final manifests into `.vela/deploy.yaml`:
```yaml
apiVersion: core.oam.dev/v1alpha2
kind: ApplicationConfiguration
metadata:
name: testapp
namespace: default
spec:
components:
- componentName: express-server
---
apiVersion: core.oam.dev/v1alpha2
kind: Component
metadata:
name: express-server
namespace: default
spec:
workload:
apiVersion: apps/v1
kind: Deployment
metadata:
name: express-server
...
---
apiVersion: core.oam.dev/v1alpha2
kind: HealthScope
metadata:
name: testapp-default-health
namespace: default
spec:
...
```
</details>
### [Optional] Configure another workload type
So far, we have deployed a *[Web Service](../end-user/components/webservice)*, the default workload type. We can also add a *[Task](../end-user/components/task)* service to the same app:
```yaml
services:
pi:
type: task
image: perl
cmd: ["perl", "-Mbignum=bpi", "-wle", "print bpi(2000)"]
express-server:
...
```
Then deploy the Appfile again to upgrade the application:
```bash
$ vela up
```
Congratulations! You have learned to deploy applications with `Appfile`.
## What's Next?
More operations on the app:
- [Check Application Logs](./check-logs)
- [Execute Commands in Application Container](./exec-cmd)
- [Access Application via Route](./port-forward)


@ -0,0 +1,23 @@
---
title: Port Forwarding
---
Once your web service Application is deployed, you can access it locally via `port-forward`.
```bash
$ vela ls
NAME APP WORKLOAD TRAITS STATUS CREATED-TIME
express-server testapp webservice Deployed 2020-09-18 22:42:04 +0800 CST
```
It will open the browser for you directly:
```bash
$ vela port-forward testapp
Forwarding from 127.0.0.1:8080 -> 80
Forwarding from [::1]:8080 -> 80
Forward successfully! Opening browser ...
Handling connection for 8080
Handling connection for 8080
```


@ -0,0 +1,112 @@
---
title: Capability Reference Documentation
---
In this documentation, we will show how to check the detailed reference documentation of a given capability (e.g., a component or a trait).
This sounds challenging because every capability is a plugin in KubeVela (even the built-in ones are). Also, by design, KubeVela allows platform administrators to modify the capability templates at any time. In this case, do we need to manually write documentation for every newly installed capability? And how do we ensure those docs are up to date?
## Using the Browser
Actually, as an important part of its extensibility design, KubeVela always automatically generates reference documentation for every workload type or trait registered in the Kubernetes cluster, based on its template definition. This feature works for any capability: either built-in ones or your own workload types/traits.
Thus, as an end user, the only thing you need to do is:
```console
$ vela show COMPONENT_TYPE or TRAIT --web
```
This command will automatically open the reference documentation for the given component type or trait in your default browser.
Take `$ vela show webservice --web` as an example. The detailed documentation for the `Web Service` component type will show up immediately as below:
![](../../resources/vela_show_webservice.jpg)
Note that in the section named `Specification`, it even provides a usage sample of this workload type with a fake name `my-service-name`.
Similarly, we can run `$ vela show autoscale`:
![](../../resources/vela_show_autoscale.jpg)
With these auto-generated reference docs, we can easily complete the application description by simple copy-paste, for example:
```yaml
name: helloworld
services:
  backend: # copy-paste from the webservice reference doc above
image: oamdev/testapp:v1
cmd: ["node", "server.js"]
port: 8080
cpu: "0.1"
    autoscale: # copy-paste and modify from the autoscaler reference doc above
min: 1
max: 8
cron:
startAt: "19:00"
duration: "2h"
days: "Friday"
replicas: 4
timezone: "America/Los_Angeles"
```
## Using the Terminal
This reference doc feature also works in a terminal-only environment, for example:
```shell
$ vela show webservice
# Properties
+-------+----------------------------------------------------------------------------------+---------------+----------+---------+
| NAME | DESCRIPTION | TYPE | REQUIRED | DEFAULT |
+-------+----------------------------------------------------------------------------------+---------------+----------+---------+
| cmd | Commands to run in the container | []string | false | |
| env | Define arguments by using environment variables | [[]env](#env) | false | |
| image | Which image would you like to use for your service | string | true | |
| port | Which port do you want customer traffic sent to | int | true | 80 |
| cpu | Number of CPU units for the service, like `0.5` (0.5 CPU core), `1` (1 CPU core) | string | false | |
+-------+----------------------------------------------------------------------------------+---------------+----------+---------+
## env
+-----------+-----------------------------------------------------------+-------------------------+----------+---------+
| NAME | DESCRIPTION | TYPE | REQUIRED | DEFAULT |
+-----------+-----------------------------------------------------------+-------------------------+----------+---------+
| name | Environment variable name | string | true | |
| value | The value of the environment variable | string | false | |
| valueFrom | Specifies a source the value of this var should come from | [valueFrom](#valueFrom) | false | |
+-----------+-----------------------------------------------------------+-------------------------+----------+---------+
### valueFrom
+--------------+--------------------------------------------------+-------------------------------+----------+---------+
| NAME | DESCRIPTION | TYPE | REQUIRED | DEFAULT |
+--------------+--------------------------------------------------+-------------------------------+----------+---------+
| secretKeyRef | Selects a key of a secret in the pod's namespace | [secretKeyRef](#secretKeyRef) | true | |
+--------------+--------------------------------------------------+-------------------------------+----------+---------+
#### secretKeyRef
+------+------------------------------------------------------------------+--------+----------+---------+
| NAME | DESCRIPTION | TYPE | REQUIRED | DEFAULT |
+------+------------------------------------------------------------------+--------+----------+---------+
| name | The name of the secret in the pod's namespace to select from | string | true | |
| key | The key of the secret to select from. Must be a valid secret key | string | true | |
+------+------------------------------------------------------------------+--------+----------+---------+
```
## Built-in Capabilities
Note that for all the built-in capabilities, we have already published their reference docs below, which follow the same doc generation mechanism.
- Workload Types
- [webservice](component-types/webservice)
- [task](component-types/task)
- [worker](component-types/worker)
- Traits
- [route](traits/route)
- [autoscale](traits/autoscale)
- [rollout](traits/rollout)
- [metrics](traits/metrics)
- [scaler](traits/scaler)


@ -0,0 +1,31 @@
---
title: Task
---
## Description
Describes a job that runs code or a script to completion.
## Specification
List of all configuration options for the `Task` workload type.
```yaml
name: my-app-name
services:
my-service-name:
type: task
image: perl
count: 10
cmd: ["perl", "-Mbignum=bpi", "-wle", "print bpi(2000)"]
```
## Properties
Name | Description | Type | Required | Default
------------ | ------------- | ------------- | ------------- | -------------
cmd | Commands to run in the container | []string | false |
count | Specify the number of tasks to run in parallel | int | true | 1
restart | Define the job restart policy, the value can only be Never or OnFailure | string | true | Never
image | Which image would you like to use for your service | string | true |
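As a sketch combining the properties above, a task that lowers the parallelism and retries failed pods might look like this (the values are illustrative):
```yaml
name: my-app-name
services:
  my-service-name:
    type: task
    image: perl
    count: 3             # run 3 tasks in parallel
    restart: OnFailure   # restart a task pod when it fails
    cmd: ["perl", "-Mbignum=bpi", "-wle", "print bpi(2000)"]
```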


@ -0,0 +1,66 @@
---
title: Webservice
---
## Description
Describes long-running, scalable, containerized services that have a stable network endpoint to receive external traffic from customers. If the workload type is skipped for any service defined in the Appfile, the `webservice` type will be used by default.
## Specification
List of all configuration options for the `Webservice` workload type.
```yaml
name: my-app-name
services:
my-service-name:
type: webservice # could be skipped
image: oamdev/testapp:v1
cmd: ["node", "server.js"]
port: 8080
cpu: "0.1"
env:
- name: FOO
value: bar
- name: FOO
valueFrom:
secretKeyRef:
name: bar
key: bar
```
## Properties
Name | Description | Type | Required | Default
------------ | ------------- | ------------- | ------------- | -------------
cmd | Commands to run in the container | []string | false |
env | Define arguments by using environment variables | [[]env](#env) | false |
image | Which image would you like to use for your service | string | true |
port | Which port do you want customer traffic sent to | int | true | 80
cpu | Number of CPU units for the service, like `0.5` (0.5 CPU core), `1` (1 CPU core) | string | false |
### env
Name | Description | Type | Required | Default
------------ | ------------- | ------------- | ------------- | -------------
name | Environment variable name | string | true |
value | The value of the environment variable | string | false |
valueFrom | Specifies a source the value of this var should come from | [valueFrom](#valueFrom) | false |
#### valueFrom
Name | Description | Type | Required | Default
------------ | ------------- | ------------- | ------------- | -------------
secretKeyRef | Selects a key of a secret in the pod's namespace | [secretKeyRef](#secretKeyRef) | true |
##### secretKeyRef
Name | Description | Type | Required | Default
------------ | ------------- | ------------- | ------------- | -------------
name | The name of the secret in the pod's namespace to select from | string | true |
key | The key of the secret to select from. Must be a valid secret key | string | true |


@ -0,0 +1,28 @@
---
title: Worker
---
## Description
Describes long-running, scalable, containerized services that run in the backend. They do NOT have network endpoints to receive external traffic.
## Specification
List of all configuration options for the `Worker` workload type.
```yaml
name: my-app-name
services:
my-service-name:
type: worker
image: oamdev/testapp:v1
cmd: ["node", "server.js"]
```
## Properties
Name | Description | Type | Required | Default
------------ | ------------- | ------------- | ------------- | -------------
cmd | Commands to run in the container | []string | false |
image | Which image would you like to use for your service | string | true |


@ -0,0 +1,28 @@
---
title: KubeVela CLI
---
### Auto-completion
#### bash
```bash
To load completions in your current shell session:
$ source <(vela completion bash)
To load completions for every new session, execute once:
Linux:
$ vela completion bash > /etc/bash_completion.d/vela
MacOS:
$ vela completion bash > /usr/local/etc/bash_completion.d/vela
```
#### zsh
```bash
To load completions in your current shell session:
$ source <(vela completion zsh)
To load completions for every new session, execute once:
$ vela completion zsh > "${fpath[1]}/_vela"
```


@ -0,0 +1,10 @@
# KubeVela Dashboard (WIP)
KubeVela has a simple client side dashboard for you to interact with. The functionality is equivalent to the vela cli.
```bash
$ vela dashboard
```
> NOTE: this feature is still under development.


@ -0,0 +1,301 @@
---
title: FAQ
---
- [Comparison to X](#comparison-to-x)
  * [What is the difference between KubeVela and Helm?](#what-is-the-difference-between-kubevela-and-helm)
- [Issues](#issues)
  - [Error: unable to create new content in namespace cert-manager because it is being terminated](#error-unable-to-create-new-content-in-namespace-cert-manager-because-it-is-being-terminated)
  - [Error: ScopeDefinition exists](#error-scopedefinition-exists)
  - [You have reached your pull rate limit](#you-have-reached-your-pull-rate-limit)
  - [Warning: Namespace cert-manager exists](#warning-namespace-cert-manager-exists)
  - [How to fix issue: MutatingWebhookConfiguration mutating-webhook-configuration exists?](#how-to-fix-issue-mutatingwebhookconfiguration-mutating-webhook-configuration-exists)
- [Operating](#operating)
  * [Autoscale: how to enable metrics server in multiple Kubernetes clusters?](#autoscale-how-to-enable-metrics-server-in-multiple-kubernetes-clusters)
## Comparison to X
### What is the difference between KubeVela and Helm?
KubeVela is a platform builder tool for creating easy-to-use and extensible application delivery/management systems on Kubernetes. KubeVela relies on Helm as the templating engine and the standard for application packages. But Helm is not the only templating module KubeVela supports; another one with equally first-class support is CUE.
Also, KubeVela is designed to be a Kubernetes controller (i.e., working on the server side); even for its Helm part, a Helm operator will be installed.
## Issues
### Error: unable to create new content in namespace cert-manager because it is being terminated
You might occasionally hit the issue below. It happens when the previous KubeVela release was not deleted completely.
```
$ vela install
- Installing Vela Core Chart:
install chart vela-core, version 0.1.0, desc : A Helm chart for Kube Vela core, contains 35 file
Failed to install the chart with error: serviceaccounts "cert-manager-cainjector" is forbidden: unable to create new content in namespace cert-manager because it is being terminated
failed to create resource
helm.sh/helm/v3/pkg/kube.(*Client).Update.func1
/home/runner/go/pkg/mod/helm.sh/helm/v3@v3.2.4/pkg/kube/client.go:190
...
Error: failed to create resource: serviceaccounts "cert-manager-cainjector" is forbidden: unable to create new content in namespace cert-manager because it is being terminated
```
Take a break and retry in a few seconds.
```
$ vela install
- Installing Vela Core Chart:
Vela system along with OAM runtime already exist.
Automatically discover capabilities successfully ✅ Add(0) Update(0) Delete(8)
TYPE CATEGORY DESCRIPTION
-task workload One-off task to run a piece of code or script to completion
-webservice workload Long-running scalable service with stable endpoint to receive external traffic
-worker workload Long-running scalable backend worker without network endpoint
-autoscale trait Automatically scale the app following certain triggers or metrics
-metrics trait Configure metrics targets to be monitored for the app
-rollout trait Configure canary deployment strategy to release the app
-route trait Configure route policy to the app
-scaler trait Manually scale the app
- Finished successfully.
```
Manually apply all WorkloadDefinition and TraitDefinition manifests to restore all capabilities.
```
$ kubectl apply -f charts/vela-core/templates/defwithtemplate
traitdefinition.core.oam.dev/autoscale created
traitdefinition.core.oam.dev/scaler created
traitdefinition.core.oam.dev/metrics created
traitdefinition.core.oam.dev/rollout created
traitdefinition.core.oam.dev/route created
workloaddefinition.core.oam.dev/task created
workloaddefinition.core.oam.dev/webservice created
workloaddefinition.core.oam.dev/worker created
$ vela workloads
Automatically discover capabilities successfully ✅ Add(8) Update(0) Delete(0)
TYPE CATEGORY DESCRIPTION
+task workload One-off task to run a piece of code or script to completion
+webservice workload Long-running scalable service with stable endpoint to receive external traffic
+worker workload Long-running scalable backend worker without network endpoint
+autoscale trait Automatically scale the app following certain triggers or metrics
+metrics trait Configure metrics targets to be monitored for the app
+rollout trait Configure canary deployment strategy to release the app
+route trait Configure route policy to the app
+scaler trait Manually scale the app
NAME DESCRIPTION
task One-off task to run a piece of code or script to completion
webservice Long-running scalable service with stable endpoint to receive external traffic
worker Long-running scalable backend worker without network endpoint
```
### Error: ScopeDefinition exists
You might occasionally hit the issue below. It happens when there is an older OAM Kubernetes Runtime release, or you have deployed a `ScopeDefinition` before.
```
$ vela install
- Installing Vela Core Chart:
install chart vela-core, version 0.1.0, desc : A Helm chart for Kube Vela core, contains 35 file
Failed to install the chart with error: ScopeDefinition "healthscopes.core.oam.dev" in namespace "" exists and cannot be imported into the current release: invalid ownership metadata; annotation validation error: key "meta.helm.sh/release-name" must equal "kubevela": current value is "oam"; annotation validation error: key "meta.helm.sh/release-namespace" must equal "vela-system": current value is "oam-system"
rendered manifests contain a resource that already exists. Unable to continue with install
helm.sh/helm/v3/pkg/action.(*Install).Run
/home/runner/go/pkg/mod/helm.sh/helm/v3@v3.2.4/pkg/action/install.go:274
...
Error: rendered manifests contain a resource that already exists. Unable to continue with install: ScopeDefinition "healthscopes.core.oam.dev" in namespace "" exists and cannot be imported into the current release: invalid ownership metadata; annotation validation error: key "meta.helm.sh/release-name" must equal "kubevela": current value is "oam"; annotation validation error: key "meta.helm.sh/release-namespace" must equal "vela-system": current value is "oam-system"
```
Delete the `ScopeDefinition` "healthscopes.core.oam.dev" and retry.
```
$ kubectl delete ScopeDefinition "healthscopes.core.oam.dev"
scopedefinition.core.oam.dev "healthscopes.core.oam.dev" deleted
$ vela install
- Installing Vela Core Chart:
install chart vela-core, version 0.1.0, desc : A Helm chart for Kube Vela core, contains 35 file
Successfully installed the chart, status: deployed, last deployed time = 2020-12-03 16:26:41.491426 +0800 CST m=+4.026069452
WARN: handle workload template `containerizedworkloads.core.oam.dev` failed: no template found, you will unable to use this workload capabilityWARN: handle trait template `manualscalertraits.core.oam.dev` failed
: no template found, you will unable to use this trait capabilityAutomatically discover capabilities successfully ✅ Add(8) Update(0) Delete(0)
TYPE CATEGORY DESCRIPTION
+task workload One-off task to run a piece of code or script to completion
+webservice workload Long-running scalable service with stable endpoint to receive external traffic
+worker workload Long-running scalable backend worker without network endpoint
+autoscale trait Automatically scale the app following certain triggers or metrics
+metrics trait Configure metrics targets to be monitored for the app
+rollout trait Configure canary deployment strategy to release the app
+route trait Configure route policy to the app
+scaler trait Manually scale the app
- Finished successfully.
```
### You have reached your pull rate limit
You may see the following when you check the logs of the kubevela-vela-core Pod:
```
$ kubectl get pod -n vela-system -l app.kubernetes.io/name=vela-core
NAME READY STATUS RESTARTS AGE
kubevela-vela-core-f8b987775-wjg25 0/1 - 0 35m
```
>Error response from daemon: toomanyrequests: You have reached your pull rate limit. You may increase the limit by
>authenticating and upgrading: https://www.docker.com/increase-rate-limit
You can switch to the GitHub container registry instead.
```
$ docker pull ghcr.io/oam-dev/kubevela/vela-core:latest
```
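To make the running controller use the mirror, you could point its Deployment at that image — a sketch only, assuming the default Deployment and container names:
```yaml
# kubectl edit deployment -n vela-system kubevela-vela-core (names assumed)
spec:
  template:
    spec:
      containers:
        - name: kubevela-vela-core                          # assumed container name
          image: ghcr.io/oam-dev/kubevela/vela-core:latest  # GitHub mirror of the Docker Hub image
```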
### Warning: Namespace cert-manager exists
If you hit the issue below, there may be an existing `cert-manager` release whose namespace and RBAC-related resources conflict with KubeVela.
```
$ vela install
- Installing Vela Core Chart:
install chart vela-core, version 0.1.0, desc : A Helm chart for Kube Vela core, contains 35 file
Failed to install the chart with error: Namespace "cert-manager" in namespace "" exists and cannot be imported into the current release: invalid ownership metadata; label validation error: missing key "app.kubernetes.io/managed-by": must be set to "Helm"; annotation validation error: missing key "meta.helm.sh/release-name": must be set to "kubevela"; annotation validation error: missing key "meta.helm.sh/release-namespace": must be set to "vela-system"
rendered manifests contain a resource that already exists. Unable to continue with install
helm.sh/helm/v3/pkg/action.(*Install).Run
/home/runner/go/pkg/mod/helm.sh/helm/v3@v3.2.4/pkg/action/install.go:274
...
/opt/hostedtoolcache/go/1.14.12/x64/src/runtime/asm_amd64.s:1373
Error: rendered manifests contain a resource that already exists. Unable to continue with install: Namespace "cert-manager" in namespace "" exists and cannot be imported into the current release: invalid ownership metadata; label validation error: missing key "app.kubernetes.io/managed-by": must be set to "Helm"; annotation validation error: missing key "meta.helm.sh/release-name": must be set to "kubevela"; annotation validation error: missing key "meta.helm.sh/release-namespace": must be set to "vela-system"
```
Try the following steps to fix the problem.
- Delete the `cert-manager` release
- Delete the `cert-manager` namespace
- Reinstall KubeVela
```
$ helm delete cert-manager -n cert-manager
release "cert-manager" uninstalled
$ kubectl delete ns cert-manager
namespace "cert-manager" deleted
$ vela install
- Installing Vela Core Chart:
install chart vela-core, version 0.1.0, desc : A Helm chart for Kube Vela core, contains 35 file
Successfully installed the chart, status: deployed, last deployed time = 2020-12-04 10:46:46.782617 +0800 CST m=+4.248889379
Automatically discover capabilities successfully ✅ (no changes)
TYPE CATEGORY DESCRIPTION
task workload One-off task to run a piece of code or script to completion
webservice workload Long-running scalable service with stable endpoint to receive external traffic
worker workload Long-running scalable backend worker without network endpoint
autoscale trait Automatically scale the app following certain triggers or metrics
metrics trait Configure metrics targets to be monitored for the app
rollout trait Configure canary deployment strategy to release the app
route trait Configure route policy to the app
scaler trait Manually scale the app
- Finished successfully.
```
### How to fix issue: MutatingWebhookConfiguration mutating-webhook-configuration exists?
If you deploy some other services that install MutatingWebhookConfiguration mutating-webhook-configuration, you will hit the issue below when installing KubeVela.
```shell
- Installing Vela Core Chart:
install chart vela-core, version v0.2.1, desc : A Helm chart for Kube Vela core, contains 36 file
Failed to install the chart with error: MutatingWebhookConfiguration "mutating-webhook-configuration" in namespace "" exists and cannot be imported into the current release: invalid ownership metadata; label validation error: missing key "app.kubernetes.io/managed-by": must be set to "Helm"; annotation validation error: missing key "meta.helm.sh/release-name": must be set to "kubevela"; annotation validation error: missing key "meta.helm.sh/release-namespace": must be set to "vela-system"
rendered manifests contain a resource that already exists. Unable to continue with install
helm.sh/helm/v3/pkg/action.(*Install).Run
/home/runner/go/pkg/mod/helm.sh/helm/v3@v3.2.4/pkg/action/install.go:274
github.com/oam-dev/kubevela/pkg/commands.InstallOamRuntime
/home/runner/work/kubevela/kubevela/pkg/commands/system.go:259
github.com/oam-dev/kubevela/pkg/commands.(*initCmd).run
/home/runner/work/kubevela/kubevela/pkg/commands/system.go:162
github.com/oam-dev/kubevela/pkg/commands.NewInstallCommand.func2
/home/runner/work/kubevela/kubevela/pkg/commands/system.go:119
github.com/spf13/cobra.(*Command).execute
/home/runner/go/pkg/mod/github.com/spf13/cobra@v1.1.1/command.go:850
github.com/spf13/cobra.(*Command).ExecuteC
/home/runner/go/pkg/mod/github.com/spf13/cobra@v1.1.1/command.go:958
github.com/spf13/cobra.(*Command).Execute
/home/runner/go/pkg/mod/github.com/spf13/cobra@v1.1.1/command.go:895
main.main
/home/runner/work/kubevela/kubevela/references/cmd/cli/main.go:16
runtime.main
/opt/hostedtoolcache/go/1.14.13/x64/src/runtime/proc.go:203
runtime.goexit
/opt/hostedtoolcache/go/1.14.13/x64/src/runtime/asm_amd64.s:1373
Error: rendered manifests contain a resource that already exists. Unable to continue with install: MutatingWebhookConfiguration "mutating-webhook-configuration" in namespace "" exists and cannot be imported into the current release: invalid ownership metadata; label validation error: missing key "app.kubernetes.io/managed-by": must be set to "Helm"; annotation validation error: missing key "meta.helm.sh/release-name": must be set to "kubevela"; annotation validation error: missing key "meta.helm.sh/release-namespace": must be set to "vela-system"
```
To fix this issue, please upgrade the KubeVela CLI `vela` to a version higher than `v0.2.2` from [KubeVela releases](https://github.com/oam-dev/kubevela/releases).
## Operating
### Autoscale: how to enable metrics server in multiple Kubernetes clusters?
The Autoscale trait depends on the metrics server, so it has to be enabled in the cluster. Please check whether the metrics server is enabled with `kubectl top nodes` or `kubectl top pods`.
If the output is similar to the following, the metrics server is enabled.
```shell
$ kubectl top nodes
NAME CPU(cores) CPU% MEMORY(bytes) MEMORY%
cn-hongkong.10.0.1.237 288m 7% 5378Mi 78%
cn-hongkong.10.0.1.238 351m 8% 5113Mi 74%
$ kubectl top pods
NAME CPU(cores) MEMORY(bytes)
php-apache-65f444bf84-cjbs5 0m 1Mi
wordpress-55c59ccdd5-lf59d 1m 66Mi
```
Otherwise, you need to manually enable the metrics server in your Kubernetes cluster.
- ACK (Alibaba Cloud Container Service for Kubernetes)
The metrics server is already enabled.
- ASK (Alibaba Cloud Serverless Kubernetes)
The metrics server needs to be enabled in the `Operations/Add-ons` section of the [Alibaba Cloud console](https://cs.console.aliyun.com/) as below.
![](../../../resources/install-metrics-server-in-ASK.jpg)
If you have more questions, please visit the [metrics server troubleshooting guide](https://help.aliyun.com/document_detail/176515.html).
- Kind
Install the metrics server with the following command, or install the [latest version](https://github.com/kubernetes-sigs/metrics-server#installation).
```shell
$ kubectl apply -f https://github.com/kubernetes-sigs/metrics-server/releases/download/v0.3.7/components.yaml
```
And add the following part under `.spec.template.spec.containers` in the yaml loaded by `kubectl edit deploy -n kube-system metrics-server`.
Note: this is just a sample, not intended for production-level use.
```
command:
- /metrics-server
- --kubelet-insecure-tls
```
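In context, the edited fragment of the metrics-server Deployment would look roughly like this (a sketch; the other container fields are elided):
```yaml
spec:
  template:
    spec:
      containers:
        - name: metrics-server
          command:
            - /metrics-server
            - --kubelet-insecure-tls   # skip kubelet TLS verification — test clusters only
```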
- MiniKube
Enable it with the following command.
```shell
$ minikube addons enable metrics-server
```
Enjoy [setting autoscale](../../extensions/set-autoscale) for your applications.


@ -0,0 +1,10 @@
---
title: Restful API
---
import useBaseUrl from '@docusaurus/useBaseUrl';
<a
target="_blank"
href={useBaseUrl('/restful-api')}>
KubeVela Restful API
</a>


@ -0,0 +1,20 @@
---
title: Ingress
---
## Description
Configures K8s ingress and service to enable web traffic for your service. Please use route trait in cap center for advanced usage.
## Specification
List of all configuration options for an `Ingress` trait.
```yaml
traits:
  - type: ingress
    properties:
      domain: example.com
      http:
        "/": 8000
```
## Properties
Name | Description | Type | Required | Default
------------ | ------------- | ------------- | ------------- | -------------
domain | | string | true |
http | | map[string]int | true |


@ -0,0 +1,27 @@
---
title: Scaler
---
## Description
Configures the replica count for your service.
## Specification
List of all configuration options for a `Scaler` trait.
```yaml
name: my-app-name
services:
my-service-name:
...
scaler:
replicas: 100
```
## Properties
Name | Description | Type | Required | Default
------------ | ------------- | ------------- | ------------- | -------------
replicas | Replica count of the workload | int | true | 1


@ -0,0 +1,238 @@
---
title: Provisioning and Consuming Cloud Resources
---
> ⚠️ This section requires your platform operators to have already installed the [cloud resources related capabilities](../platform-engineers/cloud-services).
## Provisioning and consuming a cloud resource in a single application (one cloud resource)
Check the parameters of the cloud resource component:
```shell
$ kubectl vela show alibaba-rds
# Properties
+---------------+------------------------------------------------+--------+----------+--------------------+
| NAME | DESCRIPTION | TYPE | REQUIRED | DEFAULT |
+---------------+------------------------------------------------+--------+----------+--------------------+
| engine | RDS engine | string | true | mysql |
| engineVersion | The version of RDS engine | string | true | 8.0 |
| instanceClass | The instance class for the RDS | string | true | rds.mysql.c1.large |
| username | RDS username | string | true | |
| secretName | Secret name which RDS connection will write to | string | true | |
+---------------+------------------------------------------------+--------+----------+--------------------+
```
Use the service-binding trait to bind the cloud resource into the workload as ENVs.
The following example creates an application composed of a cloud resource provisioning component and a consuming component:
```yaml
apiVersion: core.oam.dev/v1beta1
kind: Application
metadata:
name: webapp
spec:
components:
- name: express-server
type: webservice
properties:
image: zzxwill/flask-web-application:v0.3.1-crossplane
ports: 80
traits:
- type: service-binding
properties:
envMappings:
# environments refer to db-conn secret
DB_PASSWORD:
secret: db-conn
key: password # 1) If the env name is different from secret key, secret key has to be set.
endpoint:
secret: db-conn # 2) If the env name is the same as the secret key, secret key can be omitted.
username:
secret: db-conn
- name: sample-db
type: alibaba-rds
properties:
name: sample-db
engine: mysql
engineVersion: "8.0"
instanceClass: rds.mysql.c1.large
username: oamtest
secretName: db-conn
```
Deploy and verify the application:
```shell
$ kubectl get application
NAME AGE
webapp 46m
$ kubectl port-forward deployment/express-server 80:80
Forwarding from 127.0.0.1:80 -> 80
Forwarding from [::1]:80 -> 80
Handling connection for 80
Handling connection for 80
```
![](../resources/crossplane-visit-application.jpg)
## Provisioning and consuming cloud resources in a single application (two cloud resources)
Based on the section above, `Provisioning and consuming a cloud resource in a single application (one cloud resource)`,
update the application to also consume an OSS cloud resource:
```yaml
apiVersion: core.oam.dev/v1beta1
kind: Application
metadata:
name: webapp
spec:
components:
- name: express-server
type: webservice
properties:
image: zzxwill/flask-web-application:v0.3.1-crossplane
ports: 80
traits:
- type: service-binding
properties:
envMappings:
# environments refer to db-conn secret
DB_PASSWORD:
secret: db-conn
key: password # 1) If the env name is different from secret key, secret key has to be set.
endpoint:
secret: db-conn # 2) If the env name is the same as the secret key, secret key can be omitted.
username:
secret: db-conn
# environments refer to oss-conn secret
BUCKET_NAME:
secret: oss-conn
key: Bucket
- name: sample-db
type: alibaba-rds
properties:
name: sample-db
engine: mysql
engineVersion: "8.0"
instanceClass: rds.mysql.c1.large
username: oamtest
secretName: db-conn
- name: sample-oss
type: alibaba-oss
properties:
name: velaweb
secretName: oss-conn
```
Deploy and verify the application:
```shell
$ kubectl port-forward deployment/express-server 80:80
Forwarding from 127.0.0.1:80 -> 80
Forwarding from [::1]:80 -> 80
Handling connection for 80
Handling connection for 80
```
![](../resources/crossplane-visit-application-v2.jpg)
## Provisioning and consuming cloud resources in different applications
In this section, one application provisions the cloud resource, and another application consumes it.
### Provisioning the cloud resource
Create an [application](../application) that instantiates an RDS component with the `alibaba-rds` workload type to provide the cloud resource.
As we have already declared the RDS instance component with a ComponentDefinition named `alibaba-rds`, the component defined in the application should use this type.
```yaml
apiVersion: core.oam.dev/v1beta1
kind: Application
metadata:
name: baas-rds
spec:
components:
- name: sample-db
type: alibaba-rds
properties:
name: sample-db
engine: mysql
engineVersion: "8.0"
instanceClass: rds.mysql.c1.large
username: oamtest
secretName: db-conn
```
Deploy the application, and the RDS instance will be provisioned automatically (it may take some time to be ready, roughly 2 minutes). Meanwhile, a secret named `db-conn` will be created in the same namespace as the application.
```shell
$ kubectl get application
NAME AGE
baas-rds 9h
$ kubectl get rdsinstance
NAME READY SYNCED STATE ENGINE VERSION AGE
sample-db-v1 True True Running mysql 8.0 9h
$ kubectl get secret
NAME TYPE DATA AGE
db-conn connection.crossplane.io/v1alpha1 4 9h
$ kubectl get secret db-conn -o yaml
apiVersion: v1
data:
endpoint: xxx==
password: yyy
port: MzMwNg==
username: b2FtdGVzdA==
kind: Secret
```
### Consuming the cloud resource
In this section, we will show how another component consumes the RDS instance.
> Note: we recommend defining the cloud resource as a separate application if the cloud resource has an independent lifecycle.
Create an application to consume the cloud resource:
```yaml
apiVersion: core.oam.dev/v1beta1
kind: Application
metadata:
name: webapp
spec:
components:
- name: express-server
type: webconsumer
properties:
image: zzxwill/flask-web-application:v0.3.1-crossplane
ports: 80
dbSecret: db-conn
```
```shell
$ kubectl get application
NAME AGE
baas-rds 10h
webapp 14h
$ kubectl get deployment
NAME READY UP-TO-DATE AVAILABLE AGE
express-server-v1 1/1 1 1 9h
$ kubectl port-forward deployment/express-server 80:80
```
We can see the cloud resource has been properly consumed by the application.
![](../resources/crossplane-visit-application.jpg)


@ -0,0 +1,181 @@
---
title: Exploring Applications
---
In this section, we will introduce how to explore application-related resources.
## List Applications
```shell
$ kubectl get application
NAME COMPONENT TYPE PHASE HEALTHY STATUS AGE
app-basic app-basic webservice running true 12d
website frontend webservice running true 4m54s
```
We can use the abbreviation `kubectl get app`.
### View Application Details
```shell
$ kubectl get app website -o yaml
apiVersion: core.oam.dev/v1beta1
kind: Application
metadata:
generation: 1
name: website
namespace: default
spec:
components:
- name: frontend
properties:
image: nginx
traits:
- properties:
cpuPercent: 60
max: 10
min: 1
type: cpuscaler
- properties:
image: fluentd
name: sidecar-test
type: sidecar
type: webservice
- name: backend
properties:
cmd:
- sleep
- "1000"
image: busybox
type: worker
status:
...
latestRevision:
name: website-v1
revision: 1
revisionHash: e9e062e2cddfe5fb
services:
- healthy: true
name: frontend
traits:
- healthy: true
type: cpuscaler
- healthy: true
type: sidecar
- healthy: true
name: backend
status: running
```
Here is some important information we should pay attention to:
1. `status.latestRevision` shows the revision the application is currently running.
2. `status.services` shows the health status of the components in the application.
3. `status.status` shows the global status of the application.
### List Application Revisions
KubeVela generates a new revision for an application on every spec change.
```shell
$ kubectl get apprev -l app.oam.dev/name=website
NAME AGE
website-v1 35m
```
## Exploring Components
We can list all the ComponentDefinitions supported in the current KubeVela.
```shell
kubectl get comp -n vela-system
NAME WORKLOAD-KIND DESCRIPTION
task Job Describes jobs that run code or a script to completion.
webservice Deployment Describes long-running, scalable, containerized services that have a stable network endpoint to receive external network traffic from customers.
worker Deployment Describes long-running, scalable, containerized services that running at backend. They do NOT have network endpoint to receive external network traffic.
```
Normally, a ComponentDefinition can only be referenced by applications in the same namespace, but those in the `vela-system` namespace can be referenced by all applications.
```shell
$ kubectl vela show webservice
# Properties
+------------------+----------------------------------------------------------------------------------+-----------------------+----------+---------+
| NAME | DESCRIPTION | TYPE | REQUIRED | DEFAULT |
+------------------+----------------------------------------------------------------------------------+-----------------------+----------+---------+
| cmd | Commands to run in the container | []string | false | |
| env | Define arguments by using environment variables | [[]env](#env) | false | |
| addRevisionLabel | | bool | true | false |
| image | Which image would you like to use for your service | string | true | |
| port | Which port do you want customer traffic sent to | int | true | 80 |
| cpu | Number of CPU units for the service, like `0.5` (0.5 CPU core), `1` (1 CPU core) | string | false | |
| volumes | Declare volumes and volumeMounts | [[]volumes](#volumes) | false | |
+------------------+----------------------------------------------------------------------------------+-----------------------+----------+---------+
##### volumes
+-----------+---------------------------------------------------------------------+--------+----------+---------+
| NAME | DESCRIPTION | TYPE | REQUIRED | DEFAULT |
+-----------+---------------------------------------------------------------------+--------+----------+---------+
| name | | string | true | |
| mountPath | | string | true | |
| type | Specify volume type, options: "pvc","configMap","secret","emptyDir" | string | true | |
+-----------+---------------------------------------------------------------------+--------+----------+---------+
## env
+-----------+-----------------------------------------------------------+-------------------------+----------+---------+
| NAME | DESCRIPTION | TYPE | REQUIRED | DEFAULT |
+-----------+-----------------------------------------------------------+-------------------------+----------+---------+
| name | Environment variable name | string | true | |
| value | The value of the environment variable | string | false | |
| valueFrom | Specifies a source the value of this var should come from | [valueFrom](#valueFrom) | false | |
+-----------+-----------------------------------------------------------+-------------------------+----------+---------+
### valueFrom
+--------------+--------------------------------------------------+-------------------------------+----------+---------+
| NAME | DESCRIPTION | TYPE | REQUIRED | DEFAULT |
+--------------+--------------------------------------------------+-------------------------------+----------+---------+
| secretKeyRef | Selects a key of a secret in the pod's namespace | [secretKeyRef](#secretKeyRef) | true | |
+--------------+--------------------------------------------------+-------------------------------+----------+---------+
#### secretKeyRef
+------+------------------------------------------------------------------+--------+----------+---------+
| NAME | DESCRIPTION | TYPE | REQUIRED | DEFAULT |
+------+------------------------------------------------------------------+--------+----------+---------+
| name | The name of the secret in the pod's namespace to select from | string | true | |
| key | The key of the secret to select from. Must be a valid secret key | string | true | |
+------+------------------------------------------------------------------+--------+----------+---------+
```
## Exploring Traits
We can list all the TraitDefinitions supported in the current KubeVela.
```shell
$ kubectl get trait -n vela-system
NAME APPLIES-TO DESCRIPTION
cpuscaler [webservice worker] configure k8s HPA with CPU metrics for Deployment
ingress [webservice worker] Configures K8s ingress and service to enable web traffic for your service. Please use route trait in cap center for advanced usage.
scaler [webservice worker] Configures replicas for your service.
sidecar [webservice worker] inject a sidecar container into your app
```
Normally, a TraitDefinition can only be referenced by applications in the same namespace, but those in the `vela-system` namespace can be referenced by all applications.
We can use the `kubectl vela show` command to check the parameters exposed by a given TraitDefinition.
```shell
$ kubectl vela show sidecar
# Properties
+---------+-----------------------------------------+----------+----------+---------+
| NAME | DESCRIPTION | TYPE | REQUIRED | DEFAULT |
+---------+-----------------------------------------+----------+----------+---------+
| name | Specify the name of sidecar container | string | true | |
| image | Specify the image of sidecar container | string | true | |
| command | Specify the commands run in the sidecar | []string | false | |
+---------+-----------------------------------------+----------+----------+---------+
```


@ -0,0 +1,370 @@
---
title: Dry-Run / Live-Diff
---
KubeVela supports two ways to debug an application: dry-run and live-diff.
## Dry-Run `Application`
Dry-run helps us understand which resources will be rendered and deployed to the Kubernetes cluster. The command simulates the same logic as KubeVela's controller and outputs the results locally.
For example, let's dry-run the following application:
```yaml
# app.yaml
apiVersion: core.oam.dev/v1beta1
kind: Application
metadata:
name: vela-app
spec:
components:
- name: express-server
type: webservice
properties:
image: crccheck/hello-world
port: 8000
traits:
- type: ingress
properties:
domain: testsvc.example.com
http:
"/": 8000
```
```shell
kubectl vela dry-run -f app.yaml
---
# Application(vela-app) -- Component(express-server)
---
apiVersion: apps/v1
kind: Deployment
metadata:
labels:
app.oam.dev/appRevision: ""
app.oam.dev/component: express-server
app.oam.dev/name: vela-app
workload.oam.dev/type: webservice
spec:
selector:
matchLabels:
app.oam.dev/component: express-server
template:
metadata:
labels:
app.oam.dev/component: express-server
spec:
containers:
- image: crccheck/hello-world
name: express-server
ports:
- containerPort: 8000
---
apiVersion: v1
kind: Service
metadata:
labels:
app.oam.dev/appRevision: ""
app.oam.dev/component: express-server
app.oam.dev/name: vela-app
trait.oam.dev/resource: service
trait.oam.dev/type: ingress
name: express-server
spec:
ports:
- port: 8000
targetPort: 8000
selector:
app.oam.dev/component: express-server
---
apiVersion: networking.k8s.io/v1beta1
kind: Ingress
metadata:
labels:
app.oam.dev/appRevision: ""
app.oam.dev/component: express-server
app.oam.dev/name: vela-app
trait.oam.dev/resource: ingress
trait.oam.dev/type: ingress
name: express-server
spec:
rules:
- host: testsvc.example.com
http:
paths:
- backend:
serviceName: express-server
servicePort: 8000
path: /
---
```
In this example, the application `vela-app` relies on KubeVela's built-in component (`webservice`) and trait (`ingress`). We can also specify local definition files with the `-d` or `--definitions` flag.
The `-d` or `--definitions` flag allows users to import definitions for the application from local files.
`dry-run` will use the user-specified capabilities first.
## Live-Diff `Application`
Live-diff helps us preview what will change in this application upgrade, without affecting the running cluster.
This feature is very useful for production changes and keeps the upgrade under control.
It generates a diff between the running revision and the local version to be upgraded.
The diff result shows the changes (added/modified/removed/no_change) of the application as well as its sub-resources (e.g., components and traits).
Assume we have deployed the application in the dry-run section.
Then we can list the revisions of the application.
```shell
$ kubectl get apprev -l app.oam.dev/name=vela-app
NAME AGE
vela-app-v1 50s
```
Assume we are going to update the application:
```yaml
# new-app.yaml
apiVersion: core.oam.dev/v1beta1
kind: Application
metadata:
name: vela-app
spec:
components:
- name: express-server
type: webservice
properties:
image: crccheck/hello-world
port: 8080 # change port
cpu: 0.5 # add requests cpu units
- name: my-task # add a component
type: task
properties:
image: busybox
cmd: ["sleep", "1000"]
traits:
- type: ingress
properties:
domain: testsvc.example.com
http:
"/": 8080 # change port
```
Run live-diff:
```shell
kubectl vela live-diff -f new-app.yaml -r vela-app-v1
```
The `-r` or `--revision` flag specifies the name of the running ApplicationRevision with which the updated version will be compared.
The `-c` or `--context` flag specifies the number of context lines shown around the changes; unchanged lines beyond this limit will be omitted. This is useful when the diff result contains a lot of unchanged content while we only want to focus on the changed parts.
<details><summary> diff </summary>
```bash
---
# Application (vela-app) has been modified(*)
---
apiVersion: core.oam.dev/v1beta1
kind: Application
metadata:
creationTimestamp: null
name: vela-app
namespace: default
spec:
components:
- name: express-server
properties:
+ cpu: 0.5
image: crccheck/hello-world
- port: 8000
+ port: 8080
+ type: webservice
+ - name: my-task
+ properties:
+ cmd:
+ - sleep
+ - "1000"
+ image: busybox
traits:
- properties:
domain: testsvc.example.com
http:
- /: 8000
+ /: 8080
type: ingress
- type: webservice
+ type: task
status:
batchRollingState: ""
currentBatch: 0
rollingState: ""
upgradedReadyReplicas: 0
upgradedReplicas: 0
---
## Component (express-server) has been modified(*)
---
apiVersion: core.oam.dev/v1alpha2
kind: Component
metadata:
creationTimestamp: null
labels:
app.oam.dev/name: vela-app
name: express-server
spec:
workload:
apiVersion: apps/v1
kind: Deployment
metadata:
labels:
app.oam.dev/appRevision: ""
app.oam.dev/component: express-server
app.oam.dev/name: vela-app
workload.oam.dev/type: webservice
spec:
selector:
matchLabels:
app.oam.dev/component: express-server
template:
metadata:
labels:
app.oam.dev/component: express-server
spec:
containers:
- image: crccheck/hello-world
name: express-server
ports:
- - containerPort: 8000
+ - containerPort: 8080
status:
observedGeneration: 0
---
### Component (express-server) / Trait (ingress/service) has been removed(-)
---
- apiVersion: v1
- kind: Service
- metadata:
- labels:
- app.oam.dev/appRevision: ""
- app.oam.dev/component: express-server
- app.oam.dev/name: vela-app
- trait.oam.dev/resource: service
- trait.oam.dev/type: ingress
- name: express-server
- spec:
- ports:
- - port: 8000
- targetPort: 8000
- selector:
- app.oam.dev/component: express-server
---
### Component (express-server) / Trait (ingress/ingress) has been removed(-)
---
- apiVersion: networking.k8s.io/v1beta1
- kind: Ingress
- metadata:
- labels:
- app.oam.dev/appRevision: ""
- app.oam.dev/component: express-server
- app.oam.dev/name: vela-app
- trait.oam.dev/resource: ingress
- trait.oam.dev/type: ingress
- name: express-server
- spec:
- rules:
- - host: testsvc.example.com
- http:
- paths:
- - backend:
- serviceName: express-server
- servicePort: 8000
- path: /
---
## Component (my-task) has been added(+)
---
+ apiVersion: core.oam.dev/v1alpha2
+ kind: Component
+ metadata:
+ creationTimestamp: null
+ labels:
+ app.oam.dev/name: vela-app
+ name: my-task
+ spec:
+ workload:
+ apiVersion: batch/v1
+ kind: Job
+ metadata:
+ labels:
+ app.oam.dev/appRevision: ""
+ app.oam.dev/component: my-task
+ app.oam.dev/name: vela-app
+ workload.oam.dev/type: task
+ spec:
+ completions: 1
+ parallelism: 1
+ template:
+ spec:
+ containers:
+ - command:
+ - sleep
+ - "1000"
+ image: busybox
+ name: my-task
+ restartPolicy: Never
+ status:
+ observedGeneration: 0
---
### Component (my-task) / Trait (ingress/service) has been added(+)
---
+ apiVersion: v1
+ kind: Service
+ metadata:
+ labels:
+ app.oam.dev/appRevision: ""
+ app.oam.dev/component: my-task
+ app.oam.dev/name: vela-app
+ trait.oam.dev/resource: service
+ trait.oam.dev/type: ingress
+ name: my-task
+ spec:
+ ports:
+ - port: 8080
+ targetPort: 8080
+ selector:
+ app.oam.dev/component: my-task
---
### Component (my-task) / Trait (ingress/ingress) has been added(+)
---
+ apiVersion: networking.k8s.io/v1beta1
+ kind: Ingress
+ metadata:
+ labels:
+ app.oam.dev/appRevision: ""
+ app.oam.dev/component: my-task
+ app.oam.dev/name: vela-app
+ trait.oam.dev/resource: ingress
+ trait.oam.dev/type: ingress
+ name: my-task
+ spec:
+ rules:
+ - host: testsvc.example.com
+ http:
+ paths:
+ - backend:
+ serviceName: my-task
+ servicePort: 8080
+ path: /
```
</details>


@ -0,0 +1,74 @@
---
title: Labels and Annotations
---
We will introduce how to add labels and annotations to your Application.
## List Traits
```bash
$ kubectl get trait -n vela-system
NAME APPLIES-TO DESCRIPTION
annotations ["webservice","worker"] Add annotations for your Workload.
cpuscaler ["webservice","worker"] configure k8s HPA with CPU metrics for Deployment
ingress ["webservice","worker"] Configures K8s ingress and service to enable web traffic for your service. Please use route trait in cap center for advanced usage.
labels ["webservice","worker"] Add labels for your Workload.
scaler ["webservice","worker"] Configures replicas for your service by patch replicas field.
sidecar ["webservice","worker"] inject a sidecar container into your app
```
You can use the `labels` and `annotations` traits to add labels and annotations for your workload.
## Apply Application
Let's use the `labels` and `annotations` traits in your Application.
```yaml
# myapp.yaml
apiVersion: core.oam.dev/v1beta1
kind: Application
metadata:
name: myapp
spec:
components:
- name: express-server
type: webservice
properties:
image: crccheck/hello-world
port: 8000
traits:
- type: labels
properties:
"release": "stable"
- type: annotations
properties:
"description": "web application"
```
Apply this Application.
```shell
kubectl apply -f myapp.yaml
```
Check that the workload has been created successfully.
```bash
$ kubectl get deployments
NAME READY UP-TO-DATE AVAILABLE AGE
express-server 1/1 1 1 15s
```
Check the `labels` trait.
```bash
$ kubectl get deployments express-server -o jsonpath='{.spec.template.metadata.labels}'
{"app.oam.dev/component":"express-server","release": "stable"}
```
Check the `annotations` trait.
```bash
$ kubectl get deployments express-server -o jsonpath='{.spec.template.metadata.annotations}'
{"description":"web application"}
```


@ -0,0 +1,9 @@
---
title: Monitoring
---
TBD, content overview:
1. We will move all installation scripts to a separate doc, maybe named Install Capability Providers (e.g., https://knative.dev/docs/install/install-extensions/): install the monitoring trait (along with the prometheus/grafana controller).
2. Add the monitoring trait into an Application.
3. View it with Grafana.


@ -0,0 +1,8 @@
---
title: Build CI/CD Pipeline
---
TBD, content overview:
1. Install Argo/Tekton.
2. Run the pipeline example: https://github.com/oam-dev/kubevela/tree/master/docs/examples/argo


@ -0,0 +1,233 @@
---
title: Multi-Cluster Application Deployment
---
## Introduction
To ensure high availability and maximize service throughput, modern application infrastructure involves deploying applications to multiple clusters. In this section, we will introduce how to use KubeVela to deploy an application across multiple clusters with the following features supported:
- Rolling Upgrade: to keep applications continuously deployed and upgraded in a safe, canary manner, the deployment usually needs to proceed step by step in batches, with verification of the results.
- Traffic Shifting: when rolling out a new version of an application, traffic needs to be split between the old and the new versions, so that the new version can be verified while the service stays available.
## AppDeployment CRD
The `AppDeployment` CRD is designed to satisfy the above requirements. Here is a brief introduction to the API:
```yaml
apiVersion: core.oam.dev/v1beta1
kind: AppDeployment
metadata:
name: sample-appdeploy
spec:
traffic:
hosts:
- example.com
http:
- match:
# match any requests to 'example.com/example-app'
- uri:
prefix: "/example-app"
# split traffic 50/50 on v1/v2 versions of the app
weightedTargets:
- revisionName: example-app-v1
componentName: testsvc
port: 80
weight: 50
- revisionName: example-app-v2
componentName: testsvc
port: 80
weight: 50
appRevisions:
- # Name of the AppRevision.
# Each modification to Application would generate a new AppRevision.
revisionName: example-app-v1
# Cluster specific workload placement config
placement:
- clusterSelector:
# You can select Clusters by name or labels.
# If multiple clusters is selected, one will be picked via a unique hashing algorithm.
labels:
tier: production
name: prod-cluster-1
distribution:
replicas: 5
- # If no clusterSelector is given, it will use the host cluster in which this CR exists
distribution:
replicas: 5
- revisionName: example-app-v2
placement:
- clusterSelector:
labels:
tier: production
name: prod-cluster-1
distribution:
replicas: 5
- distribution:
replicas: 5
```
## Cluster CRD
In the example above, the `placement` strategy specifies the clusters to deploy to by selecting clusters defined via the `Cluster` CRD, which looks like below:
```yaml
apiVersion: core.oam.dev/v1beta1
kind: Cluster
metadata:
name: prod-cluster-1
labels:
tier: production
spec:
kubeconfigSecretRef:
name: kubeconfig-cluster-1 # the secret name
```
The secret must contain the `config` field holding the `kubeconfig` credentials:
```yaml
apiVersion: v1
kind: Secret
metadata:
name: kubeconfig-cluster-1
data:
config: ... # kubeconfig data
```
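A sketch of creating such a secret from an existing kubeconfig file (the file path below is an assumption; adjust it to your environment):
```shell
# --from-file=config=... stores the file content under the required `config` key
kubectl create secret generic kubeconfig-cluster-1 \
  --from-file=config=./prod-cluster-1.kubeconfig
```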
## Quick Start
Below is an example that you can try out by yourself. All the yaml files are in [`docs/examples/appdeployment/`](https://github.com/oam-dev/kubevela/tree/master/docs/examples/appdeployment).
You must run all the commands in that directory.
1. Create an Application:
```bash
$ cat <<EOF | kubectl apply -f -
apiVersion: core.oam.dev/v1beta1
kind: Application
metadata:
name: example-app
annotations:
app.oam.dev/revision-only: "true"
spec:
components:
- name: testsvc
type: webservice
properties:
addRevisionLabel: true
image: crccheck/hello-world
port: 8000
EOF
```
This will create the `example-app-v1` AppRevision. Check it:
```bash
$ kubectl get applicationrevisions.core.oam.dev
NAME AGE
example-app-v1 116s
```
> 注意: 通过 `app.oam.dev/revision-only: "true"` 注释, 上面的 `Application` 资源不会创建任何pod实例并会把真正的部署进程留给 `AppDeployment` 处理。
2. Then use the above AppRevision to create an AppDeployment:
```bash
$ kubectl apply -f appdeployment-1.yaml
```
> 注意: 为了使 AppDeployment 能正常工作,你的工作负载对象必须有一个`spec.replicas`字段以进行扩展。
3. Now you can check that 1 deployment and 2 pod instances have been deployed:
```bash
$ kubectl get deploy
NAME READY UP-TO-DATE AVAILABLE AGE
testsvc-v1 2/2 2 0 27s
```
4. Update the application properties:
```bash
$ cat <<EOF | kubectl apply -f -
apiVersion: core.oam.dev/v1beta1
kind: Application
metadata:
name: example-app
annotations:
app.oam.dev/revision-only: "true"
spec:
components:
- name: testsvc
type: webservice
properties:
addRevisionLabel: true
image: nginx
port: 80
EOF
```
This will create a new `example-app-v2` AppRevision. Check it:
```bash
$ kubectl get applicationrevisions.core.oam.dev
NAME
example-app-v1
example-app-v2
```
5. Then let's update the AppDeployment object and try to deploy 2 different versions of the application:
```bash
$ kubectl apply -f appdeployment-2.yaml
```
(Optional) If you have Istio installed, you can use the AppDeployment together with traffic shifting:
```bash
# set up gateway if not yet
$ kubectl apply -f gateway.yaml
$ kubectl apply -f appdeployment-2-traffic.yaml
```
Note: for traffic shifting to work, you must set the pod labels below in the cue templates of the workloads (see [webservice.cue](https://github.com/oam-dev/kubevela/blob/master/hack/vela-templates/cue/webservice.cue) for details):
```shell
"app.oam.dev/component": context.name
"app.oam.dev/appRevision": context.appRevision
```
6. Now you can check that each version has 1 deployment and 1 pod:
```bash
$ kubectl get deploy
NAME READY UP-TO-DATE AVAILABLE AGE
testsvc-v1 1/1 1 1 2m14s
testsvc-v2 1/1 1 1 8s
```
(Optional) Verify the traffic shifting:
```bash
# run this in another terminal
$ kubectl -n istio-system port-forward service/istio-ingressgateway 8080:80
Forwarding from 127.0.0.1:8080 -> 8080
Forwarding from [::1]:8080 -> 8080
# The command should return pages of either docker whale or nginx in 50/50
$ curl -H "Host: example-app.example.com" http://localhost:8080/
```
7. Cleanup:
```bash
kubectl delete appdeployments.core.oam.dev --all
kubectl delete applications.core.oam.dev --all
```

View File

@ -0,0 +1,38 @@
---
title: Rollout
---
## Description
Configures Canary deployment strategy for your application.
## Specification
List of all configuration options for a `Rollout` trait.
```yaml
services:
express-server:
...
rollout:
replicas: 2
stepWeight: 50
interval: "10s"
```
## Properties
Name | Description | Type | Required | Default
------------ | ------------- | ------------- | ------------- | -------------
interval | Schedule interval time | string | true | 30s
stepWeight | Weight percent of every step in rolling update | int | true | 50
replicas | Total replicas of the workload | int | true | 2
## Conflicts With
### `Autoscale`
When the `Rollout` and `Autoscale` traits are attached to the same service, the two will fight over the number of instances during rollout. Thus, it is by design that `Rollout` takes over replica control (specified by the `.replicas` field) during rollout.
> Note: in upcoming releases, KubeVela will introduce a separate section in Appfile to define release phase configurations such as `Rollout`.

View File

@ -0,0 +1,88 @@
---
title: Define Application Health Probe
---
In this documentation, we will show how to define a health probe for an application.
1. Create a health scope instance:
```yaml
apiVersion: core.oam.dev/v1alpha2
kind: HealthScope
metadata:
name: health-check
namespace: default
spec:
probe-interval: 60
workloadRefs:
- apiVersion: apps/v1
kind: Deployment
name: express-server
```
2. Create an application with the health scope:
```yaml
apiVersion: core.oam.dev/v1beta1
kind: Application
metadata:
name: vela-app
spec:
components:
- name: express-server
type: webservice
properties:
image: crccheck/hello-world
port: 8080 # change port
cpu: 0.5 # add requests cpu units
scopes:
healthscopes.core.oam.dev: health-check
```
3. Check the app status; you will see the health scope in `status.services.scopes`:
```shell
$ kubectl get app vela-app -o yaml
```
```yaml
apiVersion: core.oam.dev/v1beta1
kind: Application
metadata:
name: vela-app
...
status:
...
services:
- healthy: true
name: express-server
scopes:
- apiVersion: core.oam.dev/v1alpha2
kind: HealthScope
name: health-check
```
4. Check the health scope status:
```shell
$ kubectl get healthscope health-check -o yaml
```
```yaml
apiVersion: core.oam.dev/v1alpha2
kind: HealthScope
metadata:
name: health-check
...
spec:
probe-interval: 60
workloadRefs:
- apiVersion: apps/v1
kind: Deployment
name: express-server
status:
healthConditions:
- componentName: express-server
diagnosis: 'Ready:1/1 '
healthStatus: HEALTHY
targetWorkload:
apiVersion: apps/v1
kind: Deployment
name: express-server
scopeHealthCondition:
healthStatus: HEALTHY
healthyWorkloads: 1
total: 1
```
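If you only care about the aggregated result, a jsonpath query over the status shown above can be handy:
```shell
$ kubectl get healthscope health-check -o jsonpath='{.status.scopeHealthCondition.healthStatus}'
HEALTHY
```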

View File

@ -0,0 +1,48 @@
---
title: Progressive Rollout RoadMap
---
Here are some work items on the roadmap.
## Embed rollout in an application
We will support embedded rollout settings in an application. In this way, any changes to the
application will naturally roll out in a controlled manner instead of being replaced all at once.
## Add support to trait upgrade
There are three trait related workitems that complement each other
- we need to make sure that traits that work on the previous application still work on the new
application
- traits themselves also need a controlled way to upgrade instead of replacing the old in one shot
- the rollout controller should suppress conflicting traits (like HPA/Scaler) during the rollout process
## Add metrics based rolling checking
We will integrate with Prometheus and use the metrics generated by the application to control the
flow of the rollout. This part will be very similar to Flagger.
## Add traffic shifting support
We will add traffic-shifting based upgrading strategies such as canary and A/B testing. We plan to support
Istio in our first version. This part will be very similar to Flagger.
## Support upgrading more than one component
Currently, we can only upgrade one component at a time. We will support upgrading more components in
one application at once.
## Support Helm Rollout strategy
Currently, we only support upgrading k8s resources. We will support Helm-based workloads in the
future.
## Add more restrictions on what part of the rollout plan can be changed during rolling
Here are some examples
- the BatchPartition field cannot decrease beyond the current batch
- the RolloutBatches field can only change the part after the current batch
- the ComponentList field cannot be changed after rolling starts
- the RolloutStrategy/TargetSize/NumBatches cannot be changed

View File

@ -0,0 +1,50 @@
---
title: Autoscale
---
## Description
Automatically scales workloads by resource utilization metrics or cron triggers.
## Specification
List of all configuration options for an `Autoscale` trait.
```yaml
name: testapp
services:
express-server:
...
autoscale:
min: 1
max: 4
cron:
startAt: "14:00"
duration: "2h"
days: "Monday, Thursday"
replicas: 2
timezone: "America/Los_Angeles"
cpuPercent: 10
```
## Properties
Name | Description | Type | Required | Default
------------ | ------------- | ------------- | ------------- | -------------
min | Minimal replicas of the workload | int | true |
max | Maximal replicas of the workload | int | true |
cpuPercent | Specify the value for CPU utilization, like 80, which means 80% | int | false |
cron | Cron type auto-scaling. Only for `appfile`, not available for CLI usage | [cron](#cron) | false |
### cron
Name | Description | Type | Required | Default
------------ | ------------- | ------------- | ------------- | -------------
startAt | The time to start scaling, like `08:00` | string | true |
duration | For how long the scaling will last | string | true |
days | Several workdays or weekends, like "Monday, Tuesday" | string | true |
replicas | The target replicas to be scaled to | int | true |
timezone | Timezone, like "America/Los_Angeles" | string | true |

View File

@ -0,0 +1,98 @@
---
title: Expose Application
---
> ⚠️ 本章节要求当前集群已经安装 ingress 。
如果我们需要将 application 中的服务暴露对外,只需要在该 application 中添加 `ingress` 的 trait。
我们使用 kubectl 查看 ingress 数据结构。
```shell
$ kubectl vela show ingress
# Properties
+--------+------------------------------------------------------------------------------+----------------+----------+---------+
| NAME | DESCRIPTION | TYPE | REQUIRED | DEFAULT |
+--------+------------------------------------------------------------------------------+----------------+----------+---------+
| http | Specify the mapping relationship between the http path and the workload port | map[string]int | true | |
| domain | Specify the domain you want to expose | string | true | |
+--------+------------------------------------------------------------------------------+----------------+----------+---------+
```
Then, modify and deploy the application below:
```yaml
# vela-app.yaml
apiVersion: core.oam.dev/v1beta1
kind: Application
metadata:
name: first-vela-app
spec:
components:
- name: express-server
type: webservice
properties:
image: crccheck/hello-world
port: 8000
traits:
- type: ingress
properties:
domain: testsvc.example.com
http:
"/": 8000
```
```bash
$ kubectl apply -f https://raw.githubusercontent.com/oam-dev/kubevela/master/docs/examples/vela-app.yaml
application.core.oam.dev/first-vela-app created
```
When the `PHASE` is `running` and `HEALTHY` is `true`, the application has been deployed correctly:
```bash
$ kubectl get application first-vela-app -w
NAME COMPONENT TYPE PHASE HEALTHY STATUS AGE
first-vela-app express-server webservice healthChecking 14s
first-vela-app express-server webservice running true 42s
```
Meanwhile, we can check the details of the trait with the command below:
```shell
$ kubectl get application first-vela-app -o yaml
apiVersion: core.oam.dev/v1beta1
kind: Application
metadata:
name: first-vela-app
namespace: default
spec:
...
services:
- healthy: true
name: express-server
traits:
- healthy: true
message: 'Visiting URL: testsvc.example.com, IP: 47.111.233.220'
type: ingress
status: running
...
```
Finally, we can access the deployed service in the following way:
```
$ curl -H "Host:testsvc.example.com" http://<your ip address>/
<xmp>
Hello World
## .
## ## ## ==
## ## ## ## ## ===
/""""""""""""""""\___/ ===
~~~ {~~ ~~~~ ~~~ ~~~~ ~~ ~ / ===- ~~~
\______ o _,/
\ \ _,'
`'--.._\..--''
</xmp>
```
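If you are not sure which IP address to use in the command above, the address allocated to the ingress can usually be found like below (the output here is illustrative):
```shell
$ kubectl get ingress
NAME             CLASS    HOSTS                 ADDRESS          PORTS   AGE
express-server   <none>   testsvc.example.com   47.111.233.220   80      2m
```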

View File

@ -0,0 +1,36 @@
---
title: Metrics
---
## Description
Configures the monitoring metrics for your service.
## Specification
List of all configuration options for a `Metrics` trait.
```yaml
name: my-app-name
services:
my-service-name:
...
metrics:
format: "prometheus"
port: 8080
path: "/metrics"
scheme: "http"
enabled: true
```
## Properties
Name | Description | Type | Required | Default
------------ | ------------- | ------------- | ------------- | -------------
path | The metrics path of the service | string | true | /metrics
format | The format of the metrics, default is prometheus | string | true | prometheus
scheme | The scheme used to retrieve the metrics, supports `http` and `https` | string | true | http
enabled | | bool | true | true
port | The port of the metrics, automatically discovered and exposed by default | int | true | 0
selector | The label selector of the pods, automatically discovered by default | map[string]string | false |

View File

@ -0,0 +1,43 @@
---
title: Route
---
## Description
Configures the external access entry for your service.
## Specification
List of all configuration options for a `Route` trait.
```yaml
name: my-app-name
services:
my-service-name:
...
route:
domain: example.com
issuer: tls
rules:
- path: /testapp
rewriteTarget: /
```
## Properties
Name | Description | Type | Required | Default
------------ | ------------- | ------------- | ------------- | -------------
domain | The domain name | string | true | empty
issuer | | string | true | empty
rules | | [[]rules](#rules) | false |
provider | | string | false |
ingressClass | | string | false |
### rules
Name | Description | Type | Required | Default
------------ | ------------- | ------------- | ------------- | -------------
path | | string | true |
rewriteTarget | | string | true | empty

View File

@ -0,0 +1,68 @@
---
title: Scale
---
In the [Deploy Application](../application) section, we used the `cpuscaler` trait as an auto-scaler for the sample application.
## Manual Scale
You can scale your application manually by using the `scaler` trait.
```shell
$ kubectl vela show scaler
# Properties
+----------+--------------------------------+------+----------+---------+
| NAME | DESCRIPTION | TYPE | REQUIRED | DEFAULT |
+----------+--------------------------------+------+----------+---------+
| replicas | Specify replicas of workload | int | true | 1 |
+----------+--------------------------------+------+----------+---------+
```
Deploy the application.
```yaml
# sample-manual.yaml
apiVersion: core.oam.dev/v1beta1
kind: Application
metadata:
name: website
spec:
components:
- name: frontend
type: webservice
properties:
image: nginx
traits:
- type: scaler
properties:
replicas: 2
- type: sidecar
properties:
name: "sidecar-test"
image: "fluentd"
- name: backend
type: worker
properties:
image: busybox
cmd:
- sleep
- '1000'
```
Modify and apply the sample application:
```shell
$ kubectl apply -f https://raw.githubusercontent.com/oam-dev/kubevela/master/docs/examples/enduser/sample-manual.yaml
application.core.oam.dev/website configured
```
After a while, you can see that the underlying deployment of the `frontend` component now has two replicas.
```shell
$ kubectl get deploy -l app.oam.dev/name=website
NAME READY UP-TO-DATE AVAILABLE AGE
backend 1/1 1 1 19h
frontend 2/2 2 2 19h
```
To scale up or scale down, you can simply modify the `replicas` field of the `scaler` trait and apply the application again, as sketched below.
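For example, the changed part when scaling the `frontend` component up to four replicas; everything else in `sample-manual.yaml` stays the same:
```yaml
      traits:
        - type: scaler
          properties:
            replicas: 4   # was 2; re-apply the file to take effect
```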

View File

@ -0,0 +1,102 @@
---
title: Using Sidecar
---
In this section, we will show how to use the `sidecar` trait to collect logs.
## Check the Usage Guide of Sidecar
```shell
$ kubectl vela show sidecar
# Properties
+---------+-----------------------------------------+-----------------------+----------+---------+
| NAME | DESCRIPTION | TYPE | REQUIRED | DEFAULT |
+---------+-----------------------------------------+-----------------------+----------+---------+
| name | Specify the name of sidecar container | string | true | |
| cmd | Specify the commands run in the sidecar | []string | false | |
| image | Specify the image of sidecar container | string | true | |
| volumes | Specify the shared volume path | [[]volumes](#volumes) | false | |
+---------+-----------------------------------------+-----------------------+----------+---------+
## volumes
+-----------+-------------+--------+----------+---------+
| NAME | DESCRIPTION | TYPE | REQUIRED | DEFAULT |
+-----------+-------------+--------+----------+---------+
| name | | string | true | |
| path | | string | true | |
+-----------+-------------+--------+----------+---------+
```
## Deploy the Application
The `log-gen-worker` component of the application shares the same log data directory with the sidecar, and the sidecar re-outputs the logs to stdout:
```yaml
# app.yaml
apiVersion: core.oam.dev/v1beta1
kind: Application
metadata:
name: vela-app-with-sidecar
spec:
components:
- name: log-gen-worker
type: worker
properties:
image: busybox
cmd:
- /bin/sh
- -c
- >
i=0;
while true;
do
echo "$i: $(date)" >> /var/log/date.log;
i=$((i+1));
sleep 1;
done
volumes:
- name: varlog
mountPath: /var/log
type: emptyDir
traits:
- type: sidecar
properties:
name: count-log
image: busybox
cmd: [ /bin/sh, -c, 'tail -n+1 -f /var/log/date.log']
volumes:
- name: varlog
path: /var/log
```
Deploy the application:
```shell
kubectl apply -f app.yaml
```
Check the workload generated by the application:
```shell
$ kubectl get pod
NAME READY STATUS RESTARTS AGE
log-gen-worker-76945f458b-k7n9k 2/2 Running 0 90s
```
Check the output of the sidecar:
```shell
$ kubectl logs -f log-gen-worker-76945f458b-k7n9k count-log
0: Fri Apr 16 11:08:45 UTC 2021
1: Fri Apr 16 11:08:46 UTC 2021
2: Fri Apr 16 11:08:47 UTC 2021
3: Fri Apr 16 11:08:48 UTC 2021
4: Fri Apr 16 11:08:49 UTC 2021
5: Fri Apr 16 11:08:50 UTC 2021
6: Fri Apr 16 11:08:51 UTC 2021
7: Fri Apr 16 11:08:52 UTC 2021
8: Fri Apr 16 11:08:53 UTC 2021
9: Fri Apr 16 11:08:54 UTC 2021
```

View File

@ -0,0 +1,97 @@
---
title: Using Volumes
---
We will introduce how to use basic and custom volumes in an application.
## Use Basic Volumes
Both `worker` and `webservice` can use multiple common volumes, including `persistentVolumeClaim`, `configMap`, `secret`, and `emptyDir`. You should use the name attribute to distinguish between volumes of different types. For brevity, we use `pvc` for `persistentVolumeClaim`:
```yaml
apiVersion: core.oam.dev/v1beta1
kind: Application
metadata:
name: website
spec:
components:
- name: frontend
type: webservice
properties:
image: nginx
volumes:
- name: "my-pvc"
mountPath: "/var/www/html1"
type: "pvc" # persistenVolumeClaim type volume
claimName: "myclaim"
- name: "my-cm"
mountPath: "/var/www/html2"
type: "configMap" # configMap type volume (specifying items)
cmName: "myCmName"
items:
- key: "k1"
path: "./a1"
- key: "k2"
path: "./a2"
- name: "my-cm-noitems"
mountPath: "/var/www/html22"
type: "configMap" # configMap type volume (not specifying items)
cmName: "myCmName2"
- name: "mysecret"
type: "secret" # secret type volume
mountPath: "/var/www/html3"
secretName: "mysecret"
- name: "my-empty-dir"
type: "emptyDir" # emptyDir type volume
mountPath: "/var/www/html4"
```
You need to make sure that the volume resources you use are available in the cluster.
## Use Custom Types of Volumes
Users can extend custom types of volumes by themselves, such as AWS ElasticBlockStore,
Azure disk, or Alibaba Cloud OSS.
To use a custom type of volume, we need to install the specific trait first:
```shell
$ kubectl apply -f https://raw.githubusercontent.com/oam-dev/kubevela/master/docs/examples/app-with-volumes/td-awsEBS.yaml
```
```shell
$ kubectl vela show aws-ebs-volume
+-----------+----------------------------------------------------------------+--------+----------+---------+
| NAME | DESCRIPTION | TYPE | REQUIRED | DEFAULT |
+-----------+----------------------------------------------------------------+--------+----------+---------+
| name | The name of volume. | string | true | |
| mountPath | | string | true | |
| volumeID | Unique id of the persistent disk resource. | string | true | |
| fsType | Filesystem type to mount. | string | true | ext4 |
| partition | Partition on the disk to mount. | int | false | |
| readOnly | ReadOnly here will force the ReadOnly setting in VolumeMounts. | bool | true | false |
+-----------+----------------------------------------------------------------+--------+----------+---------+
```
Then we can use aws-ebs volumes in the application definition.
```yaml
apiVersion: core.oam.dev/v1beta1
kind: Application
metadata:
name: app-worker
spec:
components:
- name: myworker
type: worker
properties:
image: "busybox"
cmd:
- sleep
- "1000"
traits:
- type: aws-ebs-volume
properties:
name: "my-ebs"
mountPath: "/myebs"
volumeID: "my-ebs-id"
```

View File

@ -0,0 +1,323 @@
---
title: Installation
---
import Tabs from '@theme/Tabs';
import TabItem from '@theme/TabItem';
If you have installed the KubeVela chart before, please go directly to the [upgrade](#upgrade) steps.
## 1. Set Up a Kubernetes Cluster
Requirements:
- Kubernetes cluster version >= v1.15.0
- kubectl installed and configured
KubeVela is a simple custom controller that can be installed on any Kubernetes cluster, including managed offerings or your own clusters. The only requirement is to make sure [ingress-nginx](https://kubernetes.github.io/ingress-nginx/deploy/) is installed and enabled.
> Note: If you are not using minikube or kind, please make sure to [install or enable ingress-nginx](https://kubernetes.github.io/ingress-nginx/deploy/).
<Tabs
className="unique-tabs"
defaultValue="minikube"
values={[
{label: 'Minikube', value: 'minikube'},
{label: 'KinD', value: 'kind'},
]}>
<TabItem value="minikube">
Follow the minikube [installation guide](https://minikube.sigs.k8s.io/docs/start/).
After minikube is installed, create a cluster:
```shell script
minikube start
```
Enable ingress:
```shell script
minikube addons enable ingress
```
</TabItem>
<TabItem value="kind">
Follow this [guide](https://kind.sigs.k8s.io/docs/user/quick-start/#installation) to install kind.
Then spin up a kind cluster:
```shell script
cat <<EOF | kind create cluster --image=kindest/node:v1.18.15 --config=-
kind: Cluster
apiVersion: kind.x-k8s.io/v1alpha4
nodes:
- role: control-plane
kubeadmConfigPatches:
- |
kind: InitConfiguration
nodeRegistration:
kubeletExtraArgs:
node-labels: "ingress-ready=true"
extraPortMappings:
- containerPort: 80
hostPort: 80
protocol: TCP
- containerPort: 443
hostPort: 443
protocol: TCP
EOF
```
Then install [ingress for kind](https://kind.sigs.k8s.io/docs/user/ingress/#ingress-nginx):
```shell script
kubectl apply -f https://raw.githubusercontent.com/kubernetes/ingress-nginx/master/deploy/static/provider/kind/deploy.yaml
```
</TabItem>
</Tabs>
## 2. Install the KubeVela Controller
These steps will install the KubeVela controller and its dependencies.
1. Add the KubeVela helm chart repo:
```shell script
helm repo add kubevela https://kubevelacharts.oss-cn-hangzhou.aliyuncs.com/core
```
2. Update the chart repo:
```shell script
helm repo update
```
3. Install KubeVela:
```shell script
helm install --create-namespace -n vela-system kubevela kubevela/vela-core
```
By default, it will enable the webhook with a self-signed certificate provided by [kube-webhook-certgen](https://github.com/jet/kube-webhook-certgen).
If you want to try the latest master branch, add the flag `--devel` to the `helm search` command to choose a pre-release
version in the format `<next_version>-rc-master`, which means it is a release candidate built on the `master` branch,
e.g. `0.4.0-rc-master`.
```shell script
helm search repo kubevela/vela-core -l --devel
```
```console
NAME CHART VERSION APP VERSION DESCRIPTION
kubevela/vela-core 0.4.0-rc-master 0.4.0-rc-master A Helm chart for KubeVela core
kubevela/vela-core 0.3.2 0.3.2 A Helm chart for KubeVela core
kubevela/vela-core 0.3.1 0.3.1 A Helm chart for KubeVela core
```
Try the following command to install it:
```shell script
helm install --create-namespace -n vela-system kubevela kubevela/vela-core --version <next_version>-rc-master
```
```console
NAME: kubevela
LAST DEPLOYED: Thu Apr 1 19:41:30 2021
NAMESPACE: vela-system
STATUS: deployed
REVISION: 1
NOTES:
Welcome to use the KubeVela! Enjoy your shipping application journey!
```
4. (Optional) Install KubeVela with cert-manager
If cert-manager is already installed, it can be used to generate certificates.
> You need to install cert-manager before installing the KubeVela chart:
```shell script
helm repo add jetstack https://charts.jetstack.io
helm repo update
helm install cert-manager jetstack/cert-manager --namespace cert-manager --version v1.2.0 --create-namespace --set installCRDs=true
```
Install KubeVela with cert-manager enabled:
```shell script
helm install --create-namespace -n vela-system --set admissionWebhooks.certManager.enabled=true kubevela kubevela/vela-core
```
## 3. (Optional) Install flux2
This installation step is optional; it is required only if you want to use [Helm Chart](https://helm.sh/) as a capability of KubeVela.
KubeVela relies on several CRDs and controllers from [fluxcd/flux2](https://github.com/fluxcd/flux2):
| CRD | Controller Image |
| ----------- | ----------- |
| helmrepositories.source.toolkit.fluxcd.io | fluxcd/source-controller:v0.9.0 |
| helmcharts.source.toolkit.fluxcd.io | - |
| buckets.source.toolkit.fluxcd.io | - |
| gitrepositories.source.toolkit.fluxcd.io | - |
| helmreleases.helm.toolkit.fluxcd.io | fluxcd/helm-controller:v0.8.0 |
You can either install the full flux2 following its [official website](https://github.com/fluxcd/flux2), or install the minimal helm chart provided by KubeVela that contains only the required pieces:
```shell
$ helm install --create-namespace -n flux-system helm-flux http://oam.dev/catalog/helm-flux2-0.1.0.tgz
```
## 4. (Optional) Install the KubeVela CLI
There are three ways to get the KubeVela CLI:
<Tabs
className="unique-tabs"
defaultValue="script"
values={[
{label: 'Script', value: 'script'},
{label: 'Homebrew', value: 'homebrew'},
{label: 'Download directly from releases', value: 'download'},
]}>
<TabItem value="script">
**macOS/Linux**
```shell script
curl -fsSl https://kubevela.io/script/install.sh | bash
```
**Windows**
```shell script
powershell -Command "iwr -useb https://kubevela.io/script/install.ps1 | iex"
```
</TabItem>
<TabItem value="homebrew">
**macOS/Linux**
First, update your brew cache:
```shell script
brew update
```
Install the KubeVela client:
```shell script
brew install kubevela
```
</TabItem>
<TabItem value="download">
- Download the latest `vela` binary from the [releases page](https://github.com/oam-dev/kubevela/releases).
- Unpack the vela binary and add it to `$PATH` to get started:
```shell script
sudo mv ./vela /usr/local/bin/vela
```
> 已知问题(https://github.com/oam-dev/kubevela/issues/625):
> 如果你使用 mac它会报告 “vela” 无法打开,因为开发者无法验证。
>
> 新版 MacOS 在运行你下载的没有使用苹果开发者密钥的软件方面更加严格,我们还没有为 KubeVela 做这方面支持。
> 你可以打开你的'系统首选项' -> '安全与隐私' ->一般,点击'允许无论如何'暂时修复它。
</TabItem>
</Tabs>
## 5. (Optional) Sync Capabilities from the Cluster
If you want to run applications from the `vela` CLI, you should sync the capabilities first, as below:
```shell script
vela components
```
```console
NAME NAMESPACE WORKLOAD DESCRIPTION
task vela-system jobs.batch Describes jobs that run code or a script to completion.
webservice vela-system deployments.apps Describes long-running, scalable, containerized services
that have a stable network endpoint to receive external
network traffic from customers.
worker vela-system deployments.apps Describes long-running, scalable, containerized services
that running at backend. They do NOT have network endpoint
to receive external network traffic.
```
## 6. (Optional) Clean Up
<details>
Run:
```shell script
helm uninstall -n vela-system kubevela
rm -r ~/.vela
```
This will uninstall the KubeVela server component and its dependency components.
It also cleans up the local CLI cache.
Then clean up the CRDs (CRDs are not removed via helm by default):
```shell script
kubectl delete crd \
appdeployments.core.oam.dev \
applicationconfigurations.core.oam.dev \
applicationcontexts.core.oam.dev \
applicationdeployments.core.oam.dev \
applicationrevisions.core.oam.dev \
applications.core.oam.dev \
approllouts.core.oam.dev \
componentdefinitions.core.oam.dev \
components.core.oam.dev \
containerizedworkloads.core.oam.dev \
healthscopes.core.oam.dev \
manualscalertraits.core.oam.dev \
podspecworkloads.standard.oam.dev \
scopedefinitions.core.oam.dev \
traitdefinitions.core.oam.dev \
workloaddefinitions.core.oam.dev
```
</details>
# Upgrade
If you have already installed KubeVela and want to upgrade it to a new version, you can follow the instructions below.
## Step 1. Update the helm repo
```shell
helm repo update
```
You can fetch the newest versions of the KubeVela chart by running:
```shell
helm search repo kubevela/vela-core -l
```
## Step 2. Upgrade the KubeVela CRDs
```shell
kubectl apply -f https://raw.githubusercontent.com/oam-dev/kubevela/master/charts/vela-core/crds/core.oam.dev_componentdefinitions.yaml
kubectl apply -f https://raw.githubusercontent.com/oam-dev/kubevela/master/charts/vela-core/crds/core.oam.dev_workloaddefinitions.yaml
kubectl apply -f https://raw.githubusercontent.com/oam-dev/kubevela/master/charts/vela-core/crds/core.oam.dev_traitdefinitions.yaml
kubectl apply -f https://raw.githubusercontent.com/oam-dev/kubevela/master/charts/vela-core/crds/core.oam.dev_applications.yaml
kubectl apply -f https://raw.githubusercontent.com/oam-dev/kubevela/master/charts/vela-core/crds/core.oam.dev_approllouts.yaml
kubectl apply -f https://raw.githubusercontent.com/oam-dev/kubevela/master/charts/vela-core/crds/core.oam.dev_applicationrevisions.yaml
kubectl apply -f https://raw.githubusercontent.com/oam-dev/kubevela/master/charts/vela-core/crds/core.oam.dev_scopedefinitions.yaml
kubectl apply -f https://raw.githubusercontent.com/oam-dev/kubevela/master/charts/vela-core/crds/core.oam.dev_appdeployments.yaml
kubectl apply -f https://raw.githubusercontent.com/oam-dev/kubevela/master/charts/vela-core/crds/core.oam.dev_applicationcontexts.yaml
```
> Tips: 如果你碰到类似 `* is invalid: spec.scope: Invalid value: "Namespaced": filed is immutable`问题. 请删除带有错误的 crd
并重新应用 KubeVela crds。
```shell
kubectl delete crd \
scopedefinitions.core.oam.dev \
traitdefinitions.core.oam.dev \
workloaddefinitions.core.oam.dev
```
## Step 3. Upgrade the KubeVela helm chart
```shell
helm upgrade --install --create-namespace --namespace vela-system kubevela kubevela/vela-core --version <the_new_version>
```
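After the upgrade, one way to confirm which chart revision is actually deployed (the versions shown here are illustrative):
```shell
$ helm ls -n vela-system
NAME      NAMESPACE    REVISION  STATUS    CHART
kubevela  vela-system  2         deployed  vela-core-<the_new_version>
```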

View File

@ -0,0 +1,69 @@
---
title: Introduction
slug: /
---
![alt](resources/KubeVela-01.png)
## Motivation
Cloud-native technology is moving towards pursuing consistent application delivery across clouds, with Kubernetes as the common abstraction layer. Kubernetes, although excellent at abstracting low-level infrastructure details, introduces extra complexity: application developers need to learn concepts such as Pods, port exposure, permissions, resource claims and management, CRDs, and so on. We've seen the steep learning curve: the lack of developer-facing abstraction hurts the user experience, slows down productivity, and leads to unexpected errors or misconfigurations in production. People start to question the value of this revolution: "why should I bother with all these details?"
On the other hand, abstracting Kubernetes to serve developers' requirements is a highly opinionated process, and the resulting abstractions only make sense if the decision makers are the platform builders. Unfortunately, platform builders today face the following dilemma:
*There is no tool or framework for them to easily build user-friendly yet highly extensible abstractions.*
Thus, despite the extensibility of Kubernetes, many platforms today are essentially restricted abstractions built with in-house add-on mechanisms. This approach can hardly satisfy the needs of developers and cannot be extended to broader scenarios, let alone leverage the rich Kubernetes ecosystem.
The end result is that developers complain those platforms are too rigid and slow in response to feature requests or improvements. The platform builders do want to help, but the engineering effort is daunting: any simple API change in the platform could easily become a marathon negotiation around the opinionated abstraction design.
## What is KubeVela?
For platform builders, KubeVela serves as a framework that relieves the pain of building developer-centric platforms by doing the following:
- Developer-centric. KubeVela abstracts away the infrastructure-level primitives by introducing the *Application* concept to capture a full deployment of microservices, and then builds operational features around the application's needs.
- Extending natively. The *Application* is composed of modular building blocks that support [CUELang](https://github.com/cuelang/cue) and [Helm](https://helm.sh) as template engines. This enables you to abstract Kubernetes capabilities in LEGO style and ship them to end users via a simple `kubectl apply -f`. Changes to the abstraction templates take effect at runtime; there is no need to recompile or redeploy KubeVela.
- Simple yet reliable abstraction mechanism. Unlike most IaC (Infrastructure-as-Code) solutions, the abstractions in KubeVela are built with the [Kubernetes Control Loop](https://kubernetes.io/docs/concepts/architecture/controller/), so they never leave configuration drift in your cluster. As a [Kubernetes Custom Resource](https://kubernetes.io/docs/concepts/extend-kubernetes/api-extension/custom-resources/), KubeVela works with any CI/CD or GitOps tools seamlessly; no integration effort is needed.
With KubeVela, platform builders finally have the tooling support to design easy-to-use abstractions and ship them to end users with high confidence and low turnaround time.
For end users (e.g. application developers), such abstractions built with KubeVela enable them to design and ship applications to Kubernetes with minimal effort: all they need to do is define a simple application, which can easily integrate with any CI/CD, without managing any infrastructure details.
## Comparisons
### KubeVela vs. Platform-as-a-Service (PaaS)
Typical examples are Heroku and Cloud Foundry. They provide full application management capabilities and aim to improve developer experience and efficiency. In this context, KubeVela can provide a similar experience.
The biggest difference lies in **flexibility**.
KubeVela is a Kubernetes add-on that enables you to serve your end users by simply defining your own abstractions, and this is achieved by templating Kubernetes API resources as application-centric abstractions in the cluster. In comparison, most existing PaaS systems are highly restricted and inflexible, i.e. they have to enforce constraints on the types of applications and capabilities they support, and as application needs grow, they always outgrow the capabilities of the PaaS system; this will never happen with KubeVela.
### KubeVela vs. Serverless
Serverless platforms such as AWS Lambda provide extraordinary user experience and agility for deploying serverless applications. However, those platforms impose more constraints on extensibility. They are arguably "hard-coded" PaaS.
Kubernetes-based serverless platforms such as Knative and OpenFaaS can be easily integrated with KubeVela by registering themselves as new workloads and traits. Even for AWS Lambda, there is a success story of integrating it with KubeVela via the tooling developed by Crossplane.
### KubeVela vs. Platform-agnostic Developer Tools
A typical example is Hashicorp Waypoint. Waypoint is a developer-facing tool that introduces a consistent workflow (i.e. build, deploy, release) to ship applications across different platforms.
KubeVela can be integrated into Waypoint as a supported platform. In this case, developers use the Waypoint workflow to manage applications, leveraging the abstractions built with KubeVela (e.g. application, rollout, ingress, autoscaling, etc.).
### KubeVela vs. Helm
Helm is a package manager for Kubernetes that provides packaging, installing, and upgrading of a set of YAML files. KubeVela fully leverages Helm to package capability dependencies, and Helm is also one of the core templating engines behind the *Application* abstraction.
Although KubeVela itself is not a package manager, it is the core engine for platform builders to create upper-layer platforms in a simple and repeatable way.
### KubeVela vs. Kubernetes
KubeVela is a Kubernetes add-on for building higher-level abstractions. It leverages the [Open Application Model](https://github.com/oam-dev/spec) and the native Kubernetes extensibility to solve a hard problem: making shipping applications on Kubernetes enjoyable.
## Getting Started
Now, let's [get started](./quick-start.md) with KubeVela!

View File

@ -0,0 +1,73 @@
---
title: Install the kubectl plugin
---
import Tabs from '@theme/Tabs';
import TabItem from '@theme/TabItem';
Install the vela kubectl plugin to ship cloud native applications more easily!
## Installation
You can install `kubectl vela` in the following ways:
<Tabs
className="unique-tabs"
defaultValue="krew"
values={[
{label: 'Krew', value: 'krew'},
{label: 'Script', value: 'script'},
]}>
<TabItem value="krew">
1. Install and set up [krew](https://krew.sigs.k8s.io/docs/user-guide/setup/install/).
2. Update the kubectl plugin list:
```shell
kubectl krew update
```
3. Install kubectl vela:
```shell script
kubectl krew install vela
```
</TabItem>
<TabItem value="script">
**macOS/Linux**
```shell script
curl -fsSl https://kubevela.io/script/install-kubectl-vela.sh | bash
```
You can also download the binary manually from the [release page (>= v1.0.3)](https://github.com/oam-dev/kubevela/releases); kubectl will discover it from your system path automatically.
</TabItem>
</Tabs>
## Usage
```shell
$ kubectl vela -h
A Highly Extensible Platform Engine based on Kubernetes and Open Application Model.
Usage:
kubectl vela [flags]
kubectl vela [command]
Available Commands:
  dry-run     Dry Run an application, and output the K8s resources as
              result to stdout, only CUE template supported for now
  live-diff   Dry-run an application, and do diff on a specific app
              revison. The provided capability definitions will be used
              during Dry-run. If any capabilities used in the app are not
              found in the provided ones, it will try to find from
              cluster.
  show        Show the reference doc for a workload type or trait
  version     Prints out build version information

Flags:
  -h, --help   help for vela

Use "kubectl vela [command] --help" for more information about a command.
```

View File

@ -0,0 +1,92 @@
---
title: Extend CRD Operator as Component Type
---
Let's use [OpenKruise](https://github.com/openkruise/kruise) as an example of extending a CRD operator as a KubeVela component.
**The mechanism works for all CRD Operators**.
### Step 1: Install the CRD controller
You need to [install the CRD controller](https://github.com/openkruise/kruise#quick-start) into your K8s system.
### Step 2: Create Component Definition
To register CloneSet (one of the OpenKruise workloads) as a new workload type in KubeVela, the only thing needed is to create a `ComponentDefinition` object for it.
A full example can be found in this [cloneset.yaml](https://github.com/oam-dev/catalog/blob/master/registry/cloneset.yaml).
Several highlights are listed below.
#### 1. Describe The Workload Type
```yaml
...
annotations:
definition.oam.dev/description: "OpenKruise cloneset"
...
```
A one-line description of this component type. It will be shown in helper commands such as `$ vela components`.
#### 2. Register its underlying CRD
```yaml
...
workload:
definition:
apiVersion: apps.kruise.io/v1alpha1
kind: CloneSet
...
```
This is how you register OpenKruise CloneSet's API resource (`apps.kruise.io/v1alpha1.CloneSet`) as the workload type.
KubeVela uses Kubernetes API resource discovery mechanism to manage all registered capabilities.
#### 3. Define the Template
```yaml
...
schematic:
cue:
template: |
output: {
apiVersion: "apps.kruise.io/v1alpha1"
kind: "CloneSet"
metadata: labels: {
"app.oam.dev/component": context.name
}
spec: {
replicas: parameter.replicas
selector: matchLabels: {
"app.oam.dev/component": context.name
}
template: {
metadata: labels: {
"app.oam.dev/component": context.name
}
spec: {
containers: [{
name: context.name
image: parameter.image
}]
}
}
}
}
parameter: {
// +usage=Which image would you like to use for your service
// +short=i
image: string
// +usage=Number of pods in the cloneset
replicas: *5 | int
}
```
### Step 3: Register New Component Type to KubeVela
As long as the definition file is ready, you just need to apply it to Kubernetes.
```bash
$ kubectl apply -f https://raw.githubusercontent.com/oam-dev/catalog/master/registry/cloneset.yaml
```
And the new component type will immediately become available for developers to use in KubeVela.
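As a sketch, an end user could then reference the new type in an `Application` like below (this assumes the definition in `cloneset.yaml` is named `cloneset`; check the actual `metadata.name` in that file):
```yaml
apiVersion: core.oam.dev/v1beta1
kind: Application
metadata:
  name: app-with-cloneset
spec:
  components:
    - name: mysvc
      type: cloneset    # the ComponentDefinition name registered above
      properties:
        image: nginx:1.14.2
        replicas: 3     # parameters exposed by the CUE template
```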

View File

@ -0,0 +1,349 @@
---
title: Crossplane
---
Cloud services are part of the application.
## Should a Cloud Service be a Component or a Trait?
The following practices can be considered:
- Use `ComponentDefinition` if:
  - you want to allow your end users to explicitly claim an instance of the cloud service and consume it, and to release the instance when the application is deleted.
- Use `TraitDefinition` if:
  - you don't want to give your end users any control over claiming or releasing the cloud service; you only want to give them a way to consume a cloud service, which could even be managed by some other system. The `Service Binding` trait is widely used in this case.
In this documentation, we will use Alibaba Cloud's RDS (Relational Database Service) and Alibaba Cloud's OSS (Object Storage Service) as examples. In a single application they are traits; across multiple applications they are components. This mechanism is the same for other cloud providers.
## Install and Configure Crossplane
KubeVela uses [Crossplane](https://crossplane.io/) as the cloud service provider. Please refer to [Installation](https://github.com/crossplane/provider-alibaba/releases/tag/v0.5.0) to install the Crossplane Alibaba provider v0.5.0.
If you'd like to configure any other Crossplane providers, please refer to [Crossplane Select a Getting Started Configuration](https://crossplane.io/docs/v1.1/getting-started/install-configure.html#select-a-getting-started-configuration).
```
$ kubectl crossplane install provider crossplane/provider-alibaba:v0.5.0
# Note: the xxx and yyy here are the AccessKey and SecretKey of your own cloud resources.
$ kubectl create secret generic alibaba-account-creds -n crossplane-system --from-literal=accessKeyId=xxx --from-literal=accessKeySecret=yyy
$ kubectl apply -f provider.yaml
```
`provider.yaml` is shown below.
```yaml
apiVersion: v1
kind: Namespace
metadata:
name: crossplane-system
---
apiVersion: alibaba.crossplane.io/v1alpha1
kind: ProviderConfig
metadata:
name: default
spec:
credentials:
source: Secret
secretRef:
namespace: crossplane-system
name: alibaba-account-creds
key: credentials
region: cn-beijing
```
Note: we currently only use the Crossplane Alibaba provider, but in the near future we will use [Crossplane](https://crossplane.io/) as the cloud resource provisioner for Kubernetes.
## Register ComponentDefinition and TraitDefinition
### Register ComponentDefinition `alibaba-rds` as the RDS cloud resource producer
Register the workload type `alibaba-rds` to KubeVela:
```yaml
apiVersion: core.oam.dev/v1beta1
kind: ComponentDefinition
metadata:
name: alibaba-rds
namespace: vela-system
annotations:
definition.oam.dev/description: "Alibaba Cloud RDS Resource"
spec:
workload:
definition:
apiVersion: database.alibaba.crossplane.io/v1alpha1
kind: RDSInstance
schematic:
cue:
template: |
output: {
apiVersion: "database.alibaba.crossplane.io/v1alpha1"
kind: "RDSInstance"
spec: {
forProvider: {
engine: parameter.engine
engineVersion: parameter.engineVersion
dbInstanceClass: parameter.instanceClass
dbInstanceStorageInGB: 20
securityIPList: "0.0.0.0/0"
masterUsername: parameter.username
}
writeConnectionSecretToRef: {
namespace: context.namespace
name: parameter.secretName
}
providerConfigRef: {
name: "default"
}
deletionPolicy: "Delete"
}
}
parameter: {
// +usage=RDS engine
engine: *"mysql" | string
// +usage=The version of RDS engine
engineVersion: *"8.0" | string
// +usage=The instance class for the RDS
instanceClass: *"rds.mysql.c1.large" | string
// +usage=RDS username
username: string
// +usage=Secret name which RDS connection will write to
secretName: string
}
```
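A minimal sketch of an end user claiming an RDS instance with this component (the names here are illustrative; `engine`, `engineVersion`, and `instanceClass` fall back to the defaults above):
```yaml
apiVersion: core.oam.dev/v1beta1
kind: Application
metadata:
  name: baas-rds
spec:
  components:
    - name: sample-db
      type: alibaba-rds      # the ComponentDefinition registered above
      properties:
        username: oamtest
        secretName: db-conn  # the connection info will be written into this secret
```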
### Register ComponentDefinition `alibaba-oss` as the OSS cloud resource producer
```yaml
apiVersion: core.oam.dev/v1beta1
kind: ComponentDefinition
metadata:
name: alibaba-oss
namespace: vela-system
annotations:
definition.oam.dev/description: "Alibaba Cloud OSS Resource"
spec:
workload:
definition:
apiVersion: oss.alibaba.crossplane.io/v1alpha1
kind: Bucket
schematic:
cue:
template: |
output: {
apiVersion: "oss.alibaba.crossplane.io/v1alpha1"
kind: "Bucket"
spec: {
name: parameter.name
acl: parameter.acl
storageClass: parameter.storageClass
dataRedundancyType: parameter.dataRedundancyType
writeConnectionSecretToRef: {
namespace: context.namespace
name: parameter.secretName
}
providerConfigRef: {
name: "default"
}
deletionPolicy: "Delete"
}
}
parameter: {
// +usage=OSS bucket name
name: string
// +usage=The access control list of the OSS bucket
acl: *"private" | string
// +usage=The storage type of OSS bucket
storageClass: *"Standard" | string
// +usage=The data Redundancy type of OSS bucket
dataRedundancyType: *"LRS" | string
// +usage=Secret name which RDS connection will write to
secretName: string
}
```
### Register ComponentDefinition `webconsumer` that references a Secret
```yaml
apiVersion: core.oam.dev/v1beta1
kind: ComponentDefinition
metadata:
name: webconsumer
annotations:
definition.oam.dev/description: A Deployment provides declarative updates for Pods and ReplicaSets
spec:
workload:
definition:
apiVersion: apps/v1
kind: Deployment
schematic:
cue:
template: |
output: {
apiVersion: "apps/v1"
kind: "Deployment"
spec: {
selector: matchLabels: {
"app.oam.dev/component": context.name
}
template: {
metadata: labels: {
"app.oam.dev/component": context.name
}
spec: {
containers: [{
name: context.name
image: parameter.image
if parameter["cmd"] != _|_ {
command: parameter.cmd
}
if parameter["dbSecret"] != _|_ {
env: [
{
name: "username"
value: dbConn.username
},
{
name: "endpoint"
value: dbConn.endpoint
},
{
name: "DB_PASSWORD"
value: dbConn.password
},
]
}
ports: [{
containerPort: parameter.port
}]
if parameter["cpu"] != _|_ {
resources: {
limits:
cpu: parameter.cpu
requests:
cpu: parameter.cpu
}
}
}]
}
}
}
}
parameter: {
// +usage=Which image would you like to use for your service
// +short=i
image: string
// +usage=Commands to run in the container
cmd?: [...string]
// +usage=Which port do you want customer traffic sent to
// +short=p
port: *80 | int
// +usage=Referred db secret
// +insertSecretTo=dbConn
dbSecret?: string
// +usage=Number of CPU units for the service, like `0.5` (0.5 CPU core), `1` (1 CPU core)
cpu?: string
}
dbConn: {
username: string
endpoint: string
password: string
}
```
The key point is the annotation `// +insertSecretTo=dbConn`: KubeVela will know the parameter is a K8s secret, and it will parse the secret and bind its data into the CUE struct `dbConn`.
The `output` can then reference `dbConn` to get the data. There is no restriction on the name `dbConn`;
the keyword is `+insertSecretTo`, which defines the data binding mechanism. The above is just an example.
### Prepare TraitDefinition `service-binding` for env-secret mapping
As for data binding in an application, KubeVela recommends defining a trait to finish the job. We have prepared a handy trait for this purpose; it is well suited to binding a resource's information into the environment variables of the pod spec:
```yaml
apiVersion: core.oam.dev/v1beta1
kind: TraitDefinition
metadata:
annotations:
definition.oam.dev/description: "binding cloud resource secrets to pod env"
name: service-binding
spec:
appliesToWorkloads:
- webservice
- worker
schematic:
cue:
template: |
patch: {
spec: template: spec: {
// +patchKey=name
containers: [{
name: context.name
// +patchKey=name
env: [
for envName, v in parameter.envMappings {
name: envName
valueFrom: {
secretKeyRef: {
name: v.secret
if v["key"] != _|_ {
key: v.key
}
if v["key"] == _|_ {
key: envName
}
}
}
},
]
}]
}
}
parameter: {
// +usage=The mapping of environment variables to secret
envMappings: [string]: [string]: string
}
```
With the help of this `service-binding` trait, developers can explicitly set the parameter `envMappings` to map all the environment variables. An example is shown below:
```yaml
...
traits:
- type: service-binding
properties:
envMappings:
# environments refer to db-conn secret
DB_PASSWORD:
secret: db-conn
key: password # 1) If the env name is different from secret key, secret key has to be set.
endpoint:
secret: db-conn # 2) If the env name is the same as the secret key, secret key can be omitted.
username:
secret: db-conn
# environments refer to oss-conn secret
BUCKET_NAME:
secret: oss-conn
key: Bucket
...
```
You can check [the end user usage workflow](../end-user/components/cloud-services) to learn how it is used.

View File

@ -0,0 +1,246 @@
---
title: Advanced Features
---
As a data configuration language, CUE supports some black magic for customized structs.
## Render Multiple Resources with a Loop
You can define a for-loop inside `outputs`.
> ⚠️ Note: in this example, the type of `parameter` must be a map.
The example below shows how to render multiple Kubernetes Services in one trait:
```yaml
apiVersion: core.oam.dev/v1beta1
kind: TraitDefinition
metadata:
name: expose
spec:
schematic:
cue:
template: |
parameter: {
http: [string]: int
}
outputs: {
for k, v in parameter.http {
"\(k)": {
apiVersion: "v1"
kind: "Service"
spec: {
selector:
app: context.name
ports: [{
port: v
targetPort: v
}]
}
}
}
}
```
The trait object above can then be used in an Application like below:
```yaml
apiVersion: core.oam.dev/v1beta1
kind: Application
metadata:
name: testapp
spec:
components:
- name: express-server
type: webservice
properties:
...
traits:
- type: expose
properties:
http:
myservice1: 8080
myservice2: 8081
```
## Execute HTTP Requests in a Trait Definition
A trait definition can send an HTTP request and use the response to render resources, with the help of the `processing` field.
You can define the fields needed for the HTTP request, including `method`, `url`, `body`, `header`, and `trailer`, under the `processing.http` field; the response will be stored in the `processing.output` field.
> Please make sure the target HTTP server returns data in **JSON format**.
Then you can reference the returned data in `processing.output` from the `patch` or `output/outputs` fields.
Below is an example:
```yaml
apiVersion: core.oam.dev/v1beta1
kind: TraitDefinition
metadata:
name: auth-service
spec:
schematic:
cue:
template: |
parameter: {
serviceURL: string
}
processing: {
output: {
token?: string
}
// The target server will return a JSON data with `token` as key.
http: {
method: *"GET" | string
url: parameter.serviceURL
request: {
body?: bytes
header: {}
trailer: {}
}
}
}
patch: {
data: token: processing.output.token
}
```
In the example above, the trait definition sends a request to fetch the `token` data, and then patches the data into the given component instance.
## Data Passing
A TraitDefinition can read the API resources that have been generated by the given ComponentDefinition (rendered from `output` and `outputs`).
> KubeVela ensures that the ComponentDefinition is always rendered before the TraitDefinition.
Specifically, the `context.output` field contains the rendered workload API resource (i.e. the resource whose GVK is declared in the `spec.workload` field of the ComponentDefinition), and the `context.outputs.<xx>` fields contain all the other rendered, non-workload API resources.
Below is an example of data passing:
```yaml
apiVersion: core.oam.dev/v1beta1
kind: ComponentDefinition
metadata:
name: worker
spec:
workload:
definition:
apiVersion: apps/v1
kind: Deployment
schematic:
cue:
template: |
output: {
apiVersion: "apps/v1"
kind: "Deployment"
spec: {
selector: matchLabels: {
"app.oam.dev/component": context.name
}
template: {
metadata: labels: {
"app.oam.dev/component": context.name
}
spec: {
containers: [{
name: context.name
image: parameter.image
ports: [{containerPort: parameter.port}]
envFrom: [{
configMapRef: name: context.name + "game-config"
}]
if parameter["cmd"] != _|_ {
command: parameter.cmd
}
}]
}
}
}
}
outputs: gameconfig: {
apiVersion: "v1"
kind: "ConfigMap"
metadata: {
name: context.name + "game-config"
}
data: {
enemies: parameter.enemies
lives: parameter.lives
}
}
parameter: {
// +usage=Which image would you like to use for your service
// +short=i
image: string
// +usage=Commands to run in the container
cmd?: [...string]
lives: string
enemies: string
port: int
}
---
apiVersion: core.oam.dev/v1beta1
kind: TraitDefinition
metadata:
name: ingress
spec:
schematic:
cue:
template: |
parameter: {
domain: string
path: string
exposePort: int
}
// trait template can have multiple outputs in one trait
outputs: service: {
apiVersion: "v1"
kind: "Service"
spec: {
selector:
app: context.name
ports: [{
port: parameter.exposePort
targetPort: context.output.spec.template.spec.containers[0].ports[0].containerPort
}]
}
}
outputs: ingress: {
apiVersion: "networking.k8s.io/v1beta1"
kind: "Ingress"
metadata:
name: context.name
labels: config: context.outputs.gameconfig.data.enemies
spec: {
rules: [{
host: parameter.domain
http: {
paths: [{
path: parameter.path
backend: {
serviceName: context.name
servicePort: parameter.exposePort
}
}]
}
}]
}
}
```
Some details about the rendering of the `worker` `ComponentDefinition` above:
1. The rendered Kubernetes Deployment resource (the workload) is stored in the `context.output` field.
2. All the other rendered resources (non-workload) are stored in the `context.outputs.<xx>` fields, where each `<xx>` is the unique name in the corresponding `template.outputs`.
Thus, the `TraitDefinition` can read the rendered API resources from the `context` field (e.g. `context.outputs.gameconfig.data.enemies`).

View File

@ -0,0 +1,541 @@
---
title: Learning CUE
---
This section will explain in detail how to use CUE to encapsulate and abstract existing capabilities in Kubernetes.
> Before reading this section, please make sure you have learned about the `Application` resource.
## Overview
The main reasons for KubeVela to regard CUE as the best way of building abstractions are:
- **CUE is designed for large-scale configuration.** CUE can keep track of highly complex configuration files and safely change the values of thousands of objects in a modifiable configuration. This matches KubeVela's original goal perfectly: to define and ship production-grade applications at web scale (web scale is a software design approach that mainly covers scalability, consistency, tolerance, and version control).
- **CUE supports first-class code generation and automation.** CUE integrates with existing tools and workflows natively, whereas other tools would require custom and complex solutions. For example, you would need to manually generate OpenAPI schemas with Go code. KubeVela also relies on this feature of CUE to build its developer tools and GUIs.
- **CUE integrates very well with Go.** KubeVela, like most projects in the Kubernetes ecosystem, is developed in Go. CUE is already implemented in Go and provides a rich API. KubeVela implements its Kubernetes controllers with CUE at the core. With CUE, KubeVela can easily handle data constraint problems.
> For more details, please check [The Configuration Complexity Curse](https://blog.cedriccharly.com/post/20191109-the-configuration-complexity-curse/) and [The Logic of CUE](https://cuelang.org/docs/concepts/logic/).
## Prerequisites
Please make sure the following CLI is present in your environment:
* [`cue` >=v0.2.2](https://cuelang.org/docs/install/)
## CUE CLI Basics
We can define models and data in the same file with almost the same format. Below are the basic data types of CUE:
```
a: 1.5
a: float
b: 1
b: int
d: [1, 2, 3]
g: {
h: "abc"
}
e: string
```
CUE is a superset of JSON; we can use CUE like JSON, with the following conveniences:
* C-style comments,
* quotes may be omitted from field names without special characters,
* commas at the end of fields are optional,
* a comma after the last element in a list is allowed,
* the outer curly braces are optional.
CUE has a powerful CLI. Save the data above into a `first.cue` file and try out the commands:
* Format the CUE file. If you are using Goland or a similar JetBrains IDE,
you can set up an auto-format plugin by following [Setting up auto format for cuelang in Goland](https://wonderflow.info/posts/2020-11-02-goland-cuelang-format/).
This command not only formats the CUE file but also points out wrong models; it is a pretty useful command:
```shell
cue fmt first.cue
```
* Validate the model. Besides `cue fmt`, you can also use `cue vet` to validate the model:
```shell
cue vet first.cue
```
* Evaluate/render the result. `cue eval` evaluates the CUE file and renders the final result.
We can see that the final result does not contain `a: float` or `b: int`, because these two variables have already been evaluated and filled.
`e: string` is kept unchanged because no concrete value has been assigned to it:
```shell
$ cue eval first.cue
a: 1.5
b: 1
d: [1, 2, 3]
g: {
h: "abc"
}
e: string
```
* Render a specified result. For example, if we only want to know the result of `b` in the file, we can use the flag `-e`:
```shell
$ cue eval -e b first.cue
1
```
* Export the rendered result. `cue export` exports the final rendered result. Running it reports an error if some variables are not concrete:
```shell
$ cue export first.cue
e: cannot convert incomplete value "string" to JSON:
./first.cue:9:4
```
We can fix it by assigning a concrete value to `e`, for example:
```shell
echo "e: \"abc\"" >> first.cue
```
Then the command works. By default, the rendered result is formatted as JSON:
```shell
$ cue export first.cue
{
"a": 1.5,
"b": 1,
"d": [
1,
2,
3
],
"g": {
"h": "abc"
},
"e": "abc"
}
```
* Export the rendered result in YAML format:
```shell
$ cue export first.cue --out yaml
a: 1.5
b: 1
d:
- 1
- 2
- 3
g:
h: abc
e: abc
```
* Export the result of a specified variable:
```shell
$ cue export -e g first.cue
{
"h": "abc"
}
```
So far, you have learned all the commonly used CUE CLI commands.
## CUE Language Basics
* Data types: below are the basic data types of CUE:
```shell
// float
a: 1.5
// int
b: 1
// string
c: "blahblahblah"
// array
d: [1, 2, 3, 1, 2, 3, 1, 2, 3]
// bool
e: true
// struct
f: {
a: 1.5
b: 1
d: [1, 2, 3, 1, 2, 3, 1, 2, 3]
g: {
h: "abc"
}
}
// null
j: null
```
* Define a custom CUE type. You can use the `#` symbol to declare variables that represent a CUE type:
```
#abc: string
```
Save the above content into a `second.cue` file. Running `cue export` will not complain that `#abc` is an incomplete value:
```shell
$ cue export second.cue
{}
```
You can also define more complex custom structs, for example:
```
#abc: {
x: int
y: string
z: {
a: float
b: bool
}
}
```
Custom structs are widely used in KubeVela to define templates and do validation.
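For example, a quick validation sketch: save the snippet below into `validate.cue` and run `cue vet validate.cue`; it reports a conflict because `value` is declared as `int` but given a string:
```
#Config: {
	name:  string
	value: int
}
// unifying data with the type triggers the validation
config: #Config & {
	name:  "mode"
	value: "fast" // error: conflicting values "fast" and int
}
```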
## CUE Templating and References
Let's try to define a CUE template with the knowledge we've just learned.
1. Define a struct variable `parameter`:
```shell
parameter: {
name: string
image: string
}
```
Save the above variable into a file named `deployment.cue`.
2. Define a more complex struct variable `template` and reference the variable `parameter`:
```
template: {
apiVersion: "apps/v1"
kind: "Deployment"
spec: {
selector: matchLabels: {
"app.oam.dev/component": parameter.name
}
template: {
metadata: labels: {
"app.oam.dev/component": parameter.name
}
spec: {
containers: [{
name: parameter.name
image: parameter.image
}]
}}}
}
```
People who are familiar with Kubernetes may have already recognized that this is a template for a Kubernetes Deployment, where `parameter` is the parameter part of the template.
Add the above content to the `deployment.cue` file.
3. Then, complete it by assigning values to the parameters:
```
parameter:{
name: "mytest"
image: "nginx:v1"
}
```
4. Finally, export the rendered result in YAML format:
```shell
$ cue export deployment.cue -e template --out yaml
apiVersion: apps/v1
kind: Deployment
spec:
template:
spec:
containers:
- name: mytest
image: nginx:v1
metadata:
labels:
app.oam.dev/component: mytest
selector:
matchLabels:
app.oam.dev/component: mytest
```
## Advanced CUE Designs
* Open structs and lists. Using `...` in a list or struct means the object is open.
   - A list like `[...string]` means it can hold multiple string elements.
     Without `...`, `[string]` means the list can only have one `string` element.
   - A struct like below means it can contain unknown fields:
```
{
abc: string
...
}
```
* The `|` operator. It represents a disjunction of two types. In the example below, the variable `a` can be either a string or an integer:
```shell
a: string | int
```
* Default values. We can use the `*` symbol to define a default value for a variable. It is usually used together with `|`
to represent the default value of some type. In the example below, the variable `a` is of type `int` with a default value of `1`:
```shell
a: *1 | int
```
* Optional variables. In some cases, a variable may not be used; such variables are optional, and we can define them with `?:`.
In the example below, `a` is an optional variable, `x` and `z` in the custom type `#my` are optional, while `y` is a required field:
```
a ?: int
#my: {
x ?: string
y : int
z ?:float
}
```
Optional variables can be skipped; this often works together with conditional logic.
Specifically, to check whether some field exists, the CUE grammar is `if _variable_ != _|_`, as below:
```
parameter: {
name: string
image: string
config?: [...#Config]
}
output: {
...
spec: {
containers: [{
name: parameter.name
image: parameter.image
if parameter.config != _|_ {
config: parameter.config
}
}]
}
...
}
```
* The `&` operator. It unifies two variables:
```shell
a: *1 | int
b: 3
c: a & b
```
Save the above content into a `third.cue` file.
You can verify the result with `cue eval`:
```shell
$ cue eval third.cue
a: 1
b: 3
c: 3
```
* Conditional statements. They are useful when you perform cascading operations in which different values affect different results.
So you can write `if..else` logic in the template:
```shell
price: number
feel: *"good" | string
// Feel bad if price is too high
if price > 100 {
feel: "bad"
}
price: 200
```
Save the above content into a `fourth.cue` file.
You can verify the result with `cue eval`:
```shell
$ cue eval fourth.cue
price: 200
feel: "bad"
```
Another example is using a boolean value as a parameter:
```
parameter: {
name: string
image: string
useENV: bool
}
output: {
...
spec: {
containers: [{
name: parameter.name
image: parameter.image
if parameter.useENV == true {
env: [{name: "my-env", value: "my-value"}]
}
}]
}
...
}
```
* For-loops. We can use for-loops to avoid repetition.
- Loop over a map:
```cue
parameter: {
name: string
image: string
env: [string]: string
}
output: {
spec: {
containers: [{
name: parameter.name
image: parameter.image
env: [
for k, v in parameter.env {
name: k
value: v
},
]
}]
}
}
```
- Loop over a type:
```
#a: {
"hello": "Barcelona"
"nihao": "Shanghai"
}
for k, v in #a {
"\(k)": {
nameLen: len(v)
value: v
}
}
```
- Loop over a slice:
```cue
parameter: {
name: string
image: string
env: [...{name:string,value:string}]
}
output: {
...
spec: {
containers: [{
name: parameter.name
image: parameter.image
env: [
for _, v in parameter.env {
name: v.name
value: v.value
},
]
}]
}
}
```
Note: you can use `"\( _my-statement_ )"` for string interpolation, for example getting the length of a value as in the type-loop example above.
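A tiny sketch of such interpolation; running `cue eval` on it renders `greeting: "hello vela, name length 4"`:
```
name:     "vela"
greeting: "hello \(name), name length \(len(name))"
```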
## Import CUE Internal Packages
CUE has many [internal packages](https://pkg.go.dev/cuelang.org/go@v0.2.2/pkg) which can also be used in KubeVela.
Below, `strings.Join` is used to concatenate a list of strings into one string:
```cue
import ("strings")
parameter: {
outputs: [{ip: "1.1.1.1", hostname: "xxx.com"}, {ip: "2.2.2.2", hostname: "yyy.com"}]
}
output: {
spec: {
if len(parameter.outputs) > 0 {
_x: [ for _, v in parameter.outputs {
"\(v.ip) \(v.hostname)"
}]
message: "Visiting URL: " + strings.Join(_x, "")
}
}
}
```
## Import Kube Packages
KubeVela reads the OpenAPI schemas from the Kubernetes cluster and automatically builds internal packages for all Kubernetes resources.
You can import these packages in KubeVela's CUE templates via `kube/<apiVersion>`, just like using the CUE internal packages.
For example, `Deployment` can be used as:
```cue
import (
apps "kube/apps/v1"
)
parameter: {
name: string
}
output: apps.#Deployment
output: {
metadata: name: parameter.name
}
```
`Service` can be used as below (without importing the package with an alias):
```cue
import ("kube/v1")
output: v1.#Service
output: {
metadata: {
"name": parameter.name
}
spec: type: "ClusterIP",
}
parameter: {
name: "myapp"
}
```
Even the CRDs installed in the cluster work as well:
```
import (
oam "kube/core.oam.dev/v1alpha2"
)
output: oam.#Application
output: {
metadata: {
"name": parameter.name
}
}
parameter: {
name: "myapp"
}
```
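To preview what such a template renders without touching the cluster, the `dry-run` command of the kubectl plugin introduced earlier may help (a sketch; `my-app.yaml` is a placeholder for your own application file):
```shell
kubectl vela dry-run -f my-app.yaml
```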

View File

@ -0,0 +1,367 @@
---
title: How-to
---
In this section, we will introduce how to use [CUE](https://cuelang.org/) to declare app components via `ComponentDefinition`.
> Before reading this part, please make sure you've learned the [Definition CRD](../definition-and-templates) in KubeVela.
## Declare `ComponentDefinition`
Here is a CUE-based `ComponentDefinition` example which provides an abstraction for the stateless workload type:
```yaml
apiVersion: core.oam.dev/v1beta1
kind: ComponentDefinition
metadata:
name: stateless
spec:
workload:
definition:
apiVersion: apps/v1
kind: Deployment
schematic:
cue:
template: |
parameter: {
name: string
image: string
}
output: {
apiVersion: "apps/v1"
kind: "Deployment"
spec: {
selector: matchLabels: {
"app.oam.dev/component": parameter.name
}
template: {
metadata: labels: {
"app.oam.dev/component": parameter.name
}
spec: {
containers: [{
name: parameter.name
image: parameter.image
}]
}
}
}
}
```
In detail:
- `.spec.workload` is required to indicate the workload type of this component.
- `.spec.schematic.cue.template` is a CUE template, specifically:
* The `output` field defines the template for the abstraction.
* The `parameter` field defines the template parameters, i.e. the configurable properties exposed in the `Application` abstraction (and a JSON schema will be automatically generated based on them).
Let's declare another component named `task`, i.e. an abstraction for the run-to-completion workload.
```yaml
apiVersion: core.oam.dev/v1beta1
kind: ComponentDefinition
metadata:
name: task
annotations:
definition.oam.dev/description: "Describes jobs that run code or a script to completion."
spec:
workload:
definition:
apiVersion: batch/v1
kind: Job
schematic:
cue:
template: |
output: {
apiVersion: "batch/v1"
kind: "Job"
spec: {
parallelism: parameter.count
completions: parameter.count
template: spec: {
restartPolicy: parameter.restart
containers: [{
image: parameter.image
if parameter["cmd"] != _|_ {
command: parameter.cmd
}
}]
}
}
}
parameter: {
count: *1 | int
image: string
restart: *"Never" | string
cmd?: [...string]
}
```
Save the above `ComponentDefinition` objects to files and install them to your Kubernetes cluster by `$ kubectl apply -f stateless-def.yaml -f task-def.yaml`.
## Declare an `Application`
The `ComponentDefinition` can be instantiated in `Application` abstraction as below:
```yaml
apiVersion: core.oam.dev/v1alpha2
kind: Application
metadata:
name: website
spec:
components:
- name: hello
type: stateless
properties:
image: crccheck/hello-world
name: mysvc
- name: countdown
type: task
properties:
image: centos:7
cmd:
- "bin/bash"
- "-c"
- "for i in 9 8 7 6 5 4 3 2 1 ; do echo $i ; done"
```
### Under The Hood
<details>
Above application resource will generate and manage following Kubernetes resources in your target cluster based on the `output` in CUE template and user input in `Application` properties.
```yaml
apiVersion: apps/v1
kind: Deployment
metadata:
name: backend
... # skip tons of metadata info
spec:
template:
spec:
containers:
- name: mysvc
image: crccheck/hello-world
metadata:
labels:
app.oam.dev/component: mysvc
selector:
matchLabels:
app.oam.dev/component: mysvc
---
apiVersion: batch/v1
kind: Job
metadata:
name: countdown
... # skip tons of metadata info
spec:
parallelism: 1
completions: 1
template:
metadata:
name: countdown
spec:
containers:
- name: countdown
image: 'centos:7'
command:
- bin/bash
- '-c'
- for i in 9 8 7 6 5 4 3 2 1 ; do echo $i ; done
restartPolicy: Never
```
</details>
## CUE `Context`
KubeVela allows you to reference the runtime information of your application via the `context` keyword.
The most widely used context variables are the application name (`context.appName`) and the component name (`context.name`):
```cue
context: {
appName: string
name: string
}
```
For example, let's say you want to use the component name filled in by users as the container name in the workload instance:
```cue
parameter: {
image: string
}
output: {
...
spec: {
containers: [{
name: context.name
image: parameter.image
}]
}
...
}
```
> Note that `context` information are auto-injected before resources are applied to target cluster.
### Full available information in CUE `context`
| Context Variable | Description |
| :--: | :---------: |
| `context.appRevision` | The revision of the application |
| `context.appName` | The name of the application |
| `context.name` | The name of the component of the application |
| `context.namespace` | The namespace of the application |
| `context.output` | The rendered workload API resource of the component; this is usually used in trait definitions |
| `context.outputs.<resourceName>` | The rendered trait API resource of the component; this is usually used in trait definitions |
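For instance, a template could record these runtime variables in an auxiliary ConfigMap next to the workload (a minimal sketch; the resource and key names here are arbitrary):
```cue
outputs: velaMeta: {
	apiVersion: "v1"
	kind:       "ConfigMap"
	metadata: {
		name:      context.name + "-meta"
		namespace: context.namespace
	}
	data: {
		appName:     context.appName
		appRevision: context.appRevision
	}
}
```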
## Composition
It's common that a component definition is composed of multiple API resources, for example, a `webserver` component composed of a Deployment and a Service. CUE is a great solution to achieve this with simplified primitives.
> Another approach to composition in KubeVela is, of course, [using Helm](../helm/component).
## How-to
KubeVela requires you to define the template of the workload type in the `output` section, and to leave all the other resource templates in the `outputs` section, with the format as below:
```cue
outputs: <unique-name>:
<full template data>
```
> The reason for this requirement is that KubeVela needs to know which resource it is currently rendering as the workload, so it can do some "magic" like patching annotations/labels or other data during the rendering.
Below is the example of the `webserver` definition:
```yaml
apiVersion: core.oam.dev/v1beta1
kind: ComponentDefinition
metadata:
name: webserver
annotations:
definition.oam.dev/description: "webserver is a combo of Deployment + Service"
spec:
workload:
definition:
apiVersion: apps/v1
kind: Deployment
schematic:
cue:
template: |
output: {
apiVersion: "apps/v1"
kind: "Deployment"
spec: {
selector: matchLabels: {
"app.oam.dev/component": context.name
}
template: {
metadata: labels: {
"app.oam.dev/component": context.name
}
spec: {
containers: [{
name: context.name
image: parameter.image
if parameter["cmd"] != _|_ {
command: parameter.cmd
}
if parameter["env"] != _|_ {
env: parameter.env
}
if context["config"] != _|_ {
env: context.config
}
ports: [{
containerPort: parameter.port
}]
if parameter["cpu"] != _|_ {
resources: {
limits:
cpu: parameter.cpu
requests:
cpu: parameter.cpu
}
}
}]
}
}
}
}
// an extra template
outputs: service: {
apiVersion: "v1"
kind: "Service"
spec: {
selector: {
"app.oam.dev/component": context.name
}
ports: [
{
port: parameter.port
targetPort: parameter.port
},
]
}
}
parameter: {
image: string
cmd?: [...string]
port: *80 | int
env?: [...{
name: string
value?: string
valueFrom?: {
secretKeyRef: {
name: string
key: string
}
}
}]
cpu?: string
}
```
The user could now declare an `Application` with it:
```yaml
apiVersion: core.oam.dev/v1beta1
kind: Application
metadata:
name: webserver-demo
namespace: default
spec:
components:
- name: hello-world
type: webserver
properties:
image: crccheck/hello-world
port: 8000
env:
- name: "foo"
value: "bar"
cpu: "100m"
```
It will generate and manage the following API resources in the target cluster:
```shell
$ kubectl get deployment
NAME READY UP-TO-DATE AVAILABLE AGE
hello-world-v1 1/1 1 1 15s
$ kubectl get svc
NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE
hello-world-trait-7bdcff98f7 ClusterIP <your ip> <none> 8000/TCP 32s
```
## What's Next
Please check the [Learning CUE](./basic) documentation to learn why we support CUE as a first-class templating solution, and for more details about using CUE efficiently.

@@ -0,0 +1,53 @@
---
title: Define resources located in a different namespace from the application
---
In this section, we will introduce how to use a CUE template to create resources (workloads/traits) in a namespace different from the application's.
By default, the `metadata.namespace` of a Kubernetes resource in the CUE template is automatically filled with the same namespace as the application.
If you want to create Kubernetes resources running in a specific namespace which is different from the application's, you can set the `metadata.namespace` field.
KubeVela will create the resources in the specified namespace, and create a `resourceTracker` object as the owner of those resources.
## Usage
```yaml
apiVersion: core.oam.dev/v1beta1
kind: ComponentDefinition
metadata:
name: worker
spec:
definitionRef:
name: deployments.apps
schematic:
cue:
template: |
parameter: {
name: string
image: string
  namespace: string // this `namespace` parameter indicates that the resource may be located in a different namespace from the application
}
output: {
apiVersion: "apps/v1"
kind: "Deployment"
metadata: {
  namespace: parameter.namespace
}
spec: {
selector: matchLabels: {
"app.oam.dev/component": parameter.name
}
template: {
metadata: labels: {
"app.oam.dev/component": parameter.name
}
spec: {
containers: [{
name: parameter.name
image: parameter.image
}]
}}}
}
```
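A hypothetical `Application` using this definition could then look like the following (the component name and image are illustrative):
```yaml
apiVersion: core.oam.dev/v1beta1
kind: Application
metadata:
  name: cross-ns-app
  namespace: default
spec:
  components:
    - name: my-worker
      type: worker
      properties:
        name: my-worker
        image: nginx
        namespace: my-namespace   # the Deployment is created here, not in `default`
```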

@@ -0,0 +1,443 @@
---
title: Patch Traits
---
**Patch** is a very common pattern in trait definitions, i.e. the app operators can amend/patch attributes of the component instance (normally the workload) to enable certain operational features such as sidecars or node affinity rules (and this should be done **before** the resources are applied to the target cluster).
This pattern is extremely useful when the component definition is provided by a third-party component provider (e.g. a software distributor), so the app operators do not have permission to change its template.
> Note that even though a patch trait itself is defined with CUE, it can patch any component regardless of how its schematic is defined (i.e. CUE, Helm, or any other supported way).
Below is an example of a `node-affinity` trait:
```yaml
apiVersion: core.oam.dev/v1beta1
kind: TraitDefinition
metadata:
annotations:
definition.oam.dev/description: "affinity specify node affinity and toleration"
name: node-affinity
spec:
appliesToWorkloads:
- webservice
- worker
podDisruptive: true
schematic:
cue:
template: |
patch: {
spec: template: spec: {
if parameter.affinity != _|_ {
affinity: nodeAffinity: requiredDuringSchedulingIgnoredDuringExecution: nodeSelectorTerms: [{
matchExpressions: [
for k, v in parameter.affinity {
key: k
operator: "In"
values: v
},
]}]
}
if parameter.tolerations != _|_ {
tolerations: [
for k, v in parameter.tolerations {
effect: "NoSchedule"
key: k
operator: "Equal"
value: v
}]
}
}
}
parameter: {
affinity?: [string]: [...string]
tolerations?: [string]: string
}
```
The patch trait above assumes the target component instance has the `spec.template.spec.affinity` field.
Hence, we need to use `appliesToWorkloads` to enforce that the trait only applies to the workload types that have this field.
Another important field is `podDisruptive`: this patch trait will modify the Pod template, so a change to any field of this trait will cause the Pod to restart. We should set `podDisruptive` to true
to tell users that applying this trait will cause the Pod to restart.
Now, users can declare that they want to add node affinity rules to the component instance as below:
```yaml
apiVersion: core.oam.dev/v1beta1
kind: Application
metadata:
name: testapp
spec:
components:
- name: express-server
type: webservice
properties:
image: oamdev/testapp:v1
traits:
- type: "node-affinity"
properties:
affinity:
server-owner: ["owner1","owner2"]
resource-pool: ["pool1","pool2","pool3"]
tolerations:
resource-pool: "broken-pool1"
server-owner: "old-owner"
```
### Known Limitations
By default, patch traits in KubeVela leverage the CUE `merge` operation. It has the following known constraints:
- It cannot handle conflicts.
  - For example, if a component instance already has `replicas=5` set, any patch trait that attempts to modify the `replicas` field will fail; i.e. you should not expose the `replicas` field in its component definition.
- Array lists in the patch will be merged following the order of their indexes. It cannot handle duplication of array list members. But this can be solved by the feature below.
### Strategy Patch
A `strategy patch` is useful for patching array lists.
> Note that this is not a standard CUE feature; KubeVela enhances CUE for this scenario.
With the `//+patchKey=<key_name>` annotation, the merge logic of two array lists will no longer follow the CUE behavior. Instead, it will treat the list as an object and use a strategic-merge approach:
- If a duplicate by key is found, the patch data will be merged with the existing values;
- If no duplicate is found, the patch will append the data to the array list.
An example of a strategy patch trait would look like:
```yaml
apiVersion: core.oam.dev/v1beta1
kind: TraitDefinition
metadata:
annotations:
definition.oam.dev/description: "add sidecar to the app"
name: sidecar
spec:
appliesToWorkloads:
- webservice
- worker
podDisruptive: true
schematic:
cue:
template: |
patch: {
// +patchKey=name
spec: template: spec: containers: [parameter]
}
parameter: {
name: string
image: string
command?: [...string]
}
```
In the example above, we set `patchKey` to `name`, which is the parameter key of the container name. In this case, if the workload has no container with the same name, the patch appends a sidecar container to the `spec.template.spec.containers` array list. If the workload already has a container with the same name as this `sidecar` trait, a merge operation will happen instead of an append (which would otherwise result in duplicated containers).
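A sketch of attaching this trait to a component (the sidecar name and image below are illustrative):
```yaml
apiVersion: core.oam.dev/v1beta1
kind: Application
metadata:
  name: testapp
spec:
  components:
    - name: express-server
      type: webservice
      properties:
        image: oamdev/testapp:v1
      traits:
        - type: sidecar
          properties:
            name: logging-agent
            image: fluentd
```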
If both `patch` and `outputs` exist in one trait definition, the `patch` operation will be handled first and then the `outputs` will be rendered:
```yaml
apiVersion: core.oam.dev/v1beta1
kind: TraitDefinition
metadata:
annotations:
definition.oam.dev/description: "expose the app"
name: expose
spec:
appliesToWorkloads:
- webservice
- worker
podDisruptive: true
schematic:
cue:
template: |
patch: {spec: template: metadata: labels: app: context.name}
outputs: service: {
apiVersion: "v1"
kind: "Service"
metadata: name: context.name
spec: {
selector: app: context.name
ports: [
for k, v in parameter.http {
port: v
targetPort: v
},
]
}
}
parameter: {
http: [string]: int
}
```
Hence, the above trait, which attaches a Service to the given component instance, will first patch the workload with the corresponding label, and then render the Service resource based on the template in `outputs`. A usage sketch follows below.
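A usage sketch (the `web` key under `http` is an arbitrary name; only the port value is used by the template):
```yaml
apiVersion: core.oam.dev/v1beta1
kind: Application
spec:
  ...
  traits:
    - type: expose
      properties:
        http:
          web: 8080
```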
## More Use Cases of Patch Traits
Generally, patch traits are very useful for separating operational concerns from the component definition. Here are more examples.
### Add Labels
For example, patch common labels (virtual group) onto the component instance.
```yaml
apiVersion: core.oam.dev/v1beta1
kind: TraitDefinition
metadata:
annotations:
definition.oam.dev/description: "Add virtual group labels"
name: virtualgroup
spec:
appliesToWorkloads:
- webservice
- worker
podDisruptive: true
schematic:
cue:
template: |
patch: {
spec: template: {
metadata: labels: {
if parameter.scope == "namespace" {
"app.namespace.virtual.group": parameter.group
}
if parameter.scope == "cluster" {
"app.cluster.virtual.group": parameter.group
}
}
}
}
parameter: {
group: *"default" | string
scope: *"namespace" | string
}
```
Then it can be used like:
```yaml
apiVersion: core.oam.dev/v1beta1
kind: Application
spec:
...
traits:
- type: virtualgroup
properties:
group: "my-group1"
scope: "cluster"
```
### Add Annotations
Similar to common labels, you can also patch the component instance with annotations. The annotation values should be JSON strings.
```yaml
apiVersion: core.oam.dev/v1beta1
kind: TraitDefinition
metadata:
annotations:
definition.oam.dev/description: "Specify auto scale by annotation"
name: kautoscale
spec:
appliesToWorkloads:
- webservice
- worker
podDisruptive: false
schematic:
cue:
template: |
import "encoding/json"
patch: {
metadata: annotations: {
"my.custom.autoscale.annotation": json.Marshal({
"minReplicas": parameter.min
"maxReplicas": parameter.max
})
}
}
parameter: {
min: *1 | int
max: *3 | int
}
```
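It could then be used like below (a sketch following the same fragment style as above):
```yaml
apiVersion: core.oam.dev/v1beta1
kind: Application
spec:
  ...
  traits:
    - type: kautoscale
      properties:
        min: 2
        max: 5
```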
### Add Pod Environment Variables
Injecting system environment variables into Pods is also a very common use case.
> This case relies on the strategy merge patch, so don't forget to add `+patchKey=name` as below:
```yaml
apiVersion: core.oam.dev/v1beta1
kind: TraitDefinition
metadata:
annotations:
definition.oam.dev/description: "add env into your pods"
name: env
spec:
appliesToWorkloads:
- webservice
- worker
podDisruptive: true
schematic:
cue:
template: |
patch: {
spec: template: spec: {
// +patchKey=name
containers: [{
name: context.name
// +patchKey=name
env: [
for k, v in parameter.env {
name: k
value: v
},
]
}]
}
}
parameter: {
env: [string]: string
}
```
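A usage sketch (the variable names and values are illustrative):
```yaml
apiVersion: core.oam.dev/v1beta1
kind: Application
spec:
  ...
  traits:
    - type: env
      properties:
        env:
          DEBUG: "true"
          LOG_LEVEL: info
```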
### Inject a `ServiceAccount` Based on an External Auth Service
In this example, the service account is dynamically requested from an authentication service and patched into the service.
This example puts the UID token in the HTTP header; you can also use the request body if you prefer.
```yaml
apiVersion: core.oam.dev/v1beta1
kind: TraitDefinition
metadata:
annotations:
definition.oam.dev/description: "dynamically specify service account"
name: service-account
spec:
appliesToWorkloads:
- webservice
- worker
podDisruptive: true
schematic:
cue:
template: |
processing: {
output: {
credentials?: string
}
http: {
method: *"GET" | string
url: parameter.serviceURL
request: {
header: {
"authorization.token": parameter.uidtoken
}
}
}
}
patch: {
spec: template: spec: serviceAccountName: processing.output.credentials
}
parameter: {
uidtoken: string
serviceURL: string
}
```
The `processing.http` section is an advanced feature that allows a trait definition to send an HTTP request while rendering the resource. Please refer to the [Execute HTTP Request in Trait Definition](#Processing-Trait) section for more details. A usage sketch follows below.
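A usage sketch (both the token and the URL below are hypothetical placeholders):
```yaml
apiVersion: core.oam.dev/v1beta1
kind: Application
spec:
  ...
  traits:
    - type: service-account
      properties:
        uidtoken: "<uid-token-from-your-auth-system>"   # hypothetical placeholder
        serviceURL: "https://auth.example.com/uids"     # hypothetical endpoint
```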
### Add an `InitContainer`
[`InitContainer`](https://kubernetes.io/docs/tasks/configure-pod-container/configure-pod-initialization/#create-a-pod-that-has-an-init-container) is useful for pre-defining operations in an image and running it before the app container starts.
Below is an example:
```yaml
apiVersion: core.oam.dev/v1beta1
kind: TraitDefinition
metadata:
annotations:
definition.oam.dev/description: "add an init container and use shared volume with pod"
name: init-container
spec:
appliesToWorkloads:
- webservice
- worker
podDisruptive: true
schematic:
cue:
template: |
patch: {
spec: template: spec: {
// +patchKey=name
containers: [{
name: context.name
// +patchKey=name
volumeMounts: [{
name: parameter.mountName
mountPath: parameter.appMountPath
}]
}]
initContainers: [{
name: parameter.name
image: parameter.image
if parameter.command != _|_ {
command: parameter.command
}
// +patchKey=name
volumeMounts: [{
name: parameter.mountName
mountPath: parameter.initMountPath
}]
}]
// +patchKey=name
volumes: [{
name: parameter.mountName
emptyDir: {}
}]
}
}
parameter: {
name: string
image: string
command?: [...string]
mountName: *"workdir" | string
appMountPath: string
initMountPath: string
}
```
The usage could be:
```yaml
apiVersion: core.oam.dev/v1beta1
kind: Application
metadata:
name: testapp
spec:
components:
- name: express-server
type: webservice
properties:
image: oamdev/testapp:v1
traits:
- type: "init-container"
properties:
name: "install-container"
image: "busybox"
command:
- wget
- "-O"
- "/work-dir/index.html"
- http://info.cern.ch
mountName: "workdir"
appMountPath: "/usr/share/nginx/html"
initMountPath: "/work-dir"
```

@@ -0,0 +1,135 @@
---
title: Status Write Back
---
This documentation will explain how to achieve status write back by using CUE templates in definition objects.
## Health Check
The health check field for both workloads and traits is `spec.status.healthPolicy`.
If it is not defined, the health check result defaults to `true`.
The keyword in CUE is `isHealth`, and the result of the CUE expression must be of `bool` type.
The KubeVela runtime will evaluate the CUE expression periodically until it becomes healthy. Every time, the controller will fetch all the Kubernetes resources and fill the results into the `context` field.
So the `context` field will contain the following information:
```cue
context:{
name: <component name>
appName: <app name>
output: <K8s workload resource>
outputs: {
<resource1>: <K8s trait resource1>
<resource2>: <K8s trait resource2>
}
}
```
Traits do not have the `context.output` field; the other fields are the same.
Below is an example of a health check:
```yaml
apiVersion: core.oam.dev/v1beta1
kind: ComponentDefinition
spec:
status:
healthPolicy: |
isHealth: (context.output.status.readyReplicas > 0) && (context.output.status.readyReplicas == context.output.status.replicas)
...
```
```yaml
apiVersion: core.oam.dev/v1beta1
kind: TraitDefinition
spec:
status:
healthPolicy: |
isHealth: len(context.outputs.service.spec.clusterIP) > 0
...
```
> For a component health check example, please refer to [this doc](https://github.com/oam-dev/kubevela/blob/master/docs/examples/app-with-status/template.yaml).
The health check result will be recorded in the `Application` resource of the corresponding component.
```yaml
apiVersion: core.oam.dev/v1beta1
kind: Application
spec:
components:
- name: myweb
type: worker
properties:
cmd:
- sleep
- "1000"
enemies: alien
image: busybox
lives: "3"
traits:
- type: ingress
properties:
domain: www.example.com
http:
/: 80
status:
...
services:
- healthy: true
message: "type: busybox,\t enemies:alien"
name: myweb
traits:
- healthy: true
message: 'Visiting URL: www.example.com, IP: 47.111.233.220'
type: ingress
status: running
```
## Custom Status
The custom status field for both workloads and traits is `spec.status.customStatus`.
The keyword in CUE is `message`, and the result of the CUE expression must be of `string` type.
The internal mechanism of custom status is similar to the health check introduced above: the Application CRD controller will evaluate the CUE expression until it succeeds.
The `context` field will contain the following information:
```cue
context:{
name: <component name>
appName: <app name>
output: <K8s workload resource>
outputs: {
<resource1>: <K8s trait resource1>
<resource2>: <K8s trait resource2>
}
}
```
Traits do not have the `context.output` field; the other fields are the same.
An example can be found in [this doc](https://github.com/oam-dev/kubevela/blob/master/docs/examples/app-with-status/template.yaml).
```yaml
apiVersion: core.oam.dev/v1beta1
kind: ComponentDefinition
spec:
status:
customStatus: |-
message: "type: " + context.output.spec.template.spec.containers[0].image + ",\t enemies:" + context.outputs.gameconfig.data.enemies
...
```
```yaml
apiVersion: core.oam.dev/v1beta1
kind: TraitDefinition
spec:
status:
customStatus: |-
message: "type: "+ context.outputs.service.spec.type +",\t clusterIP:"+ context.outputs.service.spec.clusterIP+",\t ports:"+ "\(context.outputs.service.spec.ports[0].port)"+",\t domain"+context.outputs.ingress.spec.rules[0].host
...
```

@@ -0,0 +1,145 @@
---
title: How to Define
---
In this section, we will introduce how to define a trait.
## Simple Trait
A trait in KubeVela can be defined by simply referencing an existing Kubernetes API resource.
```yaml
apiVersion: core.oam.dev/v1beta1
kind: TraitDefinition
metadata:
name: ingress
spec:
definitionRef:
name: ingresses.networking.k8s.io
```
Let's attach this trait to a component instance in the `Application`:
```yaml
apiVersion: core.oam.dev/v1beta1
kind: Application
metadata:
name: testapp
spec:
components:
- name: express-server
type: webservice
properties:
cmd:
- node
- server.js
image: oamdev/testapp:v1
port: 8080
traits:
- type: ingress
properties:
rules:
- http:
paths:
- path: /testpath
pathType: Prefix
backend:
service:
name: test
port:
number: 80
```
Note that in this case, all fields in the `spec` of the referenced resource will be exposed to end users, and no metadata (e.g. `annotations` etc.) is allowed to be set as trait properties. Hence, this approach is normally used when you want to bring your own CRD and controller as a trait, and it does not rely on `annotations` etc. as the tuning knobs.
## Define a Trait with CUE
Defining a trait with CUE is also recommended. In this case, the trait comes with abstraction, and you have full flexibility to template any resources and fields as you need. Note that KubeVela requires all traits to be defined in the `outputs` section (not `output`) of the CUE template, in the format below:
```cue
outputs: <unique-name>:
<full template data>
```
Below is an example of an `ingress` trait.
```yaml
apiVersion: core.oam.dev/v1beta1
kind: TraitDefinition
metadata:
name: ingress
spec:
podDisruptive: false
schematic:
cue:
template: |
parameter: {
domain: string
http: [string]: int
}
// trait template can have multiple outputs in one trait
outputs: service: {
apiVersion: "v1"
kind: "Service"
spec: {
selector:
app: context.name
ports: [
for k, v in parameter.http {
port: v
targetPort: v
},
]
}
}
outputs: ingress: {
apiVersion: "networking.k8s.io/v1beta1"
kind: "Ingress"
metadata:
name: context.name
spec: {
rules: [{
host: parameter.domain
http: {
paths: [
for k, v in parameter.http {
path: k
backend: {
serviceName: context.name
servicePort: v
}
},
]
}
}]
}
}
```
Let's attach this trait to a component instance in the `Application`:
```yaml
apiVersion: core.oam.dev/v1beta1
kind: Application
metadata:
name: testapp
spec:
components:
- name: express-server
type: webservice
properties:
cmd:
- node
- server.js
image: oamdev/testapp:v1
port: 8080
traits:
- type: ingress
properties:
domain: test.my.domain
http:
"/api": 8080
```
CUE-based trait definitions can also support many other advanced scenarios, such as patching and data passing. They will be explained in detail in the following documentation.

@@ -0,0 +1,665 @@
---
title: Debug, Test and Dry-run
---
For CUE-based definitions with powerful and flexible abstraction capabilities, debugging, testing, and dry-run are very important. This tutorial will introduce how to do them step by step.
## Prerequisites
Please make sure the following CLI is installed in your environment:
* [`cue` >=v0.2.2](https://cuelang.org/docs/install/)
## Define the Definition and Template
We recommend splitting the `Definition Object` into two parts: the CRD part and the CUE template part. This split will help us debug, test, and dry-run the CUE template.
Let's save the CRD part in a file named `def.yaml`.
```yaml
apiVersion: core.oam.dev/v1beta1
kind: ComponentDefinition
metadata:
name: microservice
annotations:
definition.oam.dev/description: "Describes a microservice combo Deployment with Service."
spec:
workload:
definition:
apiVersion: apps/v1
kind: Deployment
schematic:
cue:
template: |
```
And save the CUE template part in a file named `def.cue`. We can then use the CUE CLI (`cue fmt` / `cue vet`) to format and validate the CUE file.
```
output: {
// Deployment
apiVersion: "apps/v1"
kind: "Deployment"
metadata: {
name: context.name
namespace: "default"
}
spec: {
selector: matchLabels: {
"app": context.name
}
template: {
metadata: {
labels: {
"app": context.name
"version": parameter.version
}
}
spec: {
serviceAccountName: "default"
terminationGracePeriodSeconds: parameter.podShutdownGraceSeconds
containers: [{
name: context.name
image: parameter.image
ports: [{
if parameter.containerPort != _|_ {
containerPort: parameter.containerPort
}
if parameter.containerPort == _|_ {
containerPort: parameter.servicePort
}
}]
if parameter.env != _|_ {
env: [
for k, v in parameter.env {
name: k
value: v
},
]
}
resources: {
requests: {
if parameter.cpu != _|_ {
cpu: parameter.cpu
}
if parameter.memory != _|_ {
memory: parameter.memory
}
}
}
}]
}
}
}
}
// Service
outputs: service: {
apiVersion: "v1"
kind: "Service"
metadata: {
name: context.name
labels: {
"app": context.name
}
}
spec: {
type: "ClusterIP"
selector: {
"app": context.name
}
ports: [{
port: parameter.servicePort
if parameter.containerPort != _|_ {
targetPort: parameter.containerPort
}
if parameter.containerPort == _|_ {
targetPort: parameter.servicePort
}
}]
}
}
parameter: {
version: *"v1" | string
image: string
servicePort: int
containerPort?: int
// +usage=Optional duration in seconds the pod needs to terminate gracefully
podShutdownGraceSeconds: *30 | int
env: [string]: string
cpu?: string
memory?: string
}
```
Once the above is done, use the script [`hack/vela-templates/mergedef.sh`](https://github.com/oam-dev/kubevela/blob/master/hack/vela-templates/mergedef.sh) to merge `def.yaml` and `def.cue` into a complete definition object.
```shell
$ ./hack/vela-templates/mergedef.sh def.yaml def.cue > microservice-def.yaml
```
## Debug the CUE Template
### Validate with `cue vet`
```shell
$ cue vet def.cue
output.metadata.name: reference "context" not found:
./def.cue:6:14
output.spec.selector.matchLabels.app: reference "context" not found:
./def.cue:11:11
output.spec.template.metadata.labels.app: reference "context" not found:
./def.cue:16:17
output.spec.template.spec.containers.name: reference "context" not found:
./def.cue:24:13
outputs.service.metadata.name: reference "context" not found:
./def.cue:62:9
outputs.service.metadata.labels.app: reference "context" not found:
./def.cue:64:11
outputs.service.spec.selector.app: reference "context" not found:
./def.cue:70:11
```
The common error `reference "context" not found` occurs around [`context`](./cue/component#cue-context), which is runtime information that only exists in the KubeVela controller. We can mock the `context` in `def.cue` to validate the CUE template end to end.
> Note that all mock data needs to be removed once the validation is done.
```CUE
... // existing template data
context: {
name: string
}
```
Then execute the command:
```shell
$ cue vet def.cue
some instances are incomplete; use the -c flag to show errors or suppress this message
```
The error `reference "context" not found` is now resolved, but `cue vet` only validates the data types, which is not enough to prove the template logic is correct. Hence, we need to use `cue vet -c` for the final validation:
```shell
$ cue vet def.cue -c
context.name: incomplete value string
output.metadata.name: incomplete value string
output.spec.selector.matchLabels.app: incomplete value string
output.spec.template.metadata.labels.app: incomplete value string
output.spec.template.spec.containers.0.image: incomplete value string
output.spec.template.spec.containers.0.name: incomplete value string
output.spec.template.spec.containers.0.ports.0.containerPort: incomplete value int
outputs.service.metadata.labels.app: incomplete value string
outputs.service.metadata.name: incomplete value string
outputs.service.spec.ports.0.port: incomplete value int
outputs.service.spec.ports.0.targetPort: incomplete value int
outputs.service.spec.selector.app: incomplete value string
parameter.image: incomplete value string
parameter.servicePort: incomplete value int
```
This time the command complains that the runtime data is incomplete (mainly because no values have been set in the `context` and `parameter` fields), so let's fill in more mock data in the `def.cue` file:
```CUE
context: {
name: "test-app"
}
parameter: {
version: "v2"
image: "image-address"
servicePort: 80
containerPort: 8000
env: {"PORT": "8000"}
cpu: "500m"
memory: "128Mi"
}
```
It reports no errors when you execute the command below, which means the logic validation has passed:
```shell
cue vet def.cue -c
```
#### Validate the Rendered Resources with `cue export`
The `cue export` command exports the rendered result in YAML format:
```shell
$ cue export -e output def.cue --out yaml
apiVersion: apps/v1
kind: Deployment
metadata:
name: test-app
namespace: default
spec:
selector:
matchLabels:
app: test-app
template:
metadata:
labels:
app: test-app
version: v2
spec:
serviceAccountName: default
terminationGracePeriodSeconds: 30
containers:
- name: test-app
image: image-address
```
```shell
$ cue export -e outputs.service def.cue --out yaml
apiVersion: v1
kind: Service
metadata:
name: test-app
labels:
app: test-app
spec:
selector:
app: test-app
type: ClusterIP
```
### Test the CUE Template with the `Kube` Package
KubeVela automatically generates internal CUE packages for all built-in Kubernetes API resources as well as CRDs.
You can import them into the CUE template to simplify the template and help you do the validation.
There are currently two ways to import internal `kube` packages.
1. Import them in the fixed style `kube/<apiVersion>`, so we can directly reference the struct corresponding to the `Kind`.
```cue
import (
apps "kube/apps/v1"
corev1 "kube/v1"
)
// output is validated by Deployment.
output: apps.#Deployment
outputs: service: corev1.#Service
```
This way is easy to remember and use, because it is consistent with the usage of Kubernetes objects: you only need to add the `kube/` prefix in front of the `apiVersion`.
However, this way is only supported inside KubeVela, so you can only debug and test it with [`vela system dry-run`](#dry-run-the-application).
2. Import them as third-party packages.
You can run `vela system cue-packages` to list all the built-in `kube` packages and learn which `third-party packages` are currently supported:
```shell
$ vela system cue-packages
DEFINITION-NAME IMPORT-PATH USAGE
#Deployment k8s.io/apps/v1 Kube Object for apps/v1.Deployment
#Service k8s.io/core/v1 Kube Object for v1.Service
#Secret k8s.io/core/v1 Kube Object for v1.Secret
#Node k8s.io/core/v1 Kube Object for v1.Node
#PersistentVolume k8s.io/core/v1 Kube Object for v1.PersistentVolume
#Endpoints k8s.io/core/v1 Kube Object for v1.Endpoints
#Pod k8s.io/core/v1 Kube Object for v1.Pod
```
In fact, these are all built-in packages; the only difference is that you can import them with the `import-path` just like `third-party packages`.
With this way, you can debug with the `cue` CLI.
#### A Workflow to Debug CUE Templates That Use the `Kube` Package
This section introduces a workflow to debug and test CUE templates with the `cue` CLI, while the **exact same CUE templates** can be used in KubeVela.
1. Create a test directory and initialize the CUE module.
```shell
mkdir cue-debug && cd cue-debug/
cue mod init oam.dev
go mod init oam.dev
touch def.cue
```
2. Download the `third-party packages` with the `cue` CLI.
In KubeVela you don't actually need to download these packages, because they have been automatically generated from the Kubernetes API.
But in the local test environment, we need to use `cue get go` to fetch the Go packages and convert them into CUE-format files.
So, to use the Kubernetes `Deployment` and `Service` resources, we need to download and convert the CUE definitions of the `core` and `apps` Kubernetes modules as below:
```shell
cue get go k8s.io/api/core/v1
cue get go k8s.io/api/apps/v1
```
After that, the module directory will have the following structure:
```shell
├── cue.mod
│ ├── gen
│ │ └── k8s.io
│ │ ├── api
│ │ │ ├── apps
│ │ │ └── core
│ │ └── apimachinery
│ │ └── pkg
│ ├── module.cue
│ ├── pkg
│ └── usr
├── def.cue
├── go.mod
└── go.sum
```
The import paths of these packages in the CUE template would then be:
```cue
import (
apps "k8s.io/api/apps/v1"
corev1 "k8s.io/api/core/v1"
)
```
3. Refactor the directory structure.
Our goal is to test the templates locally and use the exact same templates in KubeVela.
So we need to refactor our local CUE module directories a bit to keep the import paths consistent with the ones provided by KubeVela.
Copy the `apps` and `core` directories from `cue.mod/gen/k8s.io/api` to `cue.mod/gen/k8s.io`.
Note that we should keep the source directories `apps` and `core` in `gen/k8s.io/api` to avoid package dependency problems.
```bash
cp -r cue.mod/gen/k8s.io/api/apps cue.mod/gen/k8s.io
cp -r cue.mod/gen/k8s.io/api/core cue.mod/gen/k8s.io
```
The directory structure after copying will look like:
```shell
├── cue.mod
│ ├── gen
│ │ └── k8s.io
│ │ ├── api
│ │ │ ├── apps
│ │ │ └── core
│ │ ├── apimachinery
│ │ │ └── pkg
│ │ ├── apps
│ │ └── core
│ ├── module.cue
│ ├── pkg
│ └── usr
├── def.cue
├── go.mod
└── go.sum
```
Now you can import the packages using paths consistent with KubeVela:
```cue
import (
apps "k8s.io/apps/v1"
corev1 "k8s.io/core/v1"
)
```
4. Run the test.
Finally, we can test the CUE template with the `Kube` packages.
```cue
import (
apps "k8s.io/apps/v1"
corev1 "k8s.io/core/v1"
)
// output is validated by Deployment.
output: apps.#Deployment
output: {
metadata: {
name: context.name
namespace: "default"
}
spec: {
selector: matchLabels: {
"app": context.name
}
template: {
metadata: {
labels: {
"app": context.name
"version": parameter.version
}
}
spec: {
terminationGracePeriodSeconds: parameter.podShutdownGraceSeconds
containers: [{
name: context.name
image: parameter.image
ports: [{
if parameter.containerPort != _|_ {
containerPort: parameter.containerPort
}
if parameter.containerPort == _|_ {
containerPort: parameter.servicePort
}
}]
if parameter.env != _|_ {
env: [
for k, v in parameter.env {
name: k
value: v
},
]
}
resources: {
requests: {
if parameter.cpu != _|_ {
cpu: parameter.cpu
}
if parameter.memory != _|_ {
memory: parameter.memory
}
}
}
}]
}
}
}
}
outputs:{
service: corev1.#Service
}
// Service
outputs: service: {
metadata: {
name: context.name
labels: {
"app": context.name
}
}
spec: {
//type: "ClusterIP"
selector: {
"app": context.name
}
ports: [{
port: parameter.servicePort
if parameter.containerPort != _|_ {
targetPort: parameter.containerPort
}
if parameter.containerPort == _|_ {
targetPort: parameter.servicePort
}
}]
}
}
parameter: {
version: *"v1" | string
image: string
servicePort: int
containerPort?: int
// +usage=Optional duration in seconds the pod needs to terminate gracefully
podShutdownGraceSeconds: *30 | int
env: [string]: string
cpu?: string
memory?: string
}
// mock context data
context: {
name: "test"
}
// mock parameter data
parameter: {
image: "test-image"
servicePort: 8000
env: {
"HELLO": "WORLD"
}
}
```
Export the rendered result with `cue export`:
```shell
$ cue export def.cue --out yaml
output:
metadata:
name: test
namespace: default
spec:
selector:
matchLabels:
app: test
template:
metadata:
labels:
app: test
version: v1
spec:
terminationGracePeriodSeconds: 30
containers:
- name: test
image: test-image
ports:
- containerPort: 8000
env:
- name: HELLO
value: WORLD
resources:
requests: {}
outputs:
service:
metadata:
name: test
labels:
app: test
spec:
selector:
app: test
ports:
- port: 8000
targetPort: 8000
parameter:
version: v1
image: test-image
servicePort: 8000
podShutdownGraceSeconds: 30
env:
HELLO: WORLD
context:
name: test
```
## Dry-Run the `Application`
When the CUE template is ready, we can use `vela system dry-run` to dry-run the application and check the rendered resources as they would be applied to a real Kubernetes cluster. This command behaves exactly the same as the `Application` controller in KubeVela.
First, we need to use `mergedef.sh` to merge the definition and the CUE files.
```shell
$ mergedef.sh def.yaml def.cue > componentdef.yaml
```
Then, create an `Application` and save it as `test-app.yaml`.
```yaml
apiVersion: core.oam.dev/v1beta1
kind: Application
metadata:
name: boutique
namespace: default
spec:
components:
- name: frontend
type: microservice
properties:
image: registry.cn-hangzhou.aliyuncs.com/vela-samples/frontend:v0.2.2
servicePort: 80
containerPort: 8080
env:
PORT: "8080"
cpu: "100m"
memory: "64Mi"
```
Run `vela system dry-run` against the `Application` above:
```shell
$ vela system dry-run -f test-app.yaml -d componentdef.yaml
---
# Application(boutique) -- Comopnent(frontend)
---
apiVersion: apps/v1
kind: Deployment
metadata:
labels:
app.oam.dev/component: frontend
app.oam.dev/name: boutique
workload.oam.dev/type: microservice
name: frontend
namespace: default
spec:
selector:
matchLabels:
app: frontend
template:
metadata:
labels:
app: frontend
version: v1
spec:
containers:
- env:
- name: PORT
value: "8080"
image: registry.cn-hangzhou.aliyuncs.com/vela-samples/frontend:v0.2.2
name: frontend
ports:
- containerPort: 8080
resources:
requests:
cpu: 100m
memory: 64Mi
serviceAccountName: default
terminationGracePeriodSeconds: 30
---
apiVersion: v1
kind: Service
metadata:
labels:
app: frontend
app.oam.dev/component: frontend
app.oam.dev/name: boutique
trait.oam.dev/resource: service
trait.oam.dev/type: AuxiliaryWorkload
name: frontend
spec:
ports:
- port: 80
targetPort: 8080
selector:
app: frontend
type: ClusterIP
---
```
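As an extra sanity check (optional, not part of the official workflow), the rendered manifests can also be validated against a live API server by piping the dry-run output into `kubectl`:
```shell
$ vela system dry-run -f test-app.yaml -d componentdef.yaml | kubectl apply --dry-run=server -f -
```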
