Implement Helm v3 tester

stefanprodan 2019-09-08 09:31:14 +03:00
parent a212f032a6
commit 507075920c
11 changed files with 95 additions and 10 deletions

View File

@@ -14,6 +14,10 @@ chmod +x linux-amd64/helm && mv linux-amd64/helm /usr/local/bin/helm && \
     chmod +x linux-amd64/tiller && mv linux-amd64/tiller /usr/local/bin/tiller && \
     rm -rf linux-amd64
 
+RUN curl -sSL "https://get.helm.sh/helm-v3.0.0-beta.3-linux-amd64.tar.gz" | tar xvz && \
+    chmod +x linux-amd64/helm && mv linux-amd64/helm /usr/local/bin/helmv3 && \
+    rm -rf linux-amd64
+
 RUN GRPC_HEALTH_PROBE_VERSION=v0.3.0 && \
     wget -qO /usr/local/bin/grpc_health_probe https://github.com/grpc-ecosystem/grpc-health-probe/releases/download/${GRPC_HEALTH_PROBE_VERSION}/grpc_health_probe-linux-amd64 && \
     chmod +x /usr/local/bin/grpc_health_probe
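Note that the Helm v3 beta client is deliberately installed under the name `helmv3`: the new task type execs its task type constant as the command name, so the binary name on the image PATH and the `helmv3` task type string must stay in sync. A minimal sketch of a startup check that would catch a mismatch (hypothetical, not part of this commit):

```go
package loadtester

import "os/exec"

// checkHelmBinaries is a hypothetical startup probe, not part of this
// commit: HelmTask and HelmTaskv3 exec their task type names ("helm",
// "helmv3") directly, so both binaries must be present on the PATH,
// which the Dockerfile change above provides by renaming the v3 client.
func checkHelmBinaries() error {
	for _, bin := range []string{TaskTypeHelm, TaskTypeHelmv3} {
		if _, err := exec.LookPath(bin); err != nil {
			return err
		}
	}
	return nil
}
```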

View File

@@ -19,7 +19,7 @@ spec:
       serviceAccountName: tiller
       containers:
         - name: helmtester
-          image: weaveworks/flagger-loadtester:0.4.0
+          image: weaveworks/flagger-loadtester:0.8.0
           imagePullPolicy: IfNotPresent
           ports:
             - name: http

View File

@@ -17,7 +17,7 @@ spec:
     spec:
       containers:
         - name: loadtester
-          image: weaveworks/flagger-loadtester:0.7.1
+          image: weaveworks/flagger-loadtester:0.8.0
           imagePullPolicy: IfNotPresent
           ports:
             - name: http

View File

@@ -1,7 +1,7 @@
 apiVersion: v1
 name: loadtester
-version: 0.7.1
-appVersion: 0.7.1
+version: 0.8.0
+appVersion: 0.8.0
 kubeVersion: ">=1.11.0-0"
 engine: gotpl
 description: Flagger's load testing services based on rakyll/hey and bojand/ghz that generates traffic during canary analysis when configured as a webhook.

View File

@@ -2,7 +2,7 @@ replicaCount: 1
 
 image:
   repository: weaveworks/flagger-loadtester
-  tag: 0.7.1
+  tag: 0.8.0
   pullPolicy: IfNotPresent
 
 logLevel: info

View File

@@ -10,7 +10,7 @@ import (
 	"time"
 )
 
-var VERSION = "0.7.1"
+var VERSION = "0.8.0"
 var (
 	logLevel string
 	port     string

View File

@@ -882,6 +882,20 @@ Now you can add pre-rollout webhooks to the canary analysis spec:
 When the canary analysis starts, Flagger will call the pre-rollout webhooks before routing traffic to the canary.
 If the helm test fails, Flagger will retry until the analysis threshold is reached and the canary is rolled back.
 
+If you are using Helm v3, you'll have to create a dedicated service account and add the release namespace to the test command:
+
+```yaml
+  canaryAnalysis:
+    webhooks:
+      - name: "smoke test"
+        type: pre-rollout
+        url: http://flagger-helmtester.kube-system/
+        timeout: 3m
+        metadata:
+          type: "helmv3"
+          cmd: "test run {{ .Release.Name }} --cleanup -n {{ .Release.Namespace }}"
+```
+
 As an alternative to Helm you can use the [Bash Automated Testing System](https://github.com/bats-core/bats-core) to run your tests.
 
 ```yaml

View File

@@ -17,7 +17,7 @@ spec:
     spec:
       containers:
         - name: loadtester
-          image: weaveworks/flagger-loadtester:0.7.1
+          image: weaveworks/flagger-loadtester:0.8.0
           imagePullPolicy: IfNotPresent
           ports:
             - name: http

View File

@@ -20,14 +20,14 @@ func (task *HelmTask) Hash() string {
 }
 
 func (task *HelmTask) Run(ctx context.Context) (bool, error) {
-	helmCmd := fmt.Sprintf("helm %s", task.command)
+	helmCmd := fmt.Sprintf("%s %s", TaskTypeHelm, task.command)
 	task.logger.With("canary", task.canary).Infof("running command %v", helmCmd)
 
-	cmd := exec.CommandContext(ctx, "helm", strings.Fields(task.command)...)
+	cmd := exec.CommandContext(ctx, TaskTypeHelm, strings.Fields(task.command)...)
 	out, err := cmd.CombinedOutput()
 	if err != nil {
 		task.logger.With("canary", task.canary).Errorf("command failed %s %v %s", task.command, err, out)
-		return false, fmt.Errorf(" %v %v", err, out)
+		return false, fmt.Errorf(" %v %s", err, out)
 	} else {
 		if task.logCmdOutput {
 			fmt.Printf("%s\n", out)

pkg/loadtester/helmv3.go (new file, 42 additions)
View File

@@ -0,0 +1,42 @@
package loadtester

import (
	"context"
	"fmt"
	"os/exec"
	"strings"
)

const TaskTypeHelmv3 = "helmv3"

type HelmTaskv3 struct {
	TaskBase
	command      string
	logCmdOutput bool
}

func (task *HelmTaskv3) Hash() string {
	return hash(task.canary + task.command)
}

func (task *HelmTaskv3) Run(ctx context.Context) (bool, error) {
	helmCmd := fmt.Sprintf("%s %s", TaskTypeHelmv3, task.command)
	task.logger.With("canary", task.canary).Infof("running command %v", helmCmd)

	cmd := exec.CommandContext(ctx, TaskTypeHelmv3, strings.Fields(task.command)...)
	out, err := cmd.CombinedOutput()
	if err != nil {
		task.logger.With("canary", task.canary).Errorf("command failed %s %v %s", task.command, err, out)
		return false, fmt.Errorf(" %v %s", err, out)
	} else {
		if task.logCmdOutput {
			fmt.Printf("%s\n", out)
		}
		task.logger.With("canary", task.canary).Infof("command finished %v", helmCmd)
	}
	return true, nil
}

func (task *HelmTaskv3) String() string {
	return task.command
}
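The task is wired in as a blocking branch of the HTTP handler (next hunk) rather than registered through GetTaskFactory. A minimal sketch of driving it directly, assuming a hypothetical `podinfo` release in a `test` namespace and a stand-in zap logger:

```go
package loadtester

import (
	"context"
	"log"
	"time"

	"go.uber.org/zap"
)

// runHelmv3Smoke mirrors how the handler constructs the task; the
// release name, namespace, and flat 3m timeout are placeholders.
func runHelmv3Smoke() {
	zl, _ := zap.NewDevelopment()

	task := HelmTaskv3{
		command:      "test run podinfo --cleanup -n test",
		logCmdOutput: true,
		TaskBase: TaskBase{
			canary: "podinfo.test", // name.namespace, as server.go formats it
			logger: zl.Sugar(),
		},
	}

	// the handler bounds the blocking run with the task runner's timeout;
	// a fixed 3m stands in for that here
	ctx, cancel := context.WithTimeout(context.Background(), 3*time.Minute)
	defer cancel()

	if ok, err := task.Run(ctx); !ok {
		log.Fatalf("helm v3 test failed: %v", err)
	}
}
```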

View File

@@ -185,6 +185,31 @@ func ListenAndServe(port string, timeout time.Duration, logger *zap.SugaredLogger
 			return
 		}
 
+		// run helmv3 command (blocking task)
+		if typ == TaskTypeHelmv3 {
+			helm := HelmTaskv3{
+				command:      payload.Metadata["cmd"],
+				logCmdOutput: true,
+				TaskBase: TaskBase{
+					canary: fmt.Sprintf("%s.%s", payload.Name, payload.Namespace),
+					logger: logger,
+				},
+			}
+
+			ctx, cancel := context.WithTimeout(context.Background(), taskRunner.timeout)
+			defer cancel()
+
+			ok, err := helm.Run(ctx)
+			if !ok {
+				w.WriteHeader(http.StatusInternalServerError)
+				w.Write([]byte(err.Error()))
+				return
+			}
+
+			w.WriteHeader(http.StatusOK)
+			return
+		}
+
 		taskFactory, ok := GetTaskFactory(typ)
 		if !ok {
 			w.WriteHeader(http.StatusBadRequest)
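For context, this is roughly what reaches that branch: when the pre-rollout webhook fires, Flagger POSTs the canary name, namespace, and metadata to the loadtester service. A hedged sketch of an equivalent manual call, with JSON field names inferred from the payload usage above and the service URL taken from the docs example:

```go
package main

import (
	"bytes"
	"encoding/json"
	"log"
	"net/http"
)

func main() {
	// Field names mirror payload.Name, payload.Namespace, and
	// payload.Metadata in server.go; the exact JSON tags are an
	// assumption, as is the "podinfo" canary in namespace "test".
	body, _ := json.Marshal(map[string]interface{}{
		"name":      "podinfo",
		"namespace": "test",
		"metadata": map[string]string{
			"type": "helmv3",
			"cmd":  "test run podinfo --cleanup -n test",
		},
	})

	resp, err := http.Post("http://flagger-helmtester.kube-system/",
		"application/json", bytes.NewReader(body))
	if err != nil {
		log.Fatal(err)
	}
	defer resp.Body.Close()

	// 200 means the helm test passed; 500 carries the combined command output
	log.Printf("loadtester replied: %s", resp.Status)
}
```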