Merge remote-tracking branch 'upstream/master'

Robert Kwolek 2020-11-12 20:47:37 +01:00
commit 67dca9c7ad
65 changed files with 1570 additions and 178 deletions


@ -94,7 +94,12 @@ jobs:
- run: test/container-build.sh
- run: test/e2e-kind.sh v1.18.2
- run: test/e2e-istio.sh
- run: test/e2e-istio-dependencies.sh
- run: test/e2e-istio-tests.sh
- run: test/e2e-istio-tests-skip-analysis.sh
- run: test/e2e-kubernetes-cleanup.sh
- run: test/e2e-istio-dependencies.sh
- run: test/e2e-istio-tests-delegate.sh
e2e-gloo-testing:
machine: true


@ -2,6 +2,27 @@
All notable changes to this project are documented in this file.
## 1.2.0 (2020-09-29)
Add support for New Relic metrics
#### Features
- Add New Relic as a metrics provider
[#691](https://github.com/weaveworks/flagger/pull/691)
#### Improvements
- Derive the label selector value from the target matchLabel
[#685](https://github.com/weaveworks/flagger/pull/685)
- Preserve Skipper predicates
[#681](https://github.com/weaveworks/flagger/pull/681)
#### Fixes
- Do not promote when not ready on skip analysis
[#695](https://github.com/weaveworks/flagger/pull/695)
## 1.1.0 (2020-08-18)
Add support for Skipper ingress controller


@ -50,6 +50,7 @@ List of organizations using Flagger:
* [MediaMarktSaturn](https://www.mediamarktsaturn.com)
* [Weaveworks](https://weave.works)
* [Jumia Group](https://group.jumia.com)
* [eLife](https://elifesciences.org/)
If you are using Flagger, please submit a PR to add your organization to the list!
@ -210,10 +211,20 @@ For more details on how the canary analysis and promotion works please [read the
### Roadmap
#### [GitOps Toolkit](https://github.com/fluxcd/toolkit) compatibility
* Migrate Flagger to Kubernetes controller-runtime and [kubebuilder](https://github.com/kubernetes-sigs/kubebuilder)
* Make the Canary status compatible with [kstatus](https://github.com/kubernetes-sigs/cli-utils)
* Make Flagger emit Kubernetes events compatible with Flux v2 notification API
* Migrate CI to GitHub Actions and publish AMD64, ARM64 and ARMv7 container images
* Integrate Flagger into Flux v2 as the progressive delivery component
#### Integrations
* Add support for Kubernetes [Ingress v2](https://github.com/kubernetes-sigs/service-apis)
* Integrate with other service mesh like Consul Connect and ingress controllers like HAProxy, ALB
* Integrate with other metrics providers like InfluxDB, Stackdriver, SignalFX
* Add support for comparing the canary metrics to the primary ones and do the validation based on the derivation between the two
* Add support for SMI compatible service mesh solutions like Open Service Mesh and Consul Connect
* Add support for ingress controllers like HAProxy and ALB
* Add support for metrics providers like InfluxDB, Stackdriver, SignalFX
### Contributing


@ -164,6 +164,9 @@ spec:
type: array
items:
type: string
delegation:
description: enable behaving as a delegate VirtualService
type: boolean
match:
description: URI match conditions
type: array
@ -813,6 +816,7 @@ spec:
- influxdb
- datadog
- cloudwatch
- newrelic
address:
description: API address of this provider
type: string


@ -22,7 +22,7 @@ spec:
serviceAccountName: flagger
containers:
- name: flagger
image: weaveworks/flagger:1.1.0
image: weaveworks/flagger:1.2.0
imagePullPolicy: IfNotPresent
ports:
- name: http


@ -1,7 +1,7 @@
apiVersion: v1
name: flagger
version: 1.1.0
appVersion: 1.1.0
version: 1.2.0
appVersion: 1.2.0
kubeVersion: ">=1.11.0-0"
engine: gotpl
description: Flagger is a progressive delivery operator for Kubernetes


@ -125,6 +125,7 @@ Parameter | Description | Default
`serviceAccount.name` | The name of the service account to create or use. If not set and `serviceAccount.create` is `true`, a name is generated using the Flagger fullname | `""`
`serviceAccount.annotations` | Annotations for service account | `{}`
`ingressAnnotationsPrefix` | Annotations prefix for ingresses | `custom.ingress.kubernetes.io`
`includeLabelPrefix` | List of prefixes of labels that are copied when creating primary deployments or daemonsets. Use * to include all | `""`
`rbac.create` | If `true`, create and use RBAC resources | `true`
`rbac.pspEnabled` | If `true`, create and use a restricted pod security policy | `false`
`crd.create` | If `true`, create Flagger's CRDs (should be enabled for Helm v2 only) | `false`
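For illustration, a hypothetical `values.yaml` excerpt for the new `includeLabelPrefix` setting (the prefix values below are placeholders):

```yaml
# copy labels with these prefixes from the target workload to the
# generated primary deployment/daemonset; "*" copies all labels
includeLabelPrefix: "app.kubernetes.io/,team"
```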


@ -164,6 +164,9 @@ spec:
type: array
items:
type: string
delegation:
description: enable behaving as a delegate VirtualService
type: boolean
match:
description: URI match conditions
type: array
@ -813,6 +816,7 @@ spec:
- influxdb
- datadog
- cloudwatch
- newrelic
address:
description: API address of this provider
type: string


@ -106,12 +106,21 @@ spec:
{{- if .Values.ingressAnnotationsPrefix }}
- -ingress-annotations-prefix={{ .Values.ingressAnnotationsPrefix }}
{{- end }}
{{- if .Values.includeLabelPrefix }}
- -include-label-prefix={{ .Values.includeLabelPrefix }}
{{- end }}
{{- if .Values.ingressClass }}
- -ingress-class={{ .Values.ingressClass }}
{{- end }}
{{- if .Values.eventWebhook }}
- -event-webhook={{ .Values.eventWebhook }}
{{- end }}
{{- if .Values.kubeconfigQPS }}
- -kubeconfig-qps={{ .Values.kubeconfigQPS }}
{{- end }}
{{- if .Values.kubeconfigBurst }}
- -kubeconfig-burst={{ .Values.kubeconfigBurst }}
{{- end }}
{{- if .Values.istio.kubeconfig.secretName }}
- -kubeconfig-service-mesh=/tmp/istio-host/{{ .Values.istio.kubeconfig.key }}
{{- end }}


@ -2,7 +2,7 @@
image:
repository: weaveworks/flagger
tag: 1.1.0
tag: 1.2.0
pullPolicy: IfNotPresent
pullSecret:
@ -124,9 +124,12 @@ tolerations: []
prometheus:
# to be used with ingress controllers
install: false
image: docker.io/prom/prometheus:v2.19.0
image: docker.io/prom/prometheus:v2.21.0
retention: 2h
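# client-side rate limits for the Kubernetes API client; when left empty,
# the flag defaults apply (QPS 100, Burst 250)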
kubeconfigQPS: ""
kubeconfigBurst: ""
# Istio multi-cluster service mesh (shared control plane single-network)
# https://istio.io/docs/setup/install/multicluster/shared-vpn/
istio:


@ -1,7 +1,7 @@
apiVersion: v1
name: grafana
version: 1.4.0
appVersion: 6.5.1
version: 1.5.0
appVersion: 7.2.0
description: Grafana dashboards for monitoring Flagger canary deployments
icon: https://raw.githubusercontent.com/weaveworks/flagger/master/docs/logo/weaveworks.png
home: https://flagger.app


@ -6,7 +6,7 @@ replicaCount: 1
image:
repository: grafana/grafana
tag: 6.5.1
tag: 7.2.0
pullPolicy: IfNotPresent
podAnnotations: {}
@ -32,7 +32,7 @@ affinity: {}
user: admin
password:
# Istio Prometheus instance
# Prometheus instance
url: http://prometheus:9090
# Weave Cloud instance token


@ -1,6 +1,6 @@
apiVersion: v1
version: 3.1.1
appVersion: 3.1.0
version: 5.0.0
appVersion: 5.0.0
name: podinfo
engine: gotpl
description: Flagger canary deployment demo application


@ -1,7 +1,7 @@
# Default values for podinfo.
image:
repository: stefanprodan/podinfo
tag: 3.1.0
repository: ghcr.io/stefanprodan/podinfo
tag: 5.0.0
pullPolicy: IfNotPresent
podAnnotations: {}


@ -38,11 +38,14 @@ import (
var (
masterURL string
kubeconfig string
kubeconfigQPS int
kubeconfigBurst int
metricsServer string
controlLoopInterval time.Duration
logLevel string
port string
msteamsURL string
includeLabelPrefix string
slackURL string
slackUser string
slackChannel string
@ -64,6 +67,8 @@ var (
func init() {
flag.StringVar(&kubeconfig, "kubeconfig", "", "Path to a kubeconfig. Only required if out-of-cluster.")
flag.IntVar(&kubeconfigQPS, "kubeconfig-qps", 100, "Set QPS for kubeconfig.")
flag.IntVar(&kubeconfigBurst, "kubeconfig-burst", 250, "Set Burst for kubeconfig.")
flag.StringVar(&masterURL, "master", "", "The address of the Kubernetes API server. Overrides any value in kubeconfig. Only required if out-of-cluster.")
flag.StringVar(&metricsServer, "metrics-server", "http://prometheus:9090", "Prometheus URL.")
flag.DurationVar(&controlLoopInterval, "control-loop-interval", 10*time.Second, "Kubernetes API sync interval.")
@ -74,6 +79,7 @@ func init() {
flag.StringVar(&slackChannel, "slack-channel", "", "Slack channel.")
flag.StringVar(&eventWebhook, "event-webhook", "", "Webhook for publishing flagger events")
flag.StringVar(&msteamsURL, "msteams-url", "", "MS Teams incoming webhook URL.")
flag.StringVar(&includeLabelPrefix, "include-label-prefix", "", "List of prefixes of labels that are copied when creating primary deployments or daemonsets. Use * to include all.")
flag.IntVar(&threadiness, "threadiness", 2, "Worker concurrency.")
flag.BoolVar(&zapReplaceGlobals, "zap-replace-globals", false, "Whether to change the logging level of the global zap logger.")
flag.StringVar(&zapEncoding, "zap-encoding", "json", "Zap logger encoding.")
@ -116,6 +122,9 @@ func main() {
logger.Fatalf("Error building kubeconfig: %v", err)
}
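// apply the configurable client-side rate limits before building the
// clientsets, overriding client-go's much lower defaults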
cfg.QPS = float32(kubeconfigQPS)
cfg.Burst = kubeconfigBurst
kubeClient, err := kubernetes.NewForConfig(cfg)
if err != nil {
logger.Fatalf("Error building kubernetes clientset: %v", err)
@ -135,6 +144,9 @@ func main() {
logger.Fatalf("Error building host kubeconfig: %v", err)
}
cfgHost.QPS = float32(kubeconfigQPS)
cfgHost.Burst = kubeconfigBurst
meshClient, err := clientset.NewForConfig(cfgHost)
if err != nil {
logger.Fatalf("Error building mesh clientset: %v", err)
@ -184,7 +196,9 @@ func main() {
configTracker = &canary.NopTracker{}
}
canaryFactory := canary.NewFactory(kubeClient, flaggerClient, configTracker, labels, logger)
includeLabelPrefixArray := strings.Split(includeLabelPrefix, ",")
canaryFactory := canary.NewFactory(kubeClient, flaggerClient, configTracker, labels, includeLabelPrefixArray, logger)
c := controller.NewController(
kubeClient,


@ -554,6 +554,85 @@ spec:
Flagger works for user facing apps exposed outside the cluster via an ingress gateway
and for backend HTTP APIs that are accessible only from inside the mesh.
If `delegation` is enabled, Flagger will generate an Istio VirtualService without hosts and gateways,
making the service compatible with Istio delegation.
```yaml
apiVersion: flagger.app/v1beta1
kind: Canary
metadata:
name: backend
namespace: test
spec:
service:
delegation: true
port: 9898
targetRef:
apiVersion: v1
kind: Deployment
name: podinfo
analysis:
interval: 15s
threshold: 15
maxWeight: 30
stepWeight: 10
```
Based on the above spec, Flagger will create the following virtual service:
```yaml
apiVersion: networking.istio.io/v1alpha3
kind: VirtualService
metadata:
name: backend
namespace: test
ownerReferences:
- apiVersion: flagger.app/v1beta1
blockOwnerDeletion: true
controller: true
kind: Canary
name: backend
uid: 58562662-5e10-4512-b269-2b789c1b30fe
spec:
http:
- route:
- destination:
host: podinfo-primary
weight: 100
- destination:
host: podinfo-canary
weight: 0
```
A frontend VirtualService like the following can then forward traffic for `/podinfo` to the delegate VirtualService above.
```yaml
apiVersion: networking.istio.io/v1alpha3
kind: VirtualService
metadata:
name: frontend
namespace: test
spec:
gateways:
- public-gateway.istio-system.svc.cluster.local
- mesh
hosts:
- frontend.example.com
- frontend
http:
- match:
- uri:
prefix: /podinfo
rewrite:
uri: /
delegate:
name: backend
namespace: test
```
Note that the pilot environment variable `PILOT_ENABLE_VIRTUAL_SERVICE_DELEGATE` must also be set.
(For the use of Istio Delegation, you can refer to the documentation of [Virtual Service](https://istio.io/latest/docs/reference/config/networking/virtual-service/#Delegate) and [pilot environment variables](https://istio.io/latest/docs/reference/commands/pilot-discovery/#envvars).)
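As a sketch of that prerequisite (assuming an IstioOperator-based install), the pilot environment variable can be enabled like so:

```yaml
apiVersion: install.istio.io/v1alpha1
kind: IstioOperator
spec:
  components:
    pilot:
      k8s:
        env:
          - name: PILOT_ENABLE_VIRTUAL_SERVICE_DELEGATE
            value: "true"
```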
### Istio Ingress Gateway
**How can I expose multiple canaries on the same external domain?**


@ -12,6 +12,8 @@ Install Istio with telemetry support and Prometheus:
```bash
istioctl manifest apply --set profile=default
# istio 1.7 or newer
istioctl install --set profile=default
```
Install Flagger using Kustomize (kubectl >= 1.14) in the `istio-system` namespace:


@ -345,7 +345,7 @@ podinfod=stefanprodan/podinfo:3.1.3
Generate high response latency:
```bash
watch curl http://app.exmaple.com/delay/2
watch curl http://app.example.com/delay/2
```
Watch Flagger logs:


@ -351,7 +351,7 @@ podinfod=stefanprodan/podinfo:4.0.6
Generate high response latency:
```bash
watch curl http://app.exmaple.com/delay/2
watch curl http://app.example.com/delay/2
```
Watch Flagger logs:


@ -314,3 +314,56 @@ Reference the template in the canary analysis:
```
**Note** that Flagger needs the AWS IAM permission `cloudwatch:GetMetricData` to use this provider.
### New Relic
You can create custom metric checks using the New Relic provider.
Create a secret with your New Relic Insights credentials:
```yaml
apiVersion: v1
kind: Secret
metadata:
name: newrelic
namespace: istio-system
stringData:
newrelic_account_id: your-account-id
newrelic_query_key: your-insights-query-key
```
New Relic template example:
```yaml
apiVersion: flagger.app/v1beta1
kind: MetricTemplate
metadata:
name: newrelic-error-rate
namespace: ingress-nginx
spec:
provider:
type: newrelic
secretRef:
name: newrelic
query: |
SELECT
filter(sum(nginx_ingress_controller_requests), WHERE status >= '500') /
sum(nginx_ingress_controller_requests) * 100
FROM Metric
WHERE metricName = 'nginx_ingress_controller_requests'
AND ingress = '{{ ingress }}' AND namespace = '{{ namespace }}'
```
Reference the template in the canary analysis:
```yaml
analysis:
metrics:
- name: "error rate"
templateRef:
name: newrelic-error-rate
namespace: ingress-nginx
thresholdRange:
max: 5
interval: 1m
```


@ -164,6 +164,9 @@ spec:
type: array
items:
type: string
delegation:
description: enable behaving as a delegate VirtualService
type: boolean
match:
description: URI match conditions
type: array
@ -813,6 +816,7 @@ spec:
- influxdb
- datadog
- cloudwatch
- newrelic
address:
description: API address of this provider
type: string


@ -8,4 +8,4 @@ resources:
- deployment.yaml
images:
- name: weaveworks/flagger
newTag: 1.1.0
newTag: 1.2.0


@ -19,7 +19,7 @@ spec:
serviceAccountName: flagger-prometheus
containers:
- name: prometheus
image: prom/prometheus:v2.19.0
image: prom/prometheus:v2.21.0
imagePullPolicy: IfNotPresent
args:
- '--storage.tsdb.retention=2h'


@ -137,6 +137,12 @@ type CanaryService struct {
// +optional
Hosts []string `json:"hosts,omitempty"`
// If enabled, Flagger would generate Istio VirtualServices without hosts and gateway,
// making the service compatible with Istio delegation. Note that pilot env
// `PILOT_ENABLE_VIRTUAL_SERVICE_DELEGATE` must also be set.
// +optional
Delegation bool `json:"delegation,omitempty"`
// TrafficPolicy attached to the generated Istio destination rules
// +optional
TrafficPolicy *istiov1alpha3.TrafficPolicy `json:"trafficPolicy,omitempty"`


@ -25,7 +25,8 @@ func TestConfigIsDisabled(t *testing.T) {
func TestConfigTracker_ConfigMaps(t *testing.T) {
t.Run("deployment", func(t *testing.T) {
mocks := newDeploymentFixture()
dc := deploymentConfigs{name: "podinfo", label: "name", labelValue: "podinfo"}
mocks := newDeploymentFixture(dc)
configMap := newDeploymentControllerTestConfigMap()
configMapProjected := newDeploymentControllerTestConfigProjected()
@ -89,7 +90,8 @@ func TestConfigTracker_ConfigMaps(t *testing.T) {
})
t.Run("daemonset", func(t *testing.T) {
mocks := newDaemonSetFixture()
dc := daemonsetConfigs{name: "podinfo", label: "name", labelValue: "podinfo"}
mocks := newDaemonSetFixture(dc)
configMap := newDaemonSetControllerTestConfigMap()
configMapProjected := newDaemonSetControllerTestConfigProjected()
@ -156,7 +158,8 @@ func TestConfigTracker_ConfigMaps(t *testing.T) {
func TestConfigTracker_Secrets(t *testing.T) {
t.Run("deployment", func(t *testing.T) {
mocks := newDeploymentFixture()
dc := deploymentConfigs{name: "podinfo", label: "name", labelValue: "podinfo"}
mocks := newDeploymentFixture(dc)
secret := newDeploymentControllerTestSecret()
secretProjected := newDeploymentControllerTestSecretProjected()
@ -220,7 +223,8 @@ func TestConfigTracker_Secrets(t *testing.T) {
})
t.Run("daemonset", func(t *testing.T) {
mocks := newDaemonSetFixture()
dc := daemonsetConfigs{name: "podinfo", label: "name", labelValue: "podinfo"}
mocks := newDaemonSetFixture(dc)
secret := newDaemonSetControllerTestSecret()
secretProjected := newDaemonSetControllerTestSecretProjected()


@ -7,7 +7,7 @@ import (
type Controller interface {
IsPrimaryReady(canary *flaggerv1.Canary) error
IsCanaryReady(canary *flaggerv1.Canary) (bool, error)
GetMetadata(canary *flaggerv1.Canary) (string, map[string]int32, error)
GetMetadata(canary *flaggerv1.Canary) (string, string, map[string]int32, error)
SyncStatus(canary *flaggerv1.Canary, status flaggerv1.CanaryStatus) error
SetStatusFailedChecks(canary *flaggerv1.Canary, val int) error
SetStatusWeight(canary *flaggerv1.Canary, val int) error


@ -22,11 +22,12 @@ var (
// DaemonSetController is managing the operations for Kubernetes DaemonSet kind
type DaemonSetController struct {
kubeClient kubernetes.Interface
flaggerClient clientset.Interface
logger *zap.SugaredLogger
configTracker Tracker
labels []string
kubeClient kubernetes.Interface
flaggerClient clientset.Interface
logger *zap.SugaredLogger
configTracker Tracker
labels []string
includeLabelPrefix []string
}
func (c *DaemonSetController) ScaleToZero(cd *flaggerv1.Canary) error {
@ -76,7 +77,7 @@ func (c *DaemonSetController) ScaleFromZero(cd *flaggerv1.Canary) error {
// Initialize creates the primary DaemonSet, scales down the canary DaemonSet,
// and returns the pod selector label and container ports
func (c *DaemonSetController) Initialize(cd *flaggerv1.Canary) (err error) {
err = c.createPrimaryDaemonSet(cd)
err = c.createPrimaryDaemonSet(cd, c.includeLabelPrefix)
if err != nil {
return fmt.Errorf("createPrimaryDaemonSet failed: %w", err)
}
@ -107,7 +108,8 @@ func (c *DaemonSetController) Promote(cd *flaggerv1.Canary) error {
return fmt.Errorf("damonset %s.%s get query error: %v", targetName, cd.Namespace, err)
}
label, err := c.getSelectorLabel(canary)
label, labelValue, err := c.getSelectorLabel(canary)
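// the primary workload keeps the same selector key, but its value gets a "-primary" suffix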
primaryLabelValue := fmt.Sprintf("%s-primary", labelValue)
if err != nil {
return fmt.Errorf("getSelectorLabel failed: %w", err)
}
@ -146,7 +148,7 @@ func (c *DaemonSetController) Promote(cd *flaggerv1.Canary) error {
}
primaryCopy.Spec.Template.Annotations = annotations
primaryCopy.Spec.Template.Labels = makePrimaryLabels(canary.Spec.Template.Labels, primaryName, label)
primaryCopy.Spec.Template.Labels = makePrimaryLabels(canary.Spec.Template.Labels, primaryLabelValue, label)
// apply update
_, err = c.kubeClient.AppsV1().DaemonSets(cd.Namespace).Update(context.TODO(), primaryCopy, metav1.UpdateOptions{})
@ -179,27 +181,27 @@ func (c *DaemonSetController) HasTargetChanged(cd *flaggerv1.Canary) (bool, erro
}
// GetMetadata returns the pod label selector and svc ports
func (c *DaemonSetController) GetMetadata(cd *flaggerv1.Canary) (string, map[string]int32, error) {
func (c *DaemonSetController) GetMetadata(cd *flaggerv1.Canary) (string, string, map[string]int32, error) {
targetName := cd.Spec.TargetRef.Name
canaryDae, err := c.kubeClient.AppsV1().DaemonSets(cd.Namespace).Get(context.TODO(), targetName, metav1.GetOptions{})
if err != nil {
return "", nil, fmt.Errorf("daemonset %s.%s get query error: %w", targetName, cd.Namespace, err)
return "", "", nil, fmt.Errorf("daemonset %s.%s get query error: %w", targetName, cd.Namespace, err)
}
label, err := c.getSelectorLabel(canaryDae)
label, labelValue, err := c.getSelectorLabel(canaryDae)
if err != nil {
return "", nil, fmt.Errorf("getSelectorLabel failed: %w", err)
return "", "", nil, fmt.Errorf("getSelectorLabel failed: %w", err)
}
var ports map[string]int32
if cd.Spec.Service.PortDiscovery {
ports = getPorts(cd, canaryDae.Spec.Template.Spec.Containers)
}
return label, ports, nil
return label, labelValue, ports, nil
}
func (c *DaemonSetController) createPrimaryDaemonSet(cd *flaggerv1.Canary) error {
func (c *DaemonSetController) createPrimaryDaemonSet(cd *flaggerv1.Canary, includeLabelPrefix []string) error {
targetName := cd.Spec.TargetRef.Name
primaryName := fmt.Sprintf("%s-primary", cd.Spec.TargetRef.Name)
@ -214,7 +216,11 @@ func (c *DaemonSetController) createPrimaryDaemonSet(cd *flaggerv1.Canary) error
targetName, cd.Namespace, canaryDae.Spec.UpdateStrategy.Type)
}
label, err := c.getSelectorLabel(canaryDae)
// Create the labels map but filter unwanted labels
labels := includeLabelsByPrefix(canaryDae.Labels, includeLabelPrefix)
label, labelValue, err := c.getSelectorLabel(canaryDae)
primaryLabelValue := fmt.Sprintf("%s-primary", labelValue)
if err != nil {
return fmt.Errorf("getSelectorLabel failed: %w", err)
}
@ -237,11 +243,10 @@ func (c *DaemonSetController) createPrimaryDaemonSet(cd *flaggerv1.Canary) error
// create primary daemonset
primaryDae = &appsv1.DaemonSet{
ObjectMeta: metav1.ObjectMeta{
Name: primaryName,
Namespace: cd.Namespace,
Labels: map[string]string{
label: primaryName,
},
Name: primaryName,
Namespace: cd.Namespace,
Labels: makePrimaryLabels(labels, primaryLabelValue, label),
Annotations: canaryDae.Annotations,
OwnerReferences: []metav1.OwnerReference{
*metav1.NewControllerRef(cd, schema.GroupVersionKind{
Group: flaggerv1.SchemeGroupVersion.Group,
@ -256,12 +261,12 @@ func (c *DaemonSetController) createPrimaryDaemonSet(cd *flaggerv1.Canary) error
UpdateStrategy: canaryDae.Spec.UpdateStrategy,
Selector: &metav1.LabelSelector{
MatchLabels: map[string]string{
label: primaryName,
label: primaryLabelValue,
},
},
Template: corev1.PodTemplateSpec{
ObjectMeta: metav1.ObjectMeta{
Labels: makePrimaryLabels(canaryDae.Spec.Template.Labels, primaryName, label),
Labels: makePrimaryLabels(canaryDae.Spec.Template.Labels, primaryLabelValue, label),
Annotations: annotations,
},
// update spec with the primary secrets and config maps
@ -281,14 +286,14 @@ func (c *DaemonSetController) createPrimaryDaemonSet(cd *flaggerv1.Canary) error
}
// getSelectorLabel returns the selector match label
func (c *DaemonSetController) getSelectorLabel(daemonSet *appsv1.DaemonSet) (string, error) {
func (c *DaemonSetController) getSelectorLabel(daemonSet *appsv1.DaemonSet) (string, string, error) {
for _, l := range c.labels {
if _, ok := daemonSet.Spec.Selector.MatchLabels[l]; ok {
return l, nil
return l, daemonSet.Spec.Selector.MatchLabels[l], nil
}
}
return "", fmt.Errorf(
return "", "", fmt.Errorf(
"daemonset %s.%s spec.selector.matchLabels must contain one of %v'",
daemonSet.Name, daemonSet.Namespace, c.labels,
)


@ -2,6 +2,7 @@ package canary
import (
"context"
"fmt"
"testing"
"github.com/stretchr/testify/assert"
@ -14,22 +15,47 @@ import (
flaggerv1 "github.com/weaveworks/flagger/pkg/apis/flagger/v1beta1"
)
func TestDaemonSetController_Sync(t *testing.T) {
mocks := newDaemonSetFixture()
func TestDaemonSetController_Sync_ConsistentNaming(t *testing.T) {
dc := daemonsetConfigs{name: "podinfo", label: "name", labelValue: "podinfo"}
mocks := newDaemonSetFixture(dc)
err := mocks.controller.Initialize(mocks.canary)
require.NoError(t, err)
daePrimary, err := mocks.kubeClient.AppsV1().DaemonSets("default").Get(context.TODO(), "podinfo-primary", metav1.GetOptions{})
daePrimary, err := mocks.kubeClient.AppsV1().DaemonSets("default").Get(context.TODO(), fmt.Sprintf("%s-primary", dc.name), metav1.GetOptions{})
require.NoError(t, err)
dae := newDaemonSetControllerTestPodInfo()
dae := newDaemonSetControllerTestPodInfo(dc)
primaryImage := daePrimary.Spec.Template.Spec.Containers[0].Image
sourceImage := dae.Spec.Template.Spec.Containers[0].Image
assert.Equal(t, primaryImage, sourceImage)
primarySelectorValue := daePrimary.Spec.Selector.MatchLabels[dc.label]
sourceSelectorValue := dae.Spec.Selector.MatchLabels[dc.label]
assert.Equal(t, primarySelectorValue, fmt.Sprintf("%s-primary", sourceSelectorValue))
}
func TestDaemonSetController_Sync_InconsistentNaming(t *testing.T) {
dc := daemonsetConfigs{name: "podinfo-service", label: "name", labelValue: "podinfo"}
mocks := newDaemonSetFixture(dc)
err := mocks.controller.Initialize(mocks.canary)
require.NoError(t, err)
daePrimary, err := mocks.kubeClient.AppsV1().DaemonSets("default").Get(context.TODO(), fmt.Sprintf("%s-primary", dc.name), metav1.GetOptions{})
require.NoError(t, err)
dae := newDaemonSetControllerTestPodInfo(dc)
primaryImage := daePrimary.Spec.Template.Spec.Containers[0].Image
sourceImage := dae.Spec.Template.Spec.Containers[0].Image
assert.Equal(t, primaryImage, sourceImage)
primarySelectorValue := daePrimary.Spec.Selector.MatchLabels[dc.label]
sourceSelectorValue := dae.Spec.Selector.MatchLabels[dc.label]
assert.Equal(t, primarySelectorValue, fmt.Sprintf("%s-primary", sourceSelectorValue))
}
func TestDaemonSetController_Promote(t *testing.T) {
mocks := newDaemonSetFixture()
dc := daemonsetConfigs{name: "podinfo", label: "name", labelValue: "podinfo"}
mocks := newDaemonSetFixture(dc)
err := mocks.controller.Initialize(mocks.canary)
require.NoError(t, err)
@ -58,7 +84,8 @@ func TestDaemonSetController_Promote(t *testing.T) {
}
func TestDaemonSetController_NoConfigTracking(t *testing.T) {
mocks := newDaemonSetFixture()
dc := daemonsetConfigs{name: "podinfo", label: "name", labelValue: "podinfo"}
mocks := newDaemonSetFixture(dc)
mocks.controller.configTracker = &NopTracker{}
err := mocks.controller.Initialize(mocks.canary)
@ -75,7 +102,8 @@ func TestDaemonSetController_NoConfigTracking(t *testing.T) {
}
func TestDaemonSetController_HasTargetChanged(t *testing.T) {
mocks := newDaemonSetFixture()
dc := daemonsetConfigs{name: "podinfo", label: "name", labelValue: "podinfo"}
mocks := newDaemonSetFixture(dc)
err := mocks.controller.Initialize(mocks.canary)
require.NoError(t, err)
@ -163,7 +191,8 @@ func TestDaemonSetController_HasTargetChanged(t *testing.T) {
func TestDaemonSetController_Scale(t *testing.T) {
t.Run("ScaleToZero", func(t *testing.T) {
mocks := newDaemonSetFixture()
dc := daemonsetConfigs{name: "podinfo", label: "name", labelValue: "podinfo"}
mocks := newDaemonSetFixture(dc)
err := mocks.controller.Initialize(mocks.canary)
require.NoError(t, err)
@ -179,7 +208,8 @@ func TestDaemonSetController_Scale(t *testing.T) {
}
})
t.Run("ScaleFromZeo", func(t *testing.T) {
mocks := newDaemonSetFixture()
dc := daemonsetConfigs{name: "podinfo", label: "name", labelValue: "podinfo"}
mocks := newDaemonSetFixture(dc)
err := mocks.controller.Initialize(mocks.canary)
require.NoError(t, err)
@ -197,7 +227,8 @@ func TestDaemonSetController_Scale(t *testing.T) {
}
func TestDaemonSetController_Finalize(t *testing.T) {
mocks := newDaemonSetFixture()
dc := daemonsetConfigs{name: "podinfo", label: "name", labelValue: "podinfo"}
mocks := newDaemonSetFixture(dc)
err := mocks.controller.Initialize(mocks.canary)
require.NoError(t, err)


@ -23,14 +23,20 @@ type daemonSetControllerFixture struct {
logger *zap.SugaredLogger
}
func newDaemonSetFixture() daemonSetControllerFixture {
type daemonsetConfigs struct {
name string
labelValue string
label string
}
func newDaemonSetFixture(dc daemonsetConfigs) daemonSetControllerFixture {
// init canary
canary := newDaemonSetControllerTestCanary()
canary := newDaemonSetControllerTestCanary(dc)
flaggerClient := fakeFlagger.NewSimpleClientset(canary)
// init kube clientset and register mock objects
kubeClient := fake.NewSimpleClientset(
newDaemonSetControllerTestPodInfo(),
newDaemonSetControllerTestPodInfo(dc),
newDaemonSetControllerTestConfigMap(),
newDaemonSetControllerTestConfigMapEnv(),
newDaemonSetControllerTestConfigMapVol(),
@ -264,7 +270,7 @@ func newDaemonSetControllerTestSecretTrackerDisabled() *corev1.Secret {
}
}
func newDaemonSetControllerTestCanary() *flaggerv1.Canary {
func newDaemonSetControllerTestCanary(dc daemonsetConfigs) *flaggerv1.Canary {
cd := &flaggerv1.Canary{
TypeMeta: metav1.TypeMeta{APIVersion: flaggerv1.SchemeGroupVersion.String()},
ObjectMeta: metav1.ObjectMeta{
@ -273,7 +279,7 @@ func newDaemonSetControllerTestCanary() *flaggerv1.Canary {
},
Spec: flaggerv1.CanarySpec{
TargetRef: flaggerv1.CrossNamespaceObjectReference{
Name: "podinfo",
Name: dc.name,
APIVersion: "apps/v1",
Kind: "DaemonSet",
},
@ -282,23 +288,23 @@ func newDaemonSetControllerTestCanary() *flaggerv1.Canary {
return cd
}
func newDaemonSetControllerTestPodInfo() *appsv1.DaemonSet {
func newDaemonSetControllerTestPodInfo(dc daemonsetConfigs) *appsv1.DaemonSet {
d := &appsv1.DaemonSet{
TypeMeta: metav1.TypeMeta{APIVersion: appsv1.SchemeGroupVersion.String()},
ObjectMeta: metav1.ObjectMeta{
Namespace: "default",
Name: "podinfo",
Name: dc.name,
},
Spec: appsv1.DaemonSetSpec{
Selector: &metav1.LabelSelector{
MatchLabels: map[string]string{
"name": "podinfo",
dc.label: dc.labelValue,
},
},
Template: corev1.PodTemplateSpec{
ObjectMeta: metav1.ObjectMeta{
Labels: map[string]string{
"name": "podinfo",
dc.label: dc.labelValue,
},
},
Spec: corev1.PodSpec{


@ -12,7 +12,8 @@ import (
)
func TestDaemonSetController_IsReady(t *testing.T) {
mocks := newDaemonSetFixture()
dc := daemonsetConfigs{name: "podinfo", label: "name", labelValue: "podinfo"}
mocks := newDaemonSetFixture(dc)
err := mocks.controller.Initialize(mocks.canary)
require.NoError(t, err)
@ -24,7 +25,8 @@ func TestDaemonSetController_IsReady(t *testing.T) {
}
func TestDaemonSetController_isDaemonSetReady(t *testing.T) {
mocks := newDaemonSetFixture()
dc := daemonsetConfigs{name: "podinfo", label: "name", labelValue: "podinfo"}
mocks := newDaemonSetFixture(dc)
cd := &flaggerv1.Canary{}
// observed generation is less than desired generation


@ -12,7 +12,8 @@ import (
)
func TestDaemonSetController_SyncStatus(t *testing.T) {
mocks := newDaemonSetFixture()
dc := daemonsetConfigs{name: "podinfo", label: "name", labelValue: "podinfo"}
mocks := newDaemonSetFixture(dc)
err := mocks.controller.Initialize(mocks.canary)
require.NoError(t, err)
@ -36,7 +37,8 @@ func TestDaemonSetController_SyncStatus(t *testing.T) {
}
func TestDaemonSetController_SetFailedChecks(t *testing.T) {
mocks := newDaemonSetFixture()
dc := daemonsetConfigs{name: "podinfo", label: "name", labelValue: "podinfo"}
mocks := newDaemonSetFixture(dc)
err := mocks.controller.Initialize(mocks.canary)
require.NoError(t, err)
@ -49,7 +51,8 @@ func TestDaemonSetController_SetFailedChecks(t *testing.T) {
}
func TestDaemonSetController_SetState(t *testing.T) {
mocks := newDaemonSetFixture()
dc := daemonsetConfigs{name: "podinfo", label: "name", labelValue: "podinfo"}
mocks := newDaemonSetFixture(dc)
err := mocks.controller.Initialize(mocks.canary)
require.NoError(t, err)


@ -20,18 +20,19 @@ import (
// DeploymentController is managing the operations for Kubernetes Deployment kind
type DeploymentController struct {
kubeClient kubernetes.Interface
flaggerClient clientset.Interface
logger *zap.SugaredLogger
configTracker Tracker
labels []string
kubeClient kubernetes.Interface
flaggerClient clientset.Interface
logger *zap.SugaredLogger
configTracker Tracker
labels []string
includeLabelPrefix []string
}
// Initialize creates the primary deployment, hpa,
// scales to zero the canary deployment and returns the pod selector label and container ports
func (c *DeploymentController) Initialize(cd *flaggerv1.Canary) (err error) {
primaryName := fmt.Sprintf("%s-primary", cd.Spec.TargetRef.Name)
if err := c.createPrimaryDeployment(cd); err != nil {
if err := c.createPrimaryDeployment(cd, c.includeLabelPrefix); err != nil {
return fmt.Errorf("createPrimaryDeployment failed: %w", err)
}
@ -72,7 +73,8 @@ func (c *DeploymentController) Promote(cd *flaggerv1.Canary) error {
return fmt.Errorf("deployment %s.%s get query error: %w", targetName, cd.Namespace, err)
}
label, err := c.getSelectorLabel(canary)
label, labelValue, err := c.getSelectorLabel(canary)
primaryLabelValue := fmt.Sprintf("%s-primary", labelValue)
if err != nil {
return fmt.Errorf("getSelectorLabel failed: %w", err)
}
@ -107,7 +109,7 @@ func (c *DeploymentController) Promote(cd *flaggerv1.Canary) error {
}
primaryCopy.Spec.Template.Annotations = annotations
primaryCopy.Spec.Template.Labels = makePrimaryLabels(canary.Spec.Template.Labels, primaryName, label)
primaryCopy.Spec.Template.Labels = makePrimaryLabels(canary.Spec.Template.Labels, primaryLabelValue, label)
// apply update
_, err = c.kubeClient.AppsV1().Deployments(cd.Namespace).Update(context.TODO(), primaryCopy, metav1.UpdateOptions{})
@ -181,17 +183,17 @@ func (c *DeploymentController) ScaleFromZero(cd *flaggerv1.Canary) error {
}
// GetMetadata returns the pod label selector and svc ports
func (c *DeploymentController) GetMetadata(cd *flaggerv1.Canary) (string, map[string]int32, error) {
func (c *DeploymentController) GetMetadata(cd *flaggerv1.Canary) (string, string, map[string]int32, error) {
targetName := cd.Spec.TargetRef.Name
canaryDep, err := c.kubeClient.AppsV1().Deployments(cd.Namespace).Get(context.TODO(), targetName, metav1.GetOptions{})
if err != nil {
return "", nil, fmt.Errorf("deployment %s.%s get query error: %w", targetName, cd.Namespace, err)
return "", "", nil, fmt.Errorf("deployment %s.%s get query error: %w", targetName, cd.Namespace, err)
}
label, err := c.getSelectorLabel(canaryDep)
label, labelValue, err := c.getSelectorLabel(canaryDep)
if err != nil {
return "", nil, fmt.Errorf("getSelectorLabel failed: %w", err)
return "", "", nil, fmt.Errorf("getSelectorLabel failed: %w", err)
}
var ports map[string]int32
@ -199,18 +201,22 @@ func (c *DeploymentController) GetMetadata(cd *flaggerv1.Canary) (string, map[st
ports = getPorts(cd, canaryDep.Spec.Template.Spec.Containers)
}
return label, ports, nil
return label, labelValue, ports, nil
}
func (c *DeploymentController) createPrimaryDeployment(cd *flaggerv1.Canary) error {
func (c *DeploymentController) createPrimaryDeployment(cd *flaggerv1.Canary, includeLabelPrefix []string) error {
targetName := cd.Spec.TargetRef.Name
primaryName := fmt.Sprintf("%s-primary", cd.Spec.TargetRef.Name)
canaryDep, err := c.kubeClient.AppsV1().Deployments(cd.Namespace).Get(context.TODO(), targetName, metav1.GetOptions{})
if err != nil {
return fmt.Errorf("deplyoment %s.%s get query error: %w", targetName, cd.Namespace, err)
return fmt.Errorf("deployment %s.%s get query error: %w", targetName, cd.Namespace, err)
}
label, err := c.getSelectorLabel(canaryDep)
// Create the labels map but filter unwanted labels
labels := includeLabelsByPrefix(canaryDep.Labels, includeLabelPrefix)
label, labelValue, err := c.getSelectorLabel(canaryDep)
primaryLabelValue := fmt.Sprintf("%s-primary", labelValue)
if err != nil {
return fmt.Errorf("getSelectorLabel failed: %w", err)
}
@ -238,11 +244,10 @@ func (c *DeploymentController) createPrimaryDeployment(cd *flaggerv1.Canary) err
// create primary deployment
primaryDep = &appsv1.Deployment{
ObjectMeta: metav1.ObjectMeta{
Name: primaryName,
Namespace: cd.Namespace,
Labels: map[string]string{
label: primaryName,
},
Name: primaryName,
Namespace: cd.Namespace,
Labels: makePrimaryLabels(labels, primaryLabelValue, label),
Annotations: canaryDep.Annotations,
OwnerReferences: []metav1.OwnerReference{
*metav1.NewControllerRef(cd, schema.GroupVersionKind{
Group: flaggerv1.SchemeGroupVersion.Group,
@ -259,12 +264,12 @@ func (c *DeploymentController) createPrimaryDeployment(cd *flaggerv1.Canary) err
Strategy: canaryDep.Spec.Strategy,
Selector: &metav1.LabelSelector{
MatchLabels: map[string]string{
label: primaryName,
label: primaryLabelValue,
},
},
Template: corev1.PodTemplateSpec{
ObjectMeta: metav1.ObjectMeta{
Labels: makePrimaryLabels(canaryDep.Spec.Template.Labels, primaryName, label),
Labels: makePrimaryLabels(canaryDep.Spec.Template.Labels, primaryLabelValue, label),
Annotations: annotations,
},
// update spec with the primary secrets and config maps
@ -361,14 +366,14 @@ func (c *DeploymentController) reconcilePrimaryHpa(cd *flaggerv1.Canary, init bo
}
// getSelectorLabel returns the selector match label
func (c *DeploymentController) getSelectorLabel(deployment *appsv1.Deployment) (string, error) {
func (c *DeploymentController) getSelectorLabel(deployment *appsv1.Deployment) (string, string, error) {
for _, l := range c.labels {
if _, ok := deployment.Spec.Selector.MatchLabels[l]; ok {
return l, nil
return l, deployment.Spec.Selector.MatchLabels[l], nil
}
}
return "", fmt.Errorf(
return "", "", fmt.Errorf(
"deployment %s.%s spec.selector.matchLabels must contain one of %v",
deployment.Name, deployment.Namespace, c.labels,
)


@ -2,6 +2,7 @@ package canary
import (
"context"
"fmt"
"testing"
"github.com/stretchr/testify/assert"
@ -14,25 +15,51 @@ import (
flaggerv1 "github.com/weaveworks/flagger/pkg/apis/flagger/v1beta1"
)
func TestDeploymentController_Sync(t *testing.T) {
mocks := newDeploymentFixture()
func TestDeploymentController_Sync_ConsistentNaming(t *testing.T) {
dc := deploymentConfigs{name: "podinfo", label: "name", labelValue: "podinfo"}
mocks := newDeploymentFixture(dc)
mocks.initializeCanary(t)
depPrimary, err := mocks.kubeClient.AppsV1().Deployments("default").Get(context.TODO(), "podinfo-primary", metav1.GetOptions{})
depPrimary, err := mocks.kubeClient.AppsV1().Deployments("default").Get(context.TODO(), fmt.Sprintf("%s-primary", dc.name), metav1.GetOptions{})
require.NoError(t, err)
dep := newDeploymentControllerTest()
dep := newDeploymentControllerTest(dc)
primaryImage := depPrimary.Spec.Template.Spec.Containers[0].Image
sourceImage := dep.Spec.Template.Spec.Containers[0].Image
assert.Equal(t, sourceImage, primaryImage)
primarySelectorValue := depPrimary.Spec.Selector.MatchLabels[dc.label]
assert.Equal(t, primarySelectorValue, fmt.Sprintf("%s-primary", dc.labelValue))
hpaPrimary, err := mocks.kubeClient.AutoscalingV2beta1().HorizontalPodAutoscalers("default").Get(context.TODO(), "podinfo-primary", metav1.GetOptions{})
require.NoError(t, err)
assert.Equal(t, depPrimary.Name, hpaPrimary.Spec.ScaleTargetRef.Name)
}
func TestDeploymentController_Sync_InconsistentNaming(t *testing.T) {
dc := deploymentConfigs{name: "podinfo-service", label: "name", labelValue: "podinfo"}
mocks := newDeploymentFixture(dc)
mocks.initializeCanary(t)
depPrimary, err := mocks.kubeClient.AppsV1().Deployments("default").Get(context.TODO(), fmt.Sprintf("%s-primary", dc.name), metav1.GetOptions{})
require.NoError(t, err)
dep := newDeploymentControllerTest(dc)
primaryImage := depPrimary.Spec.Template.Spec.Containers[0].Image
sourceImage := dep.Spec.Template.Spec.Containers[0].Image
assert.Equal(t, sourceImage, primaryImage)
primarySelectorValue := depPrimary.Spec.Selector.MatchLabels[dc.label]
assert.Equal(t, primarySelectorValue, fmt.Sprintf("%s-primary", dc.labelValue))
hpaPrimary, err := mocks.kubeClient.AutoscalingV2beta1().HorizontalPodAutoscalers("default").Get(context.TODO(), "podinfo-primary", metav1.GetOptions{})
require.NoError(t, err)
assert.Equal(t, depPrimary.Name, hpaPrimary.Spec.ScaleTargetRef.Name)
}
func TestDeploymentController_Promote(t *testing.T) {
mocks := newDeploymentFixture()
dc := deploymentConfigs{name: "podinfo", label: "name", labelValue: "podinfo"}
mocks := newDeploymentFixture(dc)
mocks.initializeCanary(t)
dep2 := newDeploymentControllerTestV2()
@ -72,7 +99,8 @@ func TestDeploymentController_Promote(t *testing.T) {
}
func TestDeploymentController_ScaleToZero(t *testing.T) {
mocks := newDeploymentFixture()
dc := deploymentConfigs{name: "podinfo", label: "name", labelValue: "podinfo"}
mocks := newDeploymentFixture(dc)
mocks.initializeCanary(t)
err := mocks.controller.ScaleToZero(mocks.canary)
@ -84,7 +112,8 @@ func TestDeploymentController_ScaleToZero(t *testing.T) {
}
func TestDeploymentController_NoConfigTracking(t *testing.T) {
mocks := newDeploymentFixture()
dc := deploymentConfigs{name: "podinfo", label: "name", labelValue: "podinfo"}
mocks := newDeploymentFixture(dc)
mocks.controller.configTracker = &NopTracker{}
mocks.initializeCanary(t)
@ -99,7 +128,8 @@ func TestDeploymentController_NoConfigTracking(t *testing.T) {
}
func TestDeploymentController_HasTargetChanged(t *testing.T) {
mocks := newDeploymentFixture()
dc := deploymentConfigs{name: "podinfo", label: "name", labelValue: "podinfo"}
mocks := newDeploymentFixture(dc)
mocks.initializeCanary(t)
// save last applied hash
@ -185,7 +215,8 @@ func TestDeploymentController_HasTargetChanged(t *testing.T) {
}
func TestDeploymentController_Finalize(t *testing.T) {
mocks := newDeploymentFixture()
dc := deploymentConfigs{name: "podinfo", label: "name", labelValue: "podinfo"}
mocks := newDeploymentFixture(dc)
for _, tc := range []struct {
mocks deploymentControllerFixture


@ -29,6 +29,16 @@ type deploymentControllerFixture struct {
logger *zap.SugaredLogger
}
type canaryConfigs struct {
targetName string
}
type deploymentConfigs struct {
name string
labelValue string
label string
}
func (d deploymentControllerFixture) initializeCanary(t *testing.T) {
err := d.controller.Initialize(d.canary)
require.Error(t, err) // not ready yet
@ -51,14 +61,15 @@ func (d deploymentControllerFixture) initializeCanary(t *testing.T) {
require.NoError(t, d.controller.Initialize(d.canary))
}
func newDeploymentFixture() deploymentControllerFixture {
func newDeploymentFixture(dc deploymentConfigs) deploymentControllerFixture {
// init canary
canary := newDeploymentControllerTestCanary()
cc := canaryConfigs{targetName: dc.name}
canary := newDeploymentControllerTestCanary(cc)
flaggerClient := fakeFlagger.NewSimpleClientset(canary)
// init kube clientset and register mock objects
kubeClient := fake.NewSimpleClientset(
newDeploymentControllerTest(),
newDeploymentControllerTest(dc),
newDeploymentControllerTestHPA(),
newDeploymentControllerTestConfigMap(),
newDeploymentControllerTestConfigMapEnv(),
@ -293,7 +304,7 @@ func newDeploymentControllerTestSecretTrackerDisabled() *corev1.Secret {
}
}
func newDeploymentControllerTestCanary() *flaggerv1.Canary {
func newDeploymentControllerTestCanary(cc canaryConfigs) *flaggerv1.Canary {
cd := &flaggerv1.Canary{
TypeMeta: metav1.TypeMeta{APIVersion: flaggerv1.SchemeGroupVersion.String()},
ObjectMeta: metav1.ObjectMeta{
@ -302,7 +313,7 @@ func newDeploymentControllerTestCanary() *flaggerv1.Canary {
},
Spec: flaggerv1.CanarySpec{
TargetRef: flaggerv1.CrossNamespaceObjectReference{
Name: "podinfo",
Name: cc.targetName,
APIVersion: "apps/v1",
Kind: "Deployment",
},
@ -322,23 +333,23 @@ func newDeploymentControllerTestCanary() *flaggerv1.Canary {
return cd
}
func newDeploymentControllerTest() *appsv1.Deployment {
func newDeploymentControllerTest(dc deploymentConfigs) *appsv1.Deployment {
d := &appsv1.Deployment{
TypeMeta: metav1.TypeMeta{APIVersion: appsv1.SchemeGroupVersion.String()},
ObjectMeta: metav1.ObjectMeta{
Namespace: "default",
Name: "podinfo",
Name: dc.name,
},
Spec: appsv1.DeploymentSpec{
Selector: &metav1.LabelSelector{
MatchLabels: map[string]string{
"name": "podinfo",
dc.label: dc.labelValue,
},
},
Template: corev1.PodTemplateSpec{
ObjectMeta: metav1.ObjectMeta{
Labels: map[string]string{
"name": "podinfo",
dc.label: dc.labelValue,
},
},
Spec: corev1.PodSpec{


@ -12,7 +12,8 @@ import (
)
func TestDeploymentController_IsReady(t *testing.T) {
mocks := newDeploymentFixture()
dc := deploymentConfigs{name: "podinfo", label: "name", labelValue: "podinfo"}
mocks := newDeploymentFixture(dc)
mocks.controller.Initialize(mocks.canary)
err := mocks.controller.IsPrimaryReady(mocks.canary)
@ -23,7 +24,8 @@ func TestDeploymentController_IsReady(t *testing.T) {
}
func TestDeploymentController_isDeploymentReady(t *testing.T) {
mocks := newDeploymentFixture()
dc := deploymentConfigs{name: "podinfo", label: "name", labelValue: "podinfo"}
mocks := newDeploymentFixture(dc)
// observed generation is less than desired generation
dp := &appsv1.Deployment{Status: appsv1.DeploymentStatus{ObservedGeneration: -1}}


@ -12,7 +12,8 @@ import (
)
func TestDeploymentController_SyncStatus(t *testing.T) {
mocks := newDeploymentFixture()
dc := deploymentConfigs{name: "podinfo", label: "name", labelValue: "podinfo"}
mocks := newDeploymentFixture(dc)
mocks.initializeCanary(t)
status := flaggerv1.CanaryStatus{
@ -35,7 +36,8 @@ func TestDeploymentController_SyncStatus(t *testing.T) {
}
func TestDeploymentController_SetFailedChecks(t *testing.T) {
mocks := newDeploymentFixture()
dc := deploymentConfigs{name: "podinfo", label: "name", labelValue: "podinfo"}
mocks := newDeploymentFixture(dc)
mocks.initializeCanary(t)
err := mocks.controller.SetStatusFailedChecks(mocks.canary, 1)
@ -47,7 +49,8 @@ func TestDeploymentController_SetFailedChecks(t *testing.T) {
}
func TestDeploymentController_SetState(t *testing.T) {
mocks := newDeploymentFixture()
dc := deploymentConfigs{name: "podinfo", label: "name", labelValue: "podinfo"}
mocks := newDeploymentFixture(dc)
mocks.initializeCanary(t)
err := mocks.controller.SetStatusPhase(mocks.canary, flaggerv1.CanaryPhaseProgressing)


@ -8,34 +8,38 @@ import (
)
type Factory struct {
kubeClient kubernetes.Interface
flaggerClient clientset.Interface
logger *zap.SugaredLogger
configTracker Tracker
labels []string
kubeClient kubernetes.Interface
flaggerClient clientset.Interface
logger *zap.SugaredLogger
configTracker Tracker
labels []string
includeLabelPrefix []string
}
func NewFactory(kubeClient kubernetes.Interface,
flaggerClient clientset.Interface,
configTracker Tracker,
labels []string,
includeLabelPrefix []string,
logger *zap.SugaredLogger) *Factory {
return &Factory{
kubeClient: kubeClient,
flaggerClient: flaggerClient,
logger: logger,
configTracker: configTracker,
labels: labels,
kubeClient: kubeClient,
flaggerClient: flaggerClient,
logger: logger,
configTracker: configTracker,
labels: labels,
includeLabelPrefix: includeLabelPrefix,
}
}
func (factory *Factory) Controller(kind string) Controller {
deploymentCtrl := &DeploymentController{
logger: factory.logger,
kubeClient: factory.kubeClient,
flaggerClient: factory.flaggerClient,
labels: factory.labels,
configTracker: factory.configTracker,
logger: factory.logger,
kubeClient: factory.kubeClient,
flaggerClient: factory.flaggerClient,
labels: factory.labels,
configTracker: factory.configTracker,
includeLabelPrefix: factory.includeLabelPrefix,
}
daemonSetCtrl := &DaemonSetController{
logger: factory.logger,


@ -42,9 +42,9 @@ func (c *ServiceController) SetStatusPhase(cd *flaggerv1.Canary, phase flaggerv1
return setStatusPhase(c.flaggerClient, cd, phase)
}
// GetMetadata returns the pod label selector and svc ports
func (c *ServiceController) GetMetadata(_ *flaggerv1.Canary) (string, map[string]int32, error) {
return "", nil, nil
// GetMetadata returns the pod label selector, label value and svc ports
func (c *ServiceController) GetMetadata(_ *flaggerv1.Canary) (string, string, map[string]int32, error) {
return "", "", nil, nil
}
// Initialize creates or updates the primary and canary services to prepare for the canary release process targeted on the K8s service


@ -4,6 +4,7 @@ import (
"crypto/rand"
"fmt"
"io"
"strings"
corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/util/intstr"
@ -75,14 +76,28 @@ func makeAnnotations(annotations map[string]string) (map[string]string, error) {
return res, nil
}
func makePrimaryLabels(labels map[string]string, primaryName string, label string) map[string]string {
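// includeLabelsByPrefix returns the subset of labels whose keys match one of
// the given prefixes; a "*" entry includes every label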
func includeLabelsByPrefix(labels map[string]string, includeLabelPrefixes []string) map[string]string {
filteredLabels := make(map[string]string)
for key, value := range labels {
for _, includeLabelPrefix := range includeLabelPrefixes {
if includeLabelPrefix == "*" || strings.HasPrefix(key, includeLabelPrefix) {
filteredLabels[key] = value
break
}
}
}
return filteredLabels
}
func makePrimaryLabels(labels map[string]string, labelValue string, label string) map[string]string {
res := make(map[string]string)
for k, v := range labels {
if k != label {
res[k] = v
}
}
res[label] = primaryName
res[label] = labelValue
return res
}

pkg/canary/util_test.go

@ -0,0 +1,55 @@
package canary
import (
"testing"
"github.com/stretchr/testify/assert"
)
func TestIncludeLabelsByPrefix(t *testing.T) {
labels := map[string]string{
"foo": "foo-value",
"bar": "bar-value",
"lorem": "ipsum",
}
includeLabelPrefix := []string{"foo", "lor"}
filteredLabels := includeLabelsByPrefix(labels, includeLabelPrefix)
assert.Equal(t, filteredLabels, map[string]string{
"foo": "foo-value",
"lorem": "ipsum",
// bar excluded
})
}
func TestIncludeLabelsByPrefixWithWildcard(t *testing.T) {
labels := map[string]string{
"foo": "foo-value",
"bar": "bar-value",
"lorem": "ipsum",
}
includeLabelPrefix := []string{"*"}
filteredLabels := includeLabelsByPrefix(labels, includeLabelPrefix)
assert.Equal(t, filteredLabels, map[string]string{
"foo": "foo-value",
"bar": "bar-value",
"lorem": "ipsum",
})
}
func TestMakePrimaryLabels(t *testing.T) {
labels := map[string]string{
"lorem": "ipsum",
"foo": "old-bar",
}
primaryLabels := makePrimaryLabels(labels, "new-bar", "foo")
assert.Equal(t, primaryLabels, map[string]string{
"lorem": "ipsum", // values from old map
"foo": "new-bar", // overriden value for a specific label
})
}


@ -50,13 +50,13 @@ func (c *Controller) finalize(old interface{}) error {
return fmt.Errorf("canary not ready during finalizing: %w", err)
}
labelSelector, ports, err := canaryController.GetMetadata(canary)
labelSelector, labelValue, ports, err := canaryController.GetMetadata(canary)
if err != nil {
return fmt.Errorf("failed to get metadata for router finalizing: %w", err)
}
// Revert the Kubernetes service
router := c.routerFactory.KubernetesRouter(canary.Spec.TargetRef.Kind, labelSelector, ports)
router := c.routerFactory.KubernetesRouter(canary.Spec.TargetRef.Kind, labelSelector, labelValue, ports)
if err := router.Finalize(canary); err != nil {
return fmt.Errorf("failed revert router: %w", err)
}


@ -139,14 +139,14 @@ func (c *Controller) advanceCanary(name string, namespace string) {
// init controller based on target kind
canaryController := c.canaryFactory.Controller(cd.Spec.TargetRef.Kind)
labelSelector, ports, err := canaryController.GetMetadata(cd)
labelSelector, labelValue, ports, err := canaryController.GetMetadata(cd)
if err != nil {
c.recordEventWarningf(cd, "%v", err)
return
}
// init Kubernetes router
kubeRouter := c.routerFactory.KubernetesRouter(cd.Spec.TargetRef.Kind, labelSelector, ports)
kubeRouter := c.routerFactory.KubernetesRouter(cd.Spec.TargetRef.Kind, labelSelector, labelValue, ports)
// reconcile the canary/primary services
if err := kubeRouter.Initialize(cd); err != nil {
@ -271,7 +271,7 @@ func (c *Controller) advanceCanary(name string, namespace string) {
}
// check if analysis should be skipped
if skip := c.shouldSkipAnalysis(cd, canaryController, meshRouter); skip {
if skip := c.shouldSkipAnalysis(cd, canaryController, meshRouter, err, retriable); skip {
return
}
@ -654,11 +654,20 @@ func (c *Controller) runAnalysis(canary *flaggerv1.Canary) bool {
return true
}
func (c *Controller) shouldSkipAnalysis(canary *flaggerv1.Canary, canaryController canary.Controller, meshRouter router.Interface) bool {
func (c *Controller) shouldSkipAnalysis(canary *flaggerv1.Canary, canaryController canary.Controller, meshRouter router.Interface, err error, retriable bool) bool {
if !canary.SkipAnalysis() {
return false
}
// regardless of whether analysis is skipped, roll back if the canary failed to progress
if !retriable || canary.Status.FailedChecks >= canary.GetAnalysisThreshold() {
c.recordEventWarningf(canary, "Rolling back %s.%s progress deadline exceeded %v", canary.Name, canary.Namespace, err)
c.alert(canary, fmt.Sprintf("Progress deadline exceeded %v", err), false, flaggerv1.SeverityError)
c.rollback(canary, canaryController, meshRouter)
return true
}
// route all traffic to primary
primaryWeight := c.fullWeight(canary)
canaryWeight := 0


@ -87,7 +87,7 @@ func newDaemonSetFixture(c *flaggerv1.Canary) daemonSetFixture {
KubeClient: kubeClient,
FlaggerClient: flaggerClient,
}
canaryFactory := canary.NewFactory(kubeClient, flaggerClient, configTracker, []string{"app", "name"}, logger)
canaryFactory := canary.NewFactory(kubeClient, flaggerClient, configTracker, []string{"app", "name"}, []string{""}, logger)
ctrl := &Controller{
kubeClient: kubeClient,


@ -115,7 +115,7 @@ func newDeploymentFixture(c *flaggerv1.Canary) fixture {
KubeClient: kubeClient,
FlaggerClient: flaggerClient,
}
canaryFactory := canary.NewFactory(kubeClient, flaggerClient, configTracker, []string{"app", "name"}, logger)
canaryFactory := canary.NewFactory(kubeClient, flaggerClient, configTracker, []string{"app", "name"}, []string{""}, logger)
ctrl := &Controller{
kubeClient: kubeClient,


@ -65,7 +65,7 @@ func (c *Controller) checkMetricProviderAvailability(canary *flaggerv1.Canary) e
}
if ok, err := provider.IsOnline(); !ok || err != nil {
return fmt.Errorf("%v in metric tempalte %s.%s not avaiable: %v", template.Spec.Provider.Type,
return fmt.Errorf("%v in metric template %s.%s not avaiable: %v", template.Spec.Provider.Type,
template.Name, template.Namespace, err)
}
}


@ -18,6 +18,8 @@ func (factory Factory) Provider(
return NewDatadogProvider(metricInterval, provider, credentials)
case "cloudwatch":
return NewCloudWatchProvider(metricInterval, provider)
case "newrelic":
return NewNewRelicProvider(metricInterval, provider, credentials)
default:
return NewPrometheusProvider(provider, credentials)
}


@ -0,0 +1,159 @@
package providers
import (
"context"
"encoding/json"
"fmt"
"io/ioutil"
"net/http"
"time"
flaggerv1 "github.com/weaveworks/flagger/pkg/apis/flagger/v1beta1"
)
const (
newrelicInsightsDefaultHost = "https://insights-api.newrelic.com"
newrelicQueryKeySecretKey = "newrelic_query_key"
newrelicAccountIdSecretKey = "newrelic_account_id"
newrelicQueryKeyHeaderKey = "X-Query-Key"
)
// NewRelicProvider executes newrelic queries
type NewRelicProvider struct {
insightsQueryEndpoint string
timeout time.Duration
queryKey string
fromDelta int64
}
type newRelicResponse struct {
Results []struct {
Result *float64 `json:"result"`
} `json:"results"`
}
// NewNewRelicProvider takes a canary spec, a provider spec and the credentials map, and
// returns a NewRelic client ready to execute queries against the Insights API
func NewNewRelicProvider(
metricInterval string,
provider flaggerv1.MetricTemplateProvider,
credentials map[string][]byte,
) (*NewRelicProvider, error) {
address := provider.Address
if address == "" {
address = newrelicInsightsDefaultHost
}
accountId, ok := credentials[newrelicAccountIdSecretKey]
if !ok {
return nil, fmt.Errorf("newrelic credentials does not contain the key '%s'", newrelicAccountIdSecretKey)
}
queryEndpoint := fmt.Sprintf("%s/v1/accounts/%s/query", address, accountId)
nr := NewRelicProvider{
timeout: 5 * time.Second,
insightsQueryEndpoint: queryEndpoint,
}
if b, ok := credentials[newrelicQueryKeySecretKey]; ok {
nr.queryKey = string(b)
} else {
return nil, fmt.Errorf("newrelic credentials does not contain the key ''%s", newrelicQueryKeySecretKey)
}
md, err := time.ParseDuration(metricInterval)
if err != nil {
return nil, fmt.Errorf("error parsing metric interval: %w", err)
}
nr.fromDelta = int64(md.Seconds())
return &nr, nil
}
// RunQuery executes the new relic query against the New Relic Insights API
// and returns the first result
func (p *NewRelicProvider) RunQuery(query string) (float64, error) {
req, err := p.newInsightsRequest(query)
if err != nil {
return 0, err
}
ctx, cancel := context.WithTimeout(req.Context(), p.timeout)
defer cancel()
r, err := http.DefaultClient.Do(req.WithContext(ctx))
if err != nil {
return 0, fmt.Errorf("request failed: %w", err)
}
defer r.Body.Close()
b, err := ioutil.ReadAll(r.Body)
if err != nil {
return 0, fmt.Errorf("error reading body: %w", err)
}
if r.StatusCode != http.StatusOK {
return 0, fmt.Errorf("error response: %s: %w", string(b), err)
}
var res newRelicResponse
if err := json.Unmarshal(b, &res); err != nil {
return 0, fmt.Errorf("error unmarshaling result: %w, '%s'", err, string(b))
}
if len(res.Results) != 1 {
return 0, fmt.Errorf("invalid response: %s: %w", string(b), ErrNoValuesFound)
}
if res.Results[0].Result == nil {
return 0, fmt.Errorf("invalid response: %s: %w", string(b), ErrNoValuesFound)
}
return *res.Results[0].Result, nil
}
// IsOnline calls the New Relic Insights API and
// returns an error if the request is rejected
func (p *NewRelicProvider) IsOnline() (bool, error) {
req, err := p.newInsightsRequest("SELECT * FROM Metric")
if err != nil {
return false, fmt.Errorf("error http.NewRequest: %w", err)
}
ctx, cancel := context.WithTimeout(req.Context(), p.timeout)
defer cancel()
r, err := http.DefaultClient.Do(req.WithContext(ctx))
if err != nil {
return false, fmt.Errorf("request failed: %w", err)
}
defer r.Body.Close()
b, err := ioutil.ReadAll(r.Body)
if err != nil {
return false, fmt.Errorf("error reading body: %w", err)
}
if r.StatusCode != http.StatusOK {
return false, fmt.Errorf("error response: %s", string(b))
}
return true, nil
}
func (p *NewRelicProvider) newInsightsRequest(query string) (*http.Request, error) {
req, err := http.NewRequest("GET", p.insightsQueryEndpoint, nil)
if err != nil {
return nil, fmt.Errorf("error http.NewRequest: %w", err)
}
req.Header.Set(newrelicQueryKeyHeaderKey, p.queryKey)
q := req.URL.Query()
q.Add("nrql", fmt.Sprintf("%s SINCE %d seconds ago", query, p.fromDelta))
req.URL.RawQuery = q.Encode()
return req, nil
}
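For orientation, a sketch of how this provider gets wired up: the referenced Secret carries the two keys read above, and a MetricTemplate selects the provider by type (one of the enum values in the CRD). The names and the NRQL query below are illustrative only; RunQuery appends the SINCE window from the metric interval automatically.
apiVersion: v1
kind: Secret
metadata:
  name: newrelic
  namespace: istio-system
stringData:
  newrelic_account_id: "my-account-id"
  newrelic_query_key: "my-query-key"
---
apiVersion: flagger.app/v1beta1
kind: MetricTemplate
metadata:
  name: newrelic-request-rate
  namespace: istio-system
spec:
  provider:
    type: newrelic
    secretRef:
      name: newrelic
  query: >
    SELECT sum(nginx_ingress_controller_requests) FROM Metric WHERE status = '200'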

View File

@@ -0,0 +1,126 @@
package providers
import (
"errors"
"fmt"
"net/http"
"net/http/httptest"
"testing"
"time"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
flaggerv1 "github.com/weaveworks/flagger/pkg/apis/flagger/v1beta1"
)
func TestNewNewRelicProvider(t *testing.T) {
queryKey := "query-key"
accountId := "51312"
cs := map[string][]byte{
"newrelic_query_key": []byte(queryKey),
"newrelic_account_id": []byte(accountId),
}
duration := "100s"
secondsDuration, err := time.ParseDuration(duration)
require.NoError(t, err)
nr, err := NewNewRelicProvider("100s", flaggerv1.MetricTemplateProvider{}, cs)
require.NoError(t, err)
assert.Equal(t, "https://insights-api.newrelic.com/v1/accounts/51312/query", nr.insightsQueryEndpoint)
assert.Equal(t, int64(secondsDuration.Seconds()), nr.fromDelta)
assert.Equal(t, queryKey, nr.queryKey)
}
func TestNewRelicProvider_RunQuery(t *testing.T) {
queryKey := "query-key"
accountId := "51312"
t.Run("ok", func(t *testing.T) {
q := `SELECT sum(nginx_ingress_controller_requests) / 1 FROM Metric WHERE status = '200'`
eq := `SELECT sum(nginx_ingress_controller_requests) / 1 FROM Metric WHERE status = '200' SINCE 60 seconds ago`
er := 1.11111
ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
aq := r.URL.Query().Get("nrql")
assert.Equal(t, eq, aq)
assert.Equal(t, queryKey, r.Header.Get(newrelicQueryKeyHeaderKey))
json := fmt.Sprintf(`{"results":[{"result": %f}]}`, er)
w.Write([]byte(json))
}))
defer ts.Close()
nr, err := NewNewRelicProvider("1m",
flaggerv1.MetricTemplateProvider{
Address: ts.URL,
},
map[string][]byte{
"newrelic_query_key": []byte(queryKey),
"newrelic_account_id": []byte(accountId),
},
)
require.NoError(t, err)
f, err := nr.RunQuery(q)
assert.NoError(t, err)
assert.Equal(t, er, f)
})
t.Run("no values", func(t *testing.T) {
ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
json := `{"results": []}`
w.Write([]byte(json))
}))
defer ts.Close()
dp, err := NewNewRelicProvider(
"1m",
flaggerv1.MetricTemplateProvider{Address: ts.URL},
map[string][]byte{
"newrelic_query_key": []byte(queryKey),
"newrelic_account_id": []byte(accountId)},
)
require.NoError(t, err)
_, err = dp.RunQuery("")
require.True(t, errors.Is(err, ErrNoValuesFound))
})
}
func TestNewRelicProvider_IsOnline(t *testing.T) {
for _, c := range []struct {
code int
errExpected bool
}{
{code: http.StatusOK, errExpected: false},
{code: http.StatusUnauthorized, errExpected: true},
} {
t.Run(fmt.Sprintf("%d", c.code), func(t *testing.T) {
queryKey := "query-key"
accountId := "51312"
query := `SELECT * FROM Metric SINCE 60 seconds ago`
ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
assert.Equal(t, queryKey, r.Header.Get(newrelicQueryKeyHeaderKey))
assert.Equal(t, query, r.URL.Query().Get("nrql"))
w.WriteHeader(c.code)
}))
defer ts.Close()
dp, err := NewNewRelicProvider(
"1m",
flaggerv1.MetricTemplateProvider{Address: ts.URL},
map[string][]byte{
"newrelic_query_key": []byte(queryKey),
"newrelic_account_id": []byte(accountId),
},
)
require.NoError(t, err)
_, err = dp.IsOnline()
if c.errExpected {
require.Error(t, err)
} else {
require.NoError(t, err)
}
})
}
}

View File

@@ -39,7 +39,7 @@ func NewFactory(kubeConfig *restclient.Config, kubeClient kubernetes.Interface,
}
// KubernetesRouter returns a KubernetesRouter interface implementation
func (factory *Factory) KubernetesRouter(kind string, labelSelector string, ports map[string]int32) KubernetesRouter {
func (factory *Factory) KubernetesRouter(kind string, labelSelector string, labelValue string, ports map[string]int32) KubernetesRouter {
switch kind {
case "Service":
return &KubernetesNoopRouter{}
@@ -49,6 +49,7 @@ func (factory *Factory) KubernetesRouter(kind string, labelSelector string, port
flaggerClient: factory.flaggerClient,
kubeClient: factory.kubeClient,
labelSelector: labelSelector,
labelValue: labelValue,
ports: ports,
}
}

View File

@@ -98,6 +98,13 @@ func (ir *IstioRouter) reconcileDestinationRule(canary *flaggerv1.Canary, name s
func (ir *IstioRouter) reconcileVirtualService(canary *flaggerv1.Canary) error {
apexName, primaryName, canaryName := canary.GetServiceNames()
if canary.Spec.Service.Delegation {
if len(canary.Spec.Service.Hosts) > 0 || len(canary.Spec.Service.Gateways) > 0 {
// a delegate VirtualService cannot have hosts or gateways.
return fmt.Errorf("VirtualService %s.%s cannot have hosts or gateways when delegation is enabled", apexName, canary.Namespace)
}
}
// set hosts and add the ClusterIP service host if it doesn't exist
hosts := canary.Spec.Service.Hosts
var hasServiceHost bool
@@ -132,6 +139,12 @@ func (ir *IstioRouter) reconcileVirtualService(canary *flaggerv1.Canary) error {
makeDestination(canary, canaryName, 0),
}
if canary.Spec.Service.Delegation {
// a delegate VirtualService requires hosts and gateways to be empty.
hosts = []string{}
gateways = []string{}
}
newSpec := istiov1alpha3.VirtualServiceSpec{
Hosts: hosts,
Gateways: gateways,

View File

@@ -333,6 +333,53 @@ func TestIstioRouter_GatewayPort(t *testing.T) {
assert.Equal(t, uint32(mocks.canary.Spec.Service.Port), port)
}
func TestIstioRouter_Delegate(t *testing.T) {
t.Run("ok", func(t *testing.T) {
mocks := newFixture(nil)
mocks.canary.Spec.Service.Hosts = []string{}
mocks.canary.Spec.Service.Gateways = []string{}
mocks.canary.Spec.Service.Delegation = true
router := &IstioRouter{
logger: mocks.logger,
flaggerClient: mocks.flaggerClient,
istioClient: mocks.meshClient,
kubeClient: mocks.kubeClient,
}
err := router.Reconcile(mocks.canary)
require.NoError(t, err)
vs, err := mocks.meshClient.NetworkingV1alpha3().VirtualServices("default").Get(context.TODO(), "podinfo", metav1.GetOptions{})
require.NoError(t, err)
assert.Equal(t, 0, len(vs.Spec.Hosts))
assert.Equal(t, 0, len(vs.Spec.Gateways))
})
t.Run("invalid", func(t *testing.T) {
mocks := newFixture(nil)
if len(mocks.canary.Spec.Service.Gateways) == 0 {
// the gateways or hosts must not be empty here, so that reconciliation returns an error.
mocks.canary.Spec.Service.Gateways = []string{
"public-gateway.istio",
"mesh",
}
}
mocks.canary.Spec.Service.Delegation = true
router := &IstioRouter{
logger: mocks.logger,
flaggerClient: mocks.flaggerClient,
istioClient: mocks.meshClient,
kubeClient: mocks.kubeClient,
}
err := router.Reconcile(mocks.canary)
require.Error(t, err)
})
}
func TestIstioRouter_Finalize(t *testing.T) {
mocks := newFixture(nil)
router := &IstioRouter{

View File

@@ -25,6 +25,7 @@ type KubernetesDefaultRouter struct {
flaggerClient clientset.Interface
logger *zap.SugaredLogger
labelSelector string
labelValue string
ports map[string]int32
}
@@ -33,13 +34,13 @@ func (c *KubernetesDefaultRouter) Initialize(canary *flaggerv1.Canary) error {
_, primaryName, canaryName := canary.GetServiceNames()
// canary svc
err := c.reconcileService(canary, canaryName, canary.Spec.TargetRef.Name, canary.Spec.Service.Canary)
err := c.reconcileService(canary, canaryName, c.labelValue, canary.Spec.Service.Canary)
if err != nil {
return fmt.Errorf("reconcileService failed: %w", err)
}
// primary svc
err = c.reconcileService(canary, primaryName, fmt.Sprintf("%s-primary", canary.Spec.TargetRef.Name), canary.Spec.Service.Primary)
err = c.reconcileService(canary, primaryName, fmt.Sprintf("%s-primary", c.labelValue), canary.Spec.Service.Primary)
if err != nil {
return fmt.Errorf("reconcileService failed: %w", err)
}
@@ -52,7 +53,7 @@ func (c *KubernetesDefaultRouter) Reconcile(canary *flaggerv1.Canary) error {
apexName, _, _ := canary.GetServiceNames()
// main svc
err := c.reconcileService(canary, apexName, fmt.Sprintf("%s-primary", canary.Spec.TargetRef.Name), canary.Spec.Service.Apex)
err := c.reconcileService(canary, apexName, fmt.Sprintf("%s-primary", c.labelValue), canary.Spec.Service.Apex)
if err != nil {
return fmt.Errorf("reconcileService failed: %w", err)
}

View File

@@ -4,6 +4,7 @@ import (
"context"
"encoding/json"
"fmt"
"strings"
"github.com/google/go-cmp/cmp"
"go.uber.org/zap"
@@ -176,7 +177,8 @@ func (skp *SkipperRouter) SetRoutes(canary *flaggerv1.Canary, primaryWeight, can
// Disable the canary-ingress route after the canary process
if canaryWeight == 0 {
iClone.Annotations[skipperpredicateAnnotationKey] = canaryRouteDisable
// ensure False() is inserted in first place
iClone.Annotations[skipperpredicateAnnotationKey] = insertPredicate(iClone.Annotations[skipperpredicateAnnotationKey], canaryRouteDisable)
}
_, err = skp.kubeClient.NetworkingV1beta1().Ingresses(canary.Namespace).Update(
@@ -212,7 +214,7 @@ func (skp *SkipperRouter) makeAnnotations(annotations map[string]string, backend
}
annotations[skipperBackendWeightsAnnotationKey] = string(b)
// adding more weight to the canary route prevents traffic from bypassing it through the apex ingress
annotations[skipperpredicateAnnotationKey] = canaryRouteWeight
annotations[skipperpredicateAnnotationKey] = insertPredicate(annotations[skipperpredicateAnnotationKey], canaryRouteWeight)
return annotations
}
@@ -233,3 +235,19 @@ func (skp *SkipperRouter) backendWeights(annotation map[string]string) (backendW
func (skp *SkipperRouter) getIngressNames(name string) (apexName, canaryName string) {
return name, fmt.Sprintf(canaryPatternf, name)
}
func insertPredicate(raw, insert string) string {
// ensure the inserted predicate is in first place
predicates := []string{insert}
for _, x := range strings.Split(raw, "&&") {
predicate := strings.TrimSpace(x)
// dropping conflicting predicates
if predicate == "" ||
predicate == canaryRouteWeight ||
predicate == canaryRouteDisable {
continue
}
predicates = append(predicates, predicate)
}
return strings.Join(predicates, " && ")
}
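As the table-driven test in the next file confirms, insertPredicate always puts the inserted predicate first and drops stale Weight(100) or False() entries, so for example insertPredicate('Method("GET") && False()', "Weight(100)") yields 'Weight(100) && Method("GET")'.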

View File

@@ -105,3 +105,49 @@ func TestSkipperRouter_GetSetRoutes(t *testing.T) {
}
}
func Test_insertPredicate(t *testing.T) {
tests := []struct {
name string
raw string
insert string
want string
}{
{
name: "a few Predicates lined up",
raw: `Host(/^my-host-header\.example\.org$/) && Method("GET") && Path("/hello")`,
insert: "Weight(100)",
want: `Weight(100) && Host(/^my-host-header\.example\.org$/) && Method("GET") && Path("/hello")`,
},
{
name: "adds Predicate if none is set",
raw: "",
insert: "Weight(100)",
want: `Weight(100)`,
},
{
name: "removes duplicated Predicate Weight(100)",
raw: `Weight(100) && Host(/^my-host-header\.example\.org$/) && Method("GET") && Path("/hello")`,
insert: "Weight(100)",
want: `Weight(100) && Host(/^my-host-header\.example\.org$/) && Method("GET") && Path("/hello")`,
},
{
name: "removes duplicated Predicate False() and reorders them",
raw: `Host(/^my-host-header\.example\.org$/) && Method("GET") && Path("/hello")&&False()`,
insert: "False()",
want: `False() && Host(/^my-host-header\.example\.org$/) && Method("GET") && Path("/hello")`,
},
{
name: "removes conflicting Predicate False()",
raw: `Host(/^my-host-header\.example\.org$/) && False() && Method("GET") && Path("/hello")`,
insert: "Weight(100)",
want: `Weight(100) && Host(/^my-host-header\.example\.org$/) && Method("GET") && Path("/hello")`,
},
}
for _, tt := range tests {
tt := tt
t.Run(tt.name, func(t *testing.T) {
assert.Equal(t, tt.want, insertPredicate(tt.raw, tt.insert))
})
}
}

View File

@@ -1,4 +1,4 @@
package version
var VERSION = "1.1.0"
var VERSION = "1.2.0"
var REVISION = "unknown"

View File

@@ -85,6 +85,34 @@ spec:
logCmdOutput: "true"
EOF
cat <<EOF | kubectl apply -f -
apiVersion: flagger.app/v1beta1
kind: Canary
metadata:
name: podinfo-service
namespace: test
spec:
targetRef:
apiVersion: apps/v1
kind: Deployment
name: podinfo-service
progressDeadlineSeconds: 60
service:
port: 9898
portDiscovery: true
headers:
request:
add:
x-envoy-upstream-rq-timeout-ms: "15000"
x-envoy-max-retries: "10"
x-envoy-retry-on: "gateway-error,connect-failure,refused-stream"
analysis:
interval: 15s
threshold: 15
maxWeight: 30
stepWeight: 10
EOF
echo '>>> Waiting for primary to be ready'
retries=50
count=0
@@ -104,6 +132,19 @@ kubectl -n test get httpproxy podinfo -oyaml | grep 'projectcontour.io/ingress.c
echo '✔ Canary initialization test passed'
passed=$(kubectl -n test get svc/podinfo -o jsonpath='{.spec.selector.app}' 2>&1 | { grep podinfo-primary || true; })
if [ -z "$passed" ]; then
echo -e '\u2716 podinfo selector test failed'
exit 1
fi
passed=$(kubectl -n test get svc/podinfo-service-canary -o jsonpath='{.spec.selector.app}' 2>&1 | { grep podinfo || true; })
if [ -z "$passed" ]; then
echo -e '\u2716 podinfo-service selector test failed'
exit 1
fi
echo '✔ Canary service custom metadata test passed'
echo '>>> Triggering canary deployment'
kubectl -n test set image deployment/podinfo podinfod=stefanprodan/podinfo:3.1.1

View File

@@ -88,6 +88,34 @@ spec:
logCmdOutput: "true"
EOF
cat <<EOF | kubectl apply -f -
apiVersion: flagger.app/v1beta1
kind: Canary
metadata:
name: podinfo-service
namespace: test
spec:
targetRef:
apiVersion: apps/v1
kind: Deployment
name: podinfo-service
progressDeadlineSeconds: 60
service:
port: 9898
portDiscovery: true
headers:
request:
add:
x-envoy-upstream-rq-timeout-ms: "15000"
x-envoy-max-retries: "10"
x-envoy-retry-on: "gateway-error,connect-failure,refused-stream"
analysis:
interval: 15s
threshold: 15
maxWeight: 30
stepWeight: 10
EOF
echo '>>> Waiting for primary to be ready'
retries=50
count=0
@@ -105,6 +133,19 @@ done
echo '✔ Canary initialization test passed'
passed=$(kubectl -n test get svc/podinfo -o jsonpath='{.spec.selector.app}' 2>&1 | { grep podinfo-primary || true; })
if [ -z "$passed" ]; then
echo -e '\u2716 podinfo selector test failed'
exit 1
fi
passed=$(kubectl -n test get svc/podinfo-service-canary -o jsonpath='{.spec.selector.app}' 2>&1 | { grep podinfo || true; })
if [ -z "$passed" ]; then
echo -e '\u2716 podinfo-service selector test failed'
exit 1
fi
echo '✔ Canary service custom metadata test passed'
echo '>>> Triggering canary deployment'
kubectl -n test set image deployment/podinfo podinfod=stefanprodan/podinfo:3.1.1

test/e2e-istio-dependencies.sh Executable file
View File

@@ -0,0 +1,19 @@
#!/usr/bin/env bash
# This script sets up the scenarios for the Istio tests by creating a Kubernetes namespace, installing the load tester and deploying a test workload (podinfo)
# Prerequisites: Kubernetes Kind and Istio
set -o errexit
REPO_ROOT=$(git rev-parse --show-toplevel)
echo '>>> Creating test namespace'
kubectl create namespace test
kubectl label namespace test istio-injection=enabled
echo '>>> Installing the load tester'
kubectl apply -k ${REPO_ROOT}/kustomize/tester
kubectl -n test rollout status deployment/flagger-loadtester
echo '>>> Deploy podinfo'
kubectl apply -f ${REPO_ROOT}/test/e2e-workload.yaml

test/e2e-istio-tests-delegate.sh Executable file
View File

@@ -0,0 +1,150 @@
#!/usr/bin/env bash
# This script runs e2e tests for when canary delegation is enabled
# Prerequisites: Kubernetes Kind and Istio
set -o errexit
echo '>>> Set pilot env to enable virtual service delegate'
kubectl -n istio-system set env deploy istiod PILOT_ENABLE_VIRTUAL_SERVICE_DELEGATE=true
kubectl -n istio-system rollout status deploy istiod
echo '>>> Initialising Gateway'
cat <<EOF | kubectl apply -f -
apiVersion: networking.istio.io/v1alpha3
kind: Gateway
metadata:
name: istio-ingressgateway
namespace: istio-system
spec:
selector:
app: istio-ingressgateway
istio: ingressgateway
servers:
- port:
number: 80
name: http
protocol: HTTP
hosts:
- "*"
EOF
echo '>>> Initialising root virtual service'
cat <<EOF | kubectl apply -f -
apiVersion: networking.istio.io/v1alpha3
kind: VirtualService
metadata:
name: root-vs
namespace: test
spec:
gateways:
- istio-system/istio-ingressgateway
hosts:
- "*"
http:
- match:
- uri:
prefix: "/podinfo"
rewrite:
uri: "/"
delegate:
name: podinfo
namespace: test
EOF
echo '>>> Initialising canary for delegate'
cat <<EOF | kubectl apply -f -
apiVersion: flagger.app/v1beta1
kind: Canary
metadata:
name: podinfo
namespace: test
spec:
targetRef:
apiVersion: apps/v1
kind: Deployment
name: podinfo
progressDeadlineSeconds: 60
service:
port: 80
targetPort: 9898
portDiscovery: true
delegation: true
skipAnalysis: true
analysis:
interval: 15s
threshold: 15
maxWeight: 30
stepWeight: 10
webhooks:
- name: load-test
url: http://flagger-loadtester.test/
timeout: 5s
metadata:
type: cmd
cmd: "hey -z 10m -q 10 -c 2 http://istio-ingressgateway.istio-system/podinfo"
logCmdOutput: "true"
EOF
echo '>>> Waiting for primary to be ready'
retries=50
count=0
ok=false
until ${ok}; do
kubectl -n test get canary/podinfo | grep 'Initialized' && ok=true || ok=false
sleep 5
count=$(($count + 1))
if [[ ${count} -eq ${retries} ]]; then
kubectl -n istio-system logs deployment/flagger
echo "No more retries left"
exit 1
fi
done
echo '✔ Canary initialization test passed'
echo '>>> Triggering canary deployment'
kubectl -n test set image deployment/podinfo podinfod=stefanprodan/podinfo:3.1.1
echo '>>> Waiting for canary promotion'
retries=50
count=0
ok=false
until ${ok}; do
kubectl -n test describe deployment/podinfo-primary | grep '3.1.1' && ok=true || ok=false
sleep 10
kubectl -n istio-system logs deployment/flagger --tail 1
count=$(($count + 1))
if [[ ${count} -eq ${retries} ]]; then
kubectl -n test describe deployment/podinfo
kubectl -n test describe deployment/podinfo-primary
kubectl -n istio-system logs deployment/flagger
echo "No more retries left"
exit 1
fi
done
echo '>>> Waiting for canary finalization'
retries=50
count=0
ok=false
until ${ok}; do
kubectl -n test get canary/podinfo | grep 'Succeeded' && ok=true || ok=false
sleep 5
count=$(($count + 1))
if [[ ${count} -eq ${retries} ]]; then
kubectl -n istio-system logs deployment/flagger
echo "No more retries left"
exit 1
fi
done
echo '>>> Set pilot env to disable virtual service delegate'
kubectl -n istio-system set env deploy istiod PILOT_ENABLE_VIRTUAL_SERVICE_DELEGATE=false
kubectl -n istio-system rollout status deploy istiod
echo '✔ Canary promotion test passed'
if [[ "$1" = "canary" ]]; then
exit 0
fi

View File

@@ -0,0 +1,138 @@
#!/usr/bin/env bash
# This script runs e2e tests for when the canary analysis is skipped
# Prerequisites: Kubernetes Kind and Istio
set -o errexit
echo '>>> Initialising canary'
cat <<EOF | kubectl apply -f -
apiVersion: flagger.app/v1beta1
kind: Canary
metadata:
name: podinfo
namespace: test
spec:
targetRef:
apiVersion: apps/v1
kind: Deployment
name: podinfo
progressDeadlineSeconds: 60
service:
port: 9898
portDiscovery: true
skipAnalysis: true
analysis:
interval: 15s
threshold: 15
maxWeight: 30
stepWeight: 10
webhooks:
- name: load-test
url: http://flagger-loadtester.test/
timeout: 5s
metadata:
type: cmd
cmd: "hey -z 10m -q 10 -c 2 http://podinfo.test:9898/"
logCmdOutput: "true"
EOF
echo '>>> Waiting for primary to be ready'
retries=50
count=0
ok=false
until ${ok}; do
kubectl -n test get canary/podinfo | grep 'Initialized' && ok=true || ok=false
sleep 5
count=$(($count + 1))
if [[ ${count} -eq ${retries} ]]; then
kubectl -n istio-system logs deployment/flagger
echo "No more retries left"
exit 1
fi
done
echo '✔ Canary initialization test passed'
echo '>>> Triggering canary deployment'
kubectl -n test set image deployment/podinfo podinfod=stefanprodan/podinfo:3.1.1
echo '>>> Waiting for canary promotion'
retries=50
count=0
ok=false
until ${ok}; do
kubectl -n test describe deployment/podinfo-primary | grep '3.1.1' && ok=true || ok=false
sleep 10
kubectl -n istio-system logs deployment/flagger --tail 1
count=$(($count + 1))
if [[ ${count} -eq ${retries} ]]; then
kubectl -n test describe deployment/podinfo
kubectl -n test describe deployment/podinfo-primary
kubectl -n istio-system logs deployment/flagger
echo "No more retries left"
exit 1
fi
done
echo '>>> Waiting for canary finalization'
retries=50
count=0
ok=false
until ${ok}; do
kubectl -n test get canary/podinfo | grep 'Succeeded' && ok=true || ok=false
sleep 5
count=$(($count + 1))
if [[ ${count} -eq ${retries} ]]; then
kubectl -n istio-system logs deployment/flagger
echo "No more retries left"
exit 1
fi
done
echo '✔ Canary promotion test passed'
if [[ "$1" = "canary" ]]; then
exit 0
fi
echo '>>> Triggering canary deployment with a bad release (non existent docker image)'
kubectl -n test set image deployment/podinfo podinfod=stefanprodan/potato:1.0.0
echo '>>> Waiting for canary to fail'
retries=50
count=0
ok=false
until ${ok}; do
kubectl get canary/podinfo -n test -o=jsonpath='{.status.phase}' | grep 'Failed' && ok=true || ok=false
sleep 10
kubectl -n istio-system logs deployment/flagger --tail 1
count=$(($count + 1))
if [[ ${count} -eq ${retries} ]]; then
kubectl -n test describe deployment/podinfo
kubectl -n test describe deployment/podinfo-primary
kubectl -n istio-system logs deployment/flagger
echo "No more retries left"
exit 1
fi
done
echo '>>> Confirm primary deployment is still running with the correct version'
retries=50
count=0
okImage=false
okRunning=false
until ${okImage} && ${okRunning}; do
kubectl get deployment podinfo-primary -n test -o jsonpath='{.spec.replicas}' | grep 1 && okRunning=true || okRunning=false
kubectl -n test describe deployment/podinfo-primary | grep '3.1.1' && okImage=true || okImage=false
sleep 5
count=$(($count + 1))
if [[ ${count} -eq ${retries} ]]; then
kubectl -n istio-system logs deployment/flagger
echo "No more retries left"
exit 1
fi
done
kubectl -n istio-system logs deployment/flagger
echo '✔ All tests passed'

View File

@@ -5,19 +5,6 @@
set -o errexit
REPO_ROOT=$(git rev-parse --show-toplevel)
echo '>>> Creating test namespace'
kubectl create namespace test
kubectl label namespace test istio-injection=enabled
echo '>>> Installing the load tester'
kubectl apply -k ${REPO_ROOT}/kustomize/tester
kubectl -n test rollout status deployment/flagger-loadtester
echo '>>> Deploy podinfo'
kubectl apply -f ${REPO_ROOT}/test/e2e-workload.yaml
echo '>>> Create latency metric template'
cat <<EOF | kubectl apply -f -
apiVersion: flagger.app/v1beta1
@@ -44,7 +31,7 @@ spec:
)
EOF
echo '>>> Initialising canary'
echo '>>> Initialising canaries'
cat <<EOF | kubectl apply -f -
apiVersion: flagger.app/v1beta1
kind: Canary
@@ -98,12 +85,41 @@ spec:
logCmdOutput: "true"
EOF
cat <<EOF | kubectl apply -f -
apiVersion: flagger.app/v1beta1
kind: Canary
metadata:
name: podinfo-service
namespace: test
spec:
targetRef:
apiVersion: apps/v1
kind: Deployment
name: podinfo-service
progressDeadlineSeconds: 60
service:
port: 9898
portDiscovery: true
headers:
request:
add:
x-envoy-upstream-rq-timeout-ms: "15000"
x-envoy-max-retries: "10"
x-envoy-retry-on: "gateway-error,connect-failure,refused-stream"
analysis:
interval: 15s
threshold: 15
maxWeight: 30
stepWeight: 10
EOF
echo '>>> Waiting for primary to be ready'
retries=50
count=0
ok=false
until ${ok}; do
kubectl -n test get canary/podinfo | grep 'Initialized' && ok=true || ok=false
kubectl -n test get canary/podinfo-service | grep 'Initialized' && ok=true || ok=false
sleep 5
count=$(($count + 1))
if [[ ${count} -eq ${retries} ]]; then
@@ -115,8 +131,26 @@ done
echo '✔ Canary initialization test passed'
kubectl -n test get svc/podinfo -oyaml | grep annotations-test
kubectl -n test get svc/podinfo -oyaml | grep labels-test
passed=$(kubectl -n test get svc/podinfo -oyaml 2>&1 | { grep annotations-test || true; })
if [ -z "$passed" ]; then
echo -e '\u2716 podinfo annotations test failed'
exit 1
fi
passed=$(kubectl -n test get svc/podinfo -oyaml 2>&1 | { grep labels-test || true; })
if [ -z "$passed" ]; then
echo -e '\u2716 podinfo labels test failed'
exit 1
fi
passed=$(kubectl -n test get svc/podinfo -o jsonpath='{.spec.selector.app}' 2>&1 | { grep podinfo-primary || true; })
if [ -z "$passed" ]; then
echo -e '\u2716 podinfo selector test failed'
exit 1
fi
passed=$(kubectl -n test get svc/podinfo-service-canary -o jsonpath='{.spec.selector.app}' 2>&1 | { grep podinfo || true; })
if [ -z "$passed" ]; then
echo -e '\u2716 podinfo-service selector test failed'
exit 1
fi
echo '✔ Canary service custom metadata test passed'

View File

@@ -2,7 +2,7 @@
set -o errexit
ISTIO_VER="1.6.7"
ISTIO_VER="1.7.3"
REPO_ROOT=$(git rev-parse --show-toplevel)
echo ">>> Downloading Istio ${ISTIO_VER}"
@@ -10,8 +10,11 @@ cd ${REPO_ROOT}/bin && \
curl -L https://istio.io/downloadIstio | ISTIO_VERSION=${ISTIO_VER} sh -
echo ">>> Installing Istio ${ISTIO_VER}"
${REPO_ROOT}/bin/istio-${ISTIO_VER}/bin/istioctl manifest apply --set profile=default
${REPO_ROOT}/bin/istio-${ISTIO_VER}/bin/istioctl manifest install --set profile=default \
--set values.pilot.resources.requests.cpu=100m \
--set values.pilot.resources.requests.memory=100Mi
kubectl apply -f https://raw.githubusercontent.com/istio/istio/release-1.7/samples/addons/prometheus.yaml
kubectl -n istio-system rollout status deployment/prometheus
kubectl -n istio-system get all

View File

@@ -100,6 +100,34 @@ spec:
logCmdOutput: "true"
EOF
cat <<EOF | kubectl apply -f -
apiVersion: flagger.app/v1beta1
kind: Canary
metadata:
name: podinfo-service
namespace: test
spec:
targetRef:
apiVersion: apps/v1
kind: Deployment
name: podinfo-service
progressDeadlineSeconds: 60
service:
port: 9898
portDiscovery: true
headers:
request:
add:
x-envoy-upstream-rq-timeout-ms: "15000"
x-envoy-max-retries: "10"
x-envoy-retry-on: "gateway-error,connect-failure,refused-stream"
analysis:
interval: 15s
threshold: 15
maxWeight: 30
stepWeight: 10
EOF
echo '>>> Waiting for primary to be ready'
retries=50
count=0
@@ -117,6 +145,19 @@ done
echo '✔ Canary initialization test passed'
passed=$(kubectl -n test get svc/podinfo -o jsonpath='{.spec.selector.app}' 2>&1 | { grep podinfo-primary || true; })
if [ -z "$passed" ]; then
echo -e '\u2716 podinfo selector test failed'
exit 1
fi
passed=$(kubectl -n test get svc/podinfo-service-canary -o jsonpath='{.spec.selector.app}' 2>&1 | { grep podinfo || true; })
if [ -z "$passed" ]; then
echo -e '\u2716 podinfo-service selector test failed'
exit 1
fi
echo '✔ Canary service custom metadata test passed'
echo '>>> Triggering canary deployment'
kubectl -n test set image deployment/podinfo podinfod=stefanprodan/podinfo:3.1.1

View File

@@ -124,6 +124,34 @@ spec:
cmd: "hey -z 2m -q 10 -c 2 -host app.example.com http://nginx-ingress-controller.ingress-nginx"
EOF
cat <<EOF | kubectl apply -f -
apiVersion: flagger.app/v1beta1
kind: Canary
metadata:
name: podinfo-service
namespace: test
spec:
targetRef:
apiVersion: apps/v1
kind: Deployment
name: podinfo-service
progressDeadlineSeconds: 60
service:
port: 9898
portDiscovery: true
headers:
request:
add:
x-envoy-upstream-rq-timeout-ms: "15000"
x-envoy-max-retries: "10"
x-envoy-retry-on: "gateway-error,connect-failure,refused-stream"
analysis:
interval: 15s
threshold: 15
maxWeight: 30
stepWeight: 10
EOF
echo '>>> Waiting for primary to be ready'
retries=50
count=0
@@ -141,6 +169,19 @@ done
echo '✔ Canary initialization test passed'
passed=$(kubectl -n test get svc/podinfo -o jsonpath='{.spec.selector.app}' | { grep podinfo-primary || true; })
if [ -z "$passed" ]; then
echo -e '\u2716 podinfo selector test failed'
exit 1
fi
passed=$(kubectl -n test get svc/podinfo-service-canary -o jsonpath='{.spec.selector.app}' | { grep podinfo || true; })
if [ -z "$passed" ]; then
echo -e '\u2716 podinfo-service selector test failed'
exit 1
fi
echo '✔ Canary service custom metadata test passed'
echo '>>> Triggering canary deployment'
kubectl -n test set image deployment/podinfo podinfod=stefanprodan/podinfo:3.1.1

View File

@@ -66,3 +66,72 @@ spec:
requests:
cpu: 1m
memory: 16Mi
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: podinfo-service
namespace: test
labels:
app: podinfo
spec:
minReadySeconds: 5
revisionHistoryLimit: 5
progressDeadlineSeconds: 60
strategy:
rollingUpdate:
maxUnavailable: 0
type: RollingUpdate
selector:
matchLabels:
app: podinfo
template:
metadata:
annotations:
prometheus.io/scrape: "true"
prometheus.io/port: "9797"
labels:
app: podinfo
spec:
containers:
- name: podinfod
image: stefanprodan/podinfo:3.1.0
imagePullPolicy: IfNotPresent
ports:
- name: http
containerPort: 9898
protocol: TCP
- name: http-metrics
containerPort: 9797
protocol: TCP
- name: grpc
containerPort: 9999
protocol: TCP
command:
- ./podinfo
- --port=9898
- --port-metrics=9797
- --grpc-port=9999
- --grpc-service-name=podinfo
- --level=info
- --random-delay=false
- --random-error=false
livenessProbe:
httpGet:
port: 9898
path: /healthz
initialDelaySeconds: 5
timeoutSeconds: 5
readinessProbe:
httpGet:
port: 9898
path: /readyz
initialDelaySeconds: 5
timeoutSeconds: 5
resources:
limits:
cpu: 1000m
memory: 128Mi
requests:
cpu: 1m
memory: 16Mi