From e7da8c3f3529ed1312129529115df2bb2bbbdece Mon Sep 17 00:00:00 2001 From: Samuel Lang Date: Wed, 26 Aug 2020 11:27:05 +0200 Subject: [PATCH 01/47] Skipper: preserve Predicates Current implementation did overwrite potentially existing Predicates. We face the situation that we need to add further Predicates which we need to keep in order to have a proper route setup --- pkg/router/skipper.go | 22 ++++++++++++++++-- pkg/router/skipper_test.go | 46 ++++++++++++++++++++++++++++++++++++++ 2 files changed, 66 insertions(+), 2 deletions(-) diff --git a/pkg/router/skipper.go b/pkg/router/skipper.go index 6b5a8c7c..aed495a9 100644 --- a/pkg/router/skipper.go +++ b/pkg/router/skipper.go @@ -4,6 +4,7 @@ import ( "context" "encoding/json" "fmt" + "strings" "github.com/google/go-cmp/cmp" "go.uber.org/zap" @@ -176,7 +177,8 @@ func (skp *SkipperRouter) SetRoutes(canary *flaggerv1.Canary, primaryWeight, can // Disable the canary-ingress route after the canary process if canaryWeight == 0 { - iClone.Annotations[skipperpredicateAnnotationKey] = canaryRouteDisable + // ensuring False() is at first place + iClone.Annotations[skipperpredicateAnnotationKey] = insertPredicate(iClone.Annotations[skipperpredicateAnnotationKey], canaryRouteDisable) } _, err = skp.kubeClient.NetworkingV1beta1().Ingresses(canary.Namespace).Update( @@ -212,7 +214,7 @@ func (skp *SkipperRouter) makeAnnotations(annotations map[string]string, backend } annotations[skipperBackendWeightsAnnotationKey] = string(b) // adding more weight to canary route solves traffic bypassing through apexIngress - annotations[skipperpredicateAnnotationKey] = canaryRouteWeight + annotations[skipperpredicateAnnotationKey] = insertPredicate(annotations[skipperpredicateAnnotationKey], canaryRouteWeight) return annotations } @@ -233,3 +235,19 @@ func (skp *SkipperRouter) backendWeights(annotation map[string]string) (backendW func (skp *SkipperRouter) getIngressNames(name string) (apexName, canaryName string) { return name, fmt.Sprintf(canaryPatternf, name) } + +func insertPredicate(raw, insert string) string { + // ensuring it at first place + predicates := []string{insert} + for _, x := range strings.Split(raw, "&&") { + predicate := strings.TrimSpace(x) + // dropping conflicting predicates + if predicate == "" || + predicate == canaryRouteWeight || + predicate == canaryRouteDisable { + continue + } + predicates = append(predicates, predicate) + } + return strings.Join(predicates, " && ") +} diff --git a/pkg/router/skipper_test.go b/pkg/router/skipper_test.go index e782f38e..6301af57 100644 --- a/pkg/router/skipper_test.go +++ b/pkg/router/skipper_test.go @@ -105,3 +105,49 @@ func TestSkipperRouter_GetSetRoutes(t *testing.T) { } } + +func Test_insertPredicate(t *testing.T) { + tests := []struct { + name string + raw string + insert string + want string + }{ + { + name: "a few Predicates lined up", + raw: `Host(/^my-host-header\.example\.org$/) && Method("GET") && Path("/hello")`, + insert: "Weight(100)", + want: `Weight(100) && Host(/^my-host-header\.example\.org$/) && Method("GET") && Path("/hello")`, + }, + { + name: "adds Predicate if none is set", + raw: "", + insert: "Weight(100)", + want: `Weight(100)`, + }, + { + name: "removes duplicated Predicate Weight(100)", + raw: `Weight(100) && Host(/^my-host-header\.example\.org$/) && Method("GET") && Path("/hello")`, + insert: "Weight(100)", + want: `Weight(100) && Host(/^my-host-header\.example\.org$/) && Method("GET") && Path("/hello")`, + }, + { + name: "removes duplicated Predicate False() and reorders 
them", + raw: `Host(/^my-host-header\.example\.org$/) && Method("GET") && Path("/hello")&&False()`, + insert: "False()", + want: `False() && Host(/^my-host-header\.example\.org$/) && Method("GET") && Path("/hello")`, + }, + { + name: "removes conflicting Predicate False()", + raw: `Host(/^my-host-header\.example\.org$/) && False() && Method("GET") && Path("/hello")`, + insert: "Weight(100)", + want: `Weight(100) && Host(/^my-host-header\.example\.org$/) && Method("GET") && Path("/hello")`, + }, + } + for _, tt := range tests { + tt := tt + t.Run(tt.name, func(t *testing.T) { + assert.Equal(t, tt.want, insertPredicate(tt.raw, tt.insert)) + }) + } +} From 8e7aa29ef1055642a7b8c54361e3548b51fcf0e0 Mon Sep 17 00:00:00 2001 From: xichengliudui <1693291525@qq.com> Date: Wed, 2 Sep 2020 01:11:30 -0700 Subject: [PATCH 02/47] add istio 1.7 install command --- docs/gitbook/tutorials/istio-progressive-delivery.md | 2 ++ 1 file changed, 2 insertions(+) diff --git a/docs/gitbook/tutorials/istio-progressive-delivery.md b/docs/gitbook/tutorials/istio-progressive-delivery.md index 26763fb8..507ef055 100644 --- a/docs/gitbook/tutorials/istio-progressive-delivery.md +++ b/docs/gitbook/tutorials/istio-progressive-delivery.md @@ -12,6 +12,8 @@ Install Istio with telemetry support and Prometheus: ```bash istioctl manifest apply --set profile=default +# istio 1.7 or newer +istioctl install --set profile=default ``` Install Flagger using Kustomize (kubectl >= 1.14) in the `istio-system` namespace: From f70f43bb3d940edbba64851d90e1843da0f8af39 Mon Sep 17 00:00:00 2001 From: Forrest Thomas Date: Fri, 14 Aug 2020 14:52:15 -0700 Subject: [PATCH 03/47] use the existing labelSelector value instead of using the service name as the value --- pkg/canary/controller.go | 2 +- pkg/canary/daemonset_controller.go | 28 ++++++++++++++-------------- pkg/canary/deployment_controller.go | 26 +++++++++++++------------- pkg/canary/service_controller.go | 6 +++--- pkg/canary/util.go | 4 ++-- pkg/controller/finalizer.go | 4 ++-- pkg/controller/scheduler.go | 4 ++-- pkg/router/factory.go | 3 ++- pkg/router/kubernetes_default.go | 7 ++++--- 9 files changed, 43 insertions(+), 41 deletions(-) diff --git a/pkg/canary/controller.go b/pkg/canary/controller.go index 98836991..1edf97ef 100644 --- a/pkg/canary/controller.go +++ b/pkg/canary/controller.go @@ -7,7 +7,7 @@ import ( type Controller interface { IsPrimaryReady(canary *flaggerv1.Canary) error IsCanaryReady(canary *flaggerv1.Canary) (bool, error) - GetMetadata(canary *flaggerv1.Canary) (string, map[string]int32, error) + GetMetadata(canary *flaggerv1.Canary) (string, string, map[string]int32, error) SyncStatus(canary *flaggerv1.Canary, status flaggerv1.CanaryStatus) error SetStatusFailedChecks(canary *flaggerv1.Canary, val int) error SetStatusWeight(canary *flaggerv1.Canary, val int) error diff --git a/pkg/canary/daemonset_controller.go b/pkg/canary/daemonset_controller.go index 7e0972cd..44833388 100644 --- a/pkg/canary/daemonset_controller.go +++ b/pkg/canary/daemonset_controller.go @@ -107,7 +107,7 @@ func (c *DaemonSetController) Promote(cd *flaggerv1.Canary) error { return fmt.Errorf("damonset %s.%s get query error: %v", targetName, cd.Namespace, err) } - label, err := c.getSelectorLabel(canary) + label, labelValue, err := c.getSelectorLabel(canary) if err != nil { return fmt.Errorf("getSelectorLabel failed: %w", err) } @@ -146,7 +146,7 @@ func (c *DaemonSetController) Promote(cd *flaggerv1.Canary) error { } primaryCopy.Spec.Template.Annotations = annotations - 
primaryCopy.Spec.Template.Labels = makePrimaryLabels(canary.Spec.Template.Labels, primaryName, label) + primaryCopy.Spec.Template.Labels = makePrimaryLabels(canary.Spec.Template.Labels, labelValue, label) // apply update _, err = c.kubeClient.AppsV1().DaemonSets(cd.Namespace).Update(context.TODO(), primaryCopy, metav1.UpdateOptions{}) @@ -179,24 +179,24 @@ func (c *DaemonSetController) HasTargetChanged(cd *flaggerv1.Canary) (bool, erro } // GetMetadata returns the pod label selector and svc ports -func (c *DaemonSetController) GetMetadata(cd *flaggerv1.Canary) (string, map[string]int32, error) { +func (c *DaemonSetController) GetMetadata(cd *flaggerv1.Canary) (string, string, map[string]int32, error) { targetName := cd.Spec.TargetRef.Name canaryDae, err := c.kubeClient.AppsV1().DaemonSets(cd.Namespace).Get(context.TODO(), targetName, metav1.GetOptions{}) if err != nil { - return "", nil, fmt.Errorf("daemonset %s.%s get query error: %w", targetName, cd.Namespace, err) + return "", "", nil, fmt.Errorf("daemonset %s.%s get query error: %w", targetName, cd.Namespace, err) } - label, err := c.getSelectorLabel(canaryDae) + label, labelValue, err := c.getSelectorLabel(canaryDae) if err != nil { - return "", nil, fmt.Errorf("getSelectorLabel failed: %w", err) + return "", "", nil, fmt.Errorf("getSelectorLabel failed: %w", err) } var ports map[string]int32 if cd.Spec.Service.PortDiscovery { ports = getPorts(cd, canaryDae.Spec.Template.Spec.Containers) } - return label, ports, nil + return label, labelValue, ports, nil } func (c *DaemonSetController) createPrimaryDaemonSet(cd *flaggerv1.Canary) error { @@ -214,7 +214,7 @@ func (c *DaemonSetController) createPrimaryDaemonSet(cd *flaggerv1.Canary) error targetName, cd.Namespace, canaryDae.Spec.UpdateStrategy.Type) } - label, err := c.getSelectorLabel(canaryDae) + label, labelValue, err := c.getSelectorLabel(canaryDae) if err != nil { return fmt.Errorf("getSelectorLabel failed: %w", err) } @@ -240,7 +240,7 @@ func (c *DaemonSetController) createPrimaryDaemonSet(cd *flaggerv1.Canary) error Name: primaryName, Namespace: cd.Namespace, Labels: map[string]string{ - label: primaryName, + label: labelValue, }, OwnerReferences: []metav1.OwnerReference{ *metav1.NewControllerRef(cd, schema.GroupVersionKind{ @@ -256,12 +256,12 @@ func (c *DaemonSetController) createPrimaryDaemonSet(cd *flaggerv1.Canary) error UpdateStrategy: canaryDae.Spec.UpdateStrategy, Selector: &metav1.LabelSelector{ MatchLabels: map[string]string{ - label: primaryName, + label: labelValue, }, }, Template: corev1.PodTemplateSpec{ ObjectMeta: metav1.ObjectMeta{ - Labels: makePrimaryLabels(canaryDae.Spec.Template.Labels, primaryName, label), + Labels: makePrimaryLabels(canaryDae.Spec.Template.Labels, labelValue, label), Annotations: annotations, }, // update spec with the primary secrets and config maps @@ -281,14 +281,14 @@ func (c *DaemonSetController) createPrimaryDaemonSet(cd *flaggerv1.Canary) error } // getSelectorLabel returns the selector match label -func (c *DaemonSetController) getSelectorLabel(daemonSet *appsv1.DaemonSet) (string, error) { +func (c *DaemonSetController) getSelectorLabel(daemonSet *appsv1.DaemonSet) (string, string, error) { for _, l := range c.labels { if _, ok := daemonSet.Spec.Selector.MatchLabels[l]; ok { - return l, nil + return l, daemonSet.Spec.Selector.MatchLabels[l], nil } } - return "", fmt.Errorf( + return "", "", fmt.Errorf( "daemonset %s.%s spec.selector.matchLabels must contain one of %v'", daemonSet.Name, daemonSet.Namespace, c.labels, ) diff --git 
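
The hunks above change getSelectorLabel (and GetMetadata) to return both the matched selector key and its value, so callers stop assuming the selector value equals the target workload's name. A minimal sketch of that two-value contract, reduced to plain maps, so it is only illustrative of the patched behavior rather than the actual controller code:

```go
package main

import "fmt"

// getSelectorLabel mirrors the new two-value contract: return the first
// configured selector key found in matchLabels together with its value.
func getSelectorLabel(matchLabels map[string]string, labels []string) (string, string, error) {
	for _, l := range labels {
		if v, ok := matchLabels[l]; ok {
			return l, v, nil
		}
	}
	return "", "", fmt.Errorf("spec.selector.matchLabels must contain one of %v", labels)
}

func main() {
	// A workload named "podinfo-service" whose pods are selected by name=podinfo.
	label, labelValue, err := getSelectorLabel(
		map[string]string{"name": "podinfo"},
		[]string{"app", "name", "app.kubernetes.io/name"},
	)
	if err != nil {
		panic(err)
	}
	fmt.Printf("label=%s value=%s\n", label, labelValue) // label=name value=podinfo
}
```
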
a/pkg/canary/deployment_controller.go b/pkg/canary/deployment_controller.go index a9d0cd5f..72584d9d 100644 --- a/pkg/canary/deployment_controller.go +++ b/pkg/canary/deployment_controller.go @@ -72,7 +72,7 @@ func (c *DeploymentController) Promote(cd *flaggerv1.Canary) error { return fmt.Errorf("deployment %s.%s get query error: %w", targetName, cd.Namespace, err) } - label, err := c.getSelectorLabel(canary) + label, labelValue, err := c.getSelectorLabel(canary) if err != nil { return fmt.Errorf("getSelectorLabel failed: %w", err) } @@ -107,7 +107,7 @@ func (c *DeploymentController) Promote(cd *flaggerv1.Canary) error { } primaryCopy.Spec.Template.Annotations = annotations - primaryCopy.Spec.Template.Labels = makePrimaryLabels(canary.Spec.Template.Labels, primaryName, label) + primaryCopy.Spec.Template.Labels = makePrimaryLabels(canary.Spec.Template.Labels, labelValue, label) // apply update _, err = c.kubeClient.AppsV1().Deployments(cd.Namespace).Update(context.TODO(), primaryCopy, metav1.UpdateOptions{}) @@ -181,17 +181,17 @@ func (c *DeploymentController) ScaleFromZero(cd *flaggerv1.Canary) error { } // GetMetadata returns the pod label selector and svc ports -func (c *DeploymentController) GetMetadata(cd *flaggerv1.Canary) (string, map[string]int32, error) { +func (c *DeploymentController) GetMetadata(cd *flaggerv1.Canary) (string, string, map[string]int32, error) { targetName := cd.Spec.TargetRef.Name canaryDep, err := c.kubeClient.AppsV1().Deployments(cd.Namespace).Get(context.TODO(), targetName, metav1.GetOptions{}) if err != nil { - return "", nil, fmt.Errorf("deployment %s.%s get query error: %w", targetName, cd.Namespace, err) + return "", "", nil, fmt.Errorf("deployment %s.%s get query error: %w", targetName, cd.Namespace, err) } - label, err := c.getSelectorLabel(canaryDep) + label, labelValue, err := c.getSelectorLabel(canaryDep) if err != nil { - return "", nil, fmt.Errorf("getSelectorLabel failed: %w", err) + return "", "", nil, fmt.Errorf("getSelectorLabel failed: %w", err) } var ports map[string]int32 @@ -199,7 +199,7 @@ func (c *DeploymentController) GetMetadata(cd *flaggerv1.Canary) (string, map[st ports = getPorts(cd, canaryDep.Spec.Template.Spec.Containers) } - return label, ports, nil + return label, labelValue, ports, nil } func (c *DeploymentController) createPrimaryDeployment(cd *flaggerv1.Canary) error { targetName := cd.Spec.TargetRef.Name @@ -210,7 +210,7 @@ func (c *DeploymentController) createPrimaryDeployment(cd *flaggerv1.Canary) err return fmt.Errorf("deplyoment %s.%s get query error: %w", targetName, cd.Namespace, err) } - label, err := c.getSelectorLabel(canaryDep) + label, labelValue, err := c.getSelectorLabel(canaryDep) if err != nil { return fmt.Errorf("getSelectorLabel failed: %w", err) } @@ -241,7 +241,7 @@ func (c *DeploymentController) createPrimaryDeployment(cd *flaggerv1.Canary) err Name: primaryName, Namespace: cd.Namespace, Labels: map[string]string{ - label: primaryName, + label: labelValue, }, OwnerReferences: []metav1.OwnerReference{ *metav1.NewControllerRef(cd, schema.GroupVersionKind{ @@ -259,7 +259,7 @@ func (c *DeploymentController) createPrimaryDeployment(cd *flaggerv1.Canary) err Strategy: canaryDep.Spec.Strategy, Selector: &metav1.LabelSelector{ MatchLabels: map[string]string{ - label: primaryName, + label: labelValue, }, }, Template: corev1.PodTemplateSpec{ @@ -361,14 +361,14 @@ func (c *DeploymentController) reconcilePrimaryHpa(cd *flaggerv1.Canary, init bo } // getSelectorLabel returns the selector match label -func (c 
*DeploymentController) getSelectorLabel(deployment *appsv1.Deployment) (string, error) { +func (c *DeploymentController) getSelectorLabel(deployment *appsv1.Deployment) (string, string, error) { for _, l := range c.labels { if _, ok := deployment.Spec.Selector.MatchLabels[l]; ok { - return l, nil + return l, deployment.Spec.Selector.MatchLabels[l], nil } } - return "", fmt.Errorf( + return "", "", fmt.Errorf( "deployment %s.%s spec.selector.matchLabels must contain one of %v", deployment.Name, deployment.Namespace, c.labels, ) diff --git a/pkg/canary/service_controller.go b/pkg/canary/service_controller.go index 1961b6cb..f63131da 100644 --- a/pkg/canary/service_controller.go +++ b/pkg/canary/service_controller.go @@ -42,9 +42,9 @@ func (c *ServiceController) SetStatusPhase(cd *flaggerv1.Canary, phase flaggerv1 return setStatusPhase(c.flaggerClient, cd, phase) } -// GetMetadata returns the pod label selector and svc ports -func (c *ServiceController) GetMetadata(_ *flaggerv1.Canary) (string, map[string]int32, error) { - return "", nil, nil +// GetMetadata returns the pod label selector, label value and svc ports +func (c *ServiceController) GetMetadata(_ *flaggerv1.Canary) (string, string, map[string]int32, error) { + return "", "", nil, nil } // Initialize creates or updates the primary and canary services to prepare for the canary release process targeted on the K8s service diff --git a/pkg/canary/util.go b/pkg/canary/util.go index f4f58304..254cd38a 100644 --- a/pkg/canary/util.go +++ b/pkg/canary/util.go @@ -75,14 +75,14 @@ func makeAnnotations(annotations map[string]string) (map[string]string, error) { return res, nil } -func makePrimaryLabels(labels map[string]string, primaryName string, label string) map[string]string { +func makePrimaryLabels(labels map[string]string, labelValue string, label string) map[string]string { res := make(map[string]string) for k, v := range labels { if k != label { res[k] = v } } - res[label] = primaryName + res[label] = labelValue return res } diff --git a/pkg/controller/finalizer.go b/pkg/controller/finalizer.go index 34633342..b1a6fbec 100644 --- a/pkg/controller/finalizer.go +++ b/pkg/controller/finalizer.go @@ -50,13 +50,13 @@ func (c *Controller) finalize(old interface{}) error { return fmt.Errorf("canary not ready during finalizing: %w", err) } - labelSelector, ports, err := canaryController.GetMetadata(canary) + labelSelector, labelValue, ports, err := canaryController.GetMetadata(canary) if err != nil { return fmt.Errorf("failed to get metadata for router finalizing: %w", err) } // Revert the Kubernetes service - router := c.routerFactory.KubernetesRouter(canary.Spec.TargetRef.Kind, labelSelector, ports) + router := c.routerFactory.KubernetesRouter(canary.Spec.TargetRef.Kind, labelSelector, labelValue, ports) if err := router.Finalize(canary); err != nil { return fmt.Errorf("failed revert router: %w", err) } diff --git a/pkg/controller/scheduler.go b/pkg/controller/scheduler.go index 11fac989..df4c4f6a 100644 --- a/pkg/controller/scheduler.go +++ b/pkg/controller/scheduler.go @@ -100,14 +100,14 @@ func (c *Controller) advanceCanary(name string, namespace string) { // init controller based on target kind canaryController := c.canaryFactory.Controller(cd.Spec.TargetRef.Kind) - labelSelector, ports, err := canaryController.GetMetadata(cd) + labelSelector, labelValue, ports, err := canaryController.GetMetadata(cd) if err != nil { c.recordEventWarningf(cd, "%v", err) return } // init Kubernetes router - kubeRouter := 
c.routerFactory.KubernetesRouter(cd.Spec.TargetRef.Kind, labelSelector, ports) + kubeRouter := c.routerFactory.KubernetesRouter(cd.Spec.TargetRef.Kind, labelSelector, labelValue, ports) // reconcile the canary/primary services if err := kubeRouter.Initialize(cd); err != nil { diff --git a/pkg/router/factory.go b/pkg/router/factory.go index 2a970c90..cd1835b3 100644 --- a/pkg/router/factory.go +++ b/pkg/router/factory.go @@ -39,7 +39,7 @@ func NewFactory(kubeConfig *restclient.Config, kubeClient kubernetes.Interface, } // KubernetesRouter returns a KubernetesRouter interface implementation -func (factory *Factory) KubernetesRouter(kind string, labelSelector string, ports map[string]int32) KubernetesRouter { +func (factory *Factory) KubernetesRouter(kind string, labelSelector string, labelValue string, ports map[string]int32) KubernetesRouter { switch kind { case "Service": return &KubernetesNoopRouter{} @@ -49,6 +49,7 @@ func (factory *Factory) KubernetesRouter(kind string, labelSelector string, port flaggerClient: factory.flaggerClient, kubeClient: factory.kubeClient, labelSelector: labelSelector, + labelValue: labelValue, ports: ports, } } diff --git a/pkg/router/kubernetes_default.go b/pkg/router/kubernetes_default.go index 739dc4c7..45eb5f3f 100644 --- a/pkg/router/kubernetes_default.go +++ b/pkg/router/kubernetes_default.go @@ -25,6 +25,7 @@ type KubernetesDefaultRouter struct { flaggerClient clientset.Interface logger *zap.SugaredLogger labelSelector string + labelValue string ports map[string]int32 } @@ -33,13 +34,13 @@ func (c *KubernetesDefaultRouter) Initialize(canary *flaggerv1.Canary) error { _, primaryName, canaryName := canary.GetServiceNames() // canary svc - err := c.reconcileService(canary, canaryName, canary.Spec.TargetRef.Name, canary.Spec.Service.Canary) + err := c.reconcileService(canary, canaryName, c.labelValue, canary.Spec.Service.Canary) if err != nil { return fmt.Errorf("reconcileService failed: %w", err) } // primary svc - err = c.reconcileService(canary, primaryName, fmt.Sprintf("%s-primary", canary.Spec.TargetRef.Name), canary.Spec.Service.Primary) + err = c.reconcileService(canary, primaryName, fmt.Sprintf("%s-primary", c.labelValue), canary.Spec.Service.Primary) if err != nil { return fmt.Errorf("reconcileService failed: %w", err) } @@ -52,7 +53,7 @@ func (c *KubernetesDefaultRouter) Reconcile(canary *flaggerv1.Canary) error { apexName, _, _ := canary.GetServiceNames() // main svc - err := c.reconcileService(canary, apexName, fmt.Sprintf("%s-primary", canary.Spec.TargetRef.Name), canary.Spec.Service.Apex) + err := c.reconcileService(canary, apexName, fmt.Sprintf("%s-primary", c.labelValue), canary.Spec.Service.Apex) if err != nil { return fmt.Errorf("reconcileService failed: %w", err) } From 6f372d787dc548c28d479522f5fd37c9639ee9db Mon Sep 17 00:00:00 2001 From: Forrest Thomas Date: Fri, 14 Aug 2020 16:14:46 -0700 Subject: [PATCH 04/47] fix the incorrect primary label value --- pkg/canary/daemonset_controller.go | 7 ++++--- pkg/canary/deployment_controller.go | 7 ++++--- 2 files changed, 8 insertions(+), 6 deletions(-) diff --git a/pkg/canary/daemonset_controller.go b/pkg/canary/daemonset_controller.go index 44833388..a8d74de8 100644 --- a/pkg/canary/daemonset_controller.go +++ b/pkg/canary/daemonset_controller.go @@ -215,6 +215,7 @@ func (c *DaemonSetController) createPrimaryDaemonSet(cd *flaggerv1.Canary) error } label, labelValue, err := c.getSelectorLabel(canaryDae) + primaryLabelValue := fmt.Sprintf("%s-primary", labelValue); if err != nil { return 
fmt.Errorf("getSelectorLabel failed: %w", err) } @@ -240,7 +241,7 @@ func (c *DaemonSetController) createPrimaryDaemonSet(cd *flaggerv1.Canary) error Name: primaryName, Namespace: cd.Namespace, Labels: map[string]string{ - label: labelValue, + label: primaryLabelValue, }, OwnerReferences: []metav1.OwnerReference{ *metav1.NewControllerRef(cd, schema.GroupVersionKind{ @@ -256,12 +257,12 @@ func (c *DaemonSetController) createPrimaryDaemonSet(cd *flaggerv1.Canary) error UpdateStrategy: canaryDae.Spec.UpdateStrategy, Selector: &metav1.LabelSelector{ MatchLabels: map[string]string{ - label: labelValue, + label: primaryLabelValue, }, }, Template: corev1.PodTemplateSpec{ ObjectMeta: metav1.ObjectMeta{ - Labels: makePrimaryLabels(canaryDae.Spec.Template.Labels, labelValue, label), + Labels: makePrimaryLabels(canaryDae.Spec.Template.Labels, primaryLabelValue, label), Annotations: annotations, }, // update spec with the primary secrets and config maps diff --git a/pkg/canary/deployment_controller.go b/pkg/canary/deployment_controller.go index 72584d9d..b57a8d09 100644 --- a/pkg/canary/deployment_controller.go +++ b/pkg/canary/deployment_controller.go @@ -211,6 +211,7 @@ func (c *DeploymentController) createPrimaryDeployment(cd *flaggerv1.Canary) err } label, labelValue, err := c.getSelectorLabel(canaryDep) + primaryLabelValue := fmt.Sprintf("%s-primary", labelValue); if err != nil { return fmt.Errorf("getSelectorLabel failed: %w", err) } @@ -241,7 +242,7 @@ func (c *DeploymentController) createPrimaryDeployment(cd *flaggerv1.Canary) err Name: primaryName, Namespace: cd.Namespace, Labels: map[string]string{ - label: labelValue, + label: primaryLabelValue, }, OwnerReferences: []metav1.OwnerReference{ *metav1.NewControllerRef(cd, schema.GroupVersionKind{ @@ -259,12 +260,12 @@ func (c *DeploymentController) createPrimaryDeployment(cd *flaggerv1.Canary) err Strategy: canaryDep.Spec.Strategy, Selector: &metav1.LabelSelector{ MatchLabels: map[string]string{ - label: labelValue, + label: primaryLabelValue, }, }, Template: corev1.PodTemplateSpec{ ObjectMeta: metav1.ObjectMeta{ - Labels: makePrimaryLabels(canaryDep.Spec.Template.Labels, primaryName, label), + Labels: makePrimaryLabels(canaryDep.Spec.Template.Labels, primaryLabelValue, label), Annotations: annotations, }, // update spec with the primary secrets and config maps From c9dc5c5936d887fa9f5f5df58ec15a9d2ff8dbb0 Mon Sep 17 00:00:00 2001 From: Forrest Thomas Date: Fri, 14 Aug 2020 16:35:50 -0700 Subject: [PATCH 05/47] fix incorrect primary label value during promotion --- pkg/canary/daemonset_controller.go | 3 ++- pkg/canary/deployment_controller.go | 3 ++- 2 files changed, 4 insertions(+), 2 deletions(-) diff --git a/pkg/canary/daemonset_controller.go b/pkg/canary/daemonset_controller.go index a8d74de8..cff63280 100644 --- a/pkg/canary/daemonset_controller.go +++ b/pkg/canary/daemonset_controller.go @@ -108,6 +108,7 @@ func (c *DaemonSetController) Promote(cd *flaggerv1.Canary) error { } label, labelValue, err := c.getSelectorLabel(canary) + primaryLabelValue := fmt.Sprintf("%s-primary", labelValue); if err != nil { return fmt.Errorf("getSelectorLabel failed: %w", err) } @@ -146,7 +147,7 @@ func (c *DaemonSetController) Promote(cd *flaggerv1.Canary) error { } primaryCopy.Spec.Template.Annotations = annotations - primaryCopy.Spec.Template.Labels = makePrimaryLabels(canary.Spec.Template.Labels, labelValue, label) + primaryCopy.Spec.Template.Labels = makePrimaryLabels(canary.Spec.Template.Labels, primaryLabelValue, label) // apply update _, err = 
c.kubeClient.AppsV1().DaemonSets(cd.Namespace).Update(context.TODO(), primaryCopy, metav1.UpdateOptions{}) diff --git a/pkg/canary/deployment_controller.go b/pkg/canary/deployment_controller.go index b57a8d09..c3e3cc4e 100644 --- a/pkg/canary/deployment_controller.go +++ b/pkg/canary/deployment_controller.go @@ -73,6 +73,7 @@ func (c *DeploymentController) Promote(cd *flaggerv1.Canary) error { } label, labelValue, err := c.getSelectorLabel(canary) + primaryLabelValue := fmt.Sprintf("%s-primary", labelValue); if err != nil { return fmt.Errorf("getSelectorLabel failed: %w", err) } @@ -107,7 +108,7 @@ func (c *DeploymentController) Promote(cd *flaggerv1.Canary) error { } primaryCopy.Spec.Template.Annotations = annotations - primaryCopy.Spec.Template.Labels = makePrimaryLabels(canary.Spec.Template.Labels, labelValue, label) + primaryCopy.Spec.Template.Labels = makePrimaryLabels(canary.Spec.Template.Labels, primaryLabelValue, label) // apply update _, err = c.kubeClient.AppsV1().Deployments(cd.Namespace).Update(context.TODO(), primaryCopy, metav1.UpdateOptions{}) From 0db82b64f7987b2a35d9d0f5a37c50ad5ce53a9e Mon Sep 17 00:00:00 2001 From: Forrest Thomas Date: Fri, 21 Aug 2020 15:53:55 -0700 Subject: [PATCH 06/47] correct formatting --- pkg/canary/daemonset_controller.go | 4 ++-- pkg/canary/deployment_controller.go | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/pkg/canary/daemonset_controller.go b/pkg/canary/daemonset_controller.go index cff63280..fdd656c1 100644 --- a/pkg/canary/daemonset_controller.go +++ b/pkg/canary/daemonset_controller.go @@ -108,7 +108,7 @@ func (c *DaemonSetController) Promote(cd *flaggerv1.Canary) error { } label, labelValue, err := c.getSelectorLabel(canary) - primaryLabelValue := fmt.Sprintf("%s-primary", labelValue); + primaryLabelValue := fmt.Sprintf("%s-primary", labelValue) if err != nil { return fmt.Errorf("getSelectorLabel failed: %w", err) } @@ -216,7 +216,7 @@ func (c *DaemonSetController) createPrimaryDaemonSet(cd *flaggerv1.Canary) error } label, labelValue, err := c.getSelectorLabel(canaryDae) - primaryLabelValue := fmt.Sprintf("%s-primary", labelValue); + primaryLabelValue := fmt.Sprintf("%s-primary", labelValue) if err != nil { return fmt.Errorf("getSelectorLabel failed: %w", err) } diff --git a/pkg/canary/deployment_controller.go b/pkg/canary/deployment_controller.go index c3e3cc4e..40f3ba91 100644 --- a/pkg/canary/deployment_controller.go +++ b/pkg/canary/deployment_controller.go @@ -73,7 +73,7 @@ func (c *DeploymentController) Promote(cd *flaggerv1.Canary) error { } label, labelValue, err := c.getSelectorLabel(canary) - primaryLabelValue := fmt.Sprintf("%s-primary", labelValue); + primaryLabelValue := fmt.Sprintf("%s-primary", labelValue) if err != nil { return fmt.Errorf("getSelectorLabel failed: %w", err) } @@ -212,7 +212,7 @@ func (c *DeploymentController) createPrimaryDeployment(cd *flaggerv1.Canary) err } label, labelValue, err := c.getSelectorLabel(canaryDep) - primaryLabelValue := fmt.Sprintf("%s-primary", labelValue); + primaryLabelValue := fmt.Sprintf("%s-primary", labelValue) if err != nil { return fmt.Errorf("getSelectorLabel failed: %w", err) } From b378b3eb5d29f32bd886e72cbe23a7abee6fbff6 Mon Sep 17 00:00:00 2001 From: Forrest Thomas Date: Mon, 31 Aug 2020 16:07:40 -0700 Subject: [PATCH 07/47] setup deployment tests to allow configurable name, label and selector --- pkg/canary/config_tracker_test.go | 6 ++++-- pkg/canary/deployment_controller_test.go | 20 +++++++++++++------- pkg/canary/deployment_fixture_test.go | 
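
Taken together, the label-value fixes above settle on one convention: the primary workload's selector value is the canary's selector value with a "-primary" suffix, computed as primaryLabelValue := fmt.Sprintf("%s-primary", labelValue). A small standalone sketch of that derivation, reusing makePrimaryLabels as it appears in pkg/canary/util.go; the example label key and values are assumptions, not taken from the patch:

```go
package main

import "fmt"

// makePrimaryLabels copies the canary pod labels but overrides the selector
// label with the primary value, as in pkg/canary/util.go.
func makePrimaryLabels(labels map[string]string, labelValue string, label string) map[string]string {
	res := make(map[string]string)
	for k, v := range labels {
		if k != label {
			res[k] = v
		}
	}
	res[label] = labelValue
	return res
}

func main() {
	label, labelValue := "app", "podinfo"
	primaryLabelValue := fmt.Sprintf("%s-primary", labelValue)

	canaryPodLabels := map[string]string{"app": "podinfo", "team": "web"}
	primaryPodLabels := makePrimaryLabels(canaryPodLabels, primaryLabelValue, label)

	fmt.Println(primaryPodLabels) // map[app:podinfo-primary team:web]
}
```
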
18 ++++++++++++------ pkg/canary/deployment_ready_test.go | 6 ++++-- pkg/canary/deployment_status_test.go | 9 ++++++--- 5 files changed, 39 insertions(+), 20 deletions(-) diff --git a/pkg/canary/config_tracker_test.go b/pkg/canary/config_tracker_test.go index d7bbae2b..fb8423f8 100644 --- a/pkg/canary/config_tracker_test.go +++ b/pkg/canary/config_tracker_test.go @@ -25,7 +25,8 @@ func TestConfigIsDisabled(t *testing.T) { func TestConfigTracker_ConfigMaps(t *testing.T) { t.Run("deployment", func(t *testing.T) { - mocks := newDeploymentFixture() + depConfig := deploymentConfigs{name: "podinfo", label: "name", labelValue: "podinfo"} + mocks := newDeploymentFixture(depConfig) configMap := newDeploymentControllerTestConfigMap() configMapProjected := newDeploymentControllerTestConfigProjected() @@ -156,7 +157,8 @@ func TestConfigTracker_ConfigMaps(t *testing.T) { func TestConfigTracker_Secrets(t *testing.T) { t.Run("deployment", func(t *testing.T) { - mocks := newDeploymentFixture() + depConfig := deploymentConfigs{name: "podinfo", label: "name", labelValue: "podinfo"} + mocks := newDeploymentFixture(depConfig) secret := newDeploymentControllerTestSecret() secretProjected := newDeploymentControllerTestSecretProjected() diff --git a/pkg/canary/deployment_controller_test.go b/pkg/canary/deployment_controller_test.go index 7dcfe7a4..ce0703f3 100644 --- a/pkg/canary/deployment_controller_test.go +++ b/pkg/canary/deployment_controller_test.go @@ -15,13 +15,14 @@ import ( ) func TestDeploymentController_Sync(t *testing.T) { - mocks := newDeploymentFixture() + depConfig := deploymentConfigs{name: "podinfo", label: "name", labelValue: "podinfo"} + mocks := newDeploymentFixture(depConfig) mocks.initializeCanary(t) depPrimary, err := mocks.kubeClient.AppsV1().Deployments("default").Get(context.TODO(), "podinfo-primary", metav1.GetOptions{}) require.NoError(t, err) - dep := newDeploymentControllerTest() + dep := newDeploymentControllerTest(depConfig) primaryImage := depPrimary.Spec.Template.Spec.Containers[0].Image sourceImage := dep.Spec.Template.Spec.Containers[0].Image assert.Equal(t, sourceImage, primaryImage) @@ -32,7 +33,8 @@ func TestDeploymentController_Sync(t *testing.T) { } func TestDeploymentController_Promote(t *testing.T) { - mocks := newDeploymentFixture() + depConfig := deploymentConfigs{name: "podinfo", label: "name", labelValue: "podinfo"} + mocks := newDeploymentFixture(depConfig) mocks.initializeCanary(t) dep2 := newDeploymentControllerTestV2() @@ -72,7 +74,8 @@ func TestDeploymentController_Promote(t *testing.T) { } func TestDeploymentController_ScaleToZero(t *testing.T) { - mocks := newDeploymentFixture() + depConfig := deploymentConfigs{name: "podinfo", label: "name", labelValue: "podinfo"} + mocks := newDeploymentFixture(depConfig) mocks.initializeCanary(t) err := mocks.controller.ScaleToZero(mocks.canary) @@ -84,7 +87,8 @@ func TestDeploymentController_ScaleToZero(t *testing.T) { } func TestDeploymentController_NoConfigTracking(t *testing.T) { - mocks := newDeploymentFixture() + depConfig := deploymentConfigs{name: "podinfo", label: "name", labelValue: "podinfo"} + mocks := newDeploymentFixture(depConfig) mocks.controller.configTracker = &NopTracker{} mocks.initializeCanary(t) @@ -99,7 +103,8 @@ func TestDeploymentController_NoConfigTracking(t *testing.T) { } func TestDeploymentController_HasTargetChanged(t *testing.T) { - mocks := newDeploymentFixture() + depConfig := deploymentConfigs{name: "podinfo", label: "name", labelValue: "podinfo"} + mocks := 
newDeploymentFixture(depConfig) mocks.initializeCanary(t) // save last applied hash @@ -185,7 +190,8 @@ func TestDeploymentController_HasTargetChanged(t *testing.T) { } func TestDeploymentController_Finalize(t *testing.T) { - mocks := newDeploymentFixture() + depConfig := deploymentConfigs{name: "podinfo", label: "name", labelValue: "podinfo"} + mocks := newDeploymentFixture(depConfig) for _, tc := range []struct { mocks deploymentControllerFixture diff --git a/pkg/canary/deployment_fixture_test.go b/pkg/canary/deployment_fixture_test.go index aec070ff..cf474a11 100644 --- a/pkg/canary/deployment_fixture_test.go +++ b/pkg/canary/deployment_fixture_test.go @@ -29,6 +29,12 @@ type deploymentControllerFixture struct { logger *zap.SugaredLogger } +type deploymentConfigs struct { + name string + labelValue string + label string +} + func (d deploymentControllerFixture) initializeCanary(t *testing.T) { err := d.controller.Initialize(d.canary) require.Error(t, err) // not ready yet @@ -51,14 +57,14 @@ func (d deploymentControllerFixture) initializeCanary(t *testing.T) { require.NoError(t, d.controller.Initialize(d.canary)) } -func newDeploymentFixture() deploymentControllerFixture { +func newDeploymentFixture(dc deploymentConfigs) deploymentControllerFixture { // init canary canary := newDeploymentControllerTestCanary() flaggerClient := fakeFlagger.NewSimpleClientset(canary) // init kube clientset and register mock objects kubeClient := fake.NewSimpleClientset( - newDeploymentControllerTest(), + newDeploymentControllerTest(dc), newDeploymentControllerTestHPA(), newDeploymentControllerTestConfigMap(), newDeploymentControllerTestConfigMapEnv(), @@ -322,23 +328,23 @@ func newDeploymentControllerTestCanary() *flaggerv1.Canary { return cd } -func newDeploymentControllerTest() *appsv1.Deployment { +func newDeploymentControllerTest(dc deploymentConfigs) *appsv1.Deployment { d := &appsv1.Deployment{ TypeMeta: metav1.TypeMeta{APIVersion: appsv1.SchemeGroupVersion.String()}, ObjectMeta: metav1.ObjectMeta{ Namespace: "default", - Name: "podinfo", + Name: dc.name, }, Spec: appsv1.DeploymentSpec{ Selector: &metav1.LabelSelector{ MatchLabels: map[string]string{ - "name": "podinfo", + dc.label: dc.labelValue, }, }, Template: corev1.PodTemplateSpec{ ObjectMeta: metav1.ObjectMeta{ Labels: map[string]string{ - "name": "podinfo", + dc.label: dc.labelValue, }, }, Spec: corev1.PodSpec{ diff --git a/pkg/canary/deployment_ready_test.go b/pkg/canary/deployment_ready_test.go index e3d07678..619a0378 100644 --- a/pkg/canary/deployment_ready_test.go +++ b/pkg/canary/deployment_ready_test.go @@ -12,7 +12,8 @@ import ( ) func TestDeploymentController_IsReady(t *testing.T) { - mocks := newDeploymentFixture() + depConfig := deploymentConfigs{name: "podinfo", label: "name", labelValue: "podinfo"} + mocks := newDeploymentFixture(depConfig) mocks.controller.Initialize(mocks.canary) err := mocks.controller.IsPrimaryReady(mocks.canary) @@ -23,7 +24,8 @@ func TestDeploymentController_IsReady(t *testing.T) { } func TestDeploymentController_isDeploymentReady(t *testing.T) { - mocks := newDeploymentFixture() + depConfig := deploymentConfigs{name: "podinfo", label: "name", labelValue: "podinfo"} + mocks := newDeploymentFixture(depConfig) // observed generation is less than desired generation dp := &appsv1.Deployment{Status: appsv1.DeploymentStatus{ObservedGeneration: -1}} diff --git a/pkg/canary/deployment_status_test.go b/pkg/canary/deployment_status_test.go index 26b291a3..99f172f6 100644 --- a/pkg/canary/deployment_status_test.go +++ 
b/pkg/canary/deployment_status_test.go @@ -12,7 +12,8 @@ import ( ) func TestDeploymentController_SyncStatus(t *testing.T) { - mocks := newDeploymentFixture() + depConfig := deploymentConfigs{name: "podinfo", label: "name", labelValue: "podinfo"} + mocks := newDeploymentFixture(depConfig) mocks.initializeCanary(t) status := flaggerv1.CanaryStatus{ @@ -35,7 +36,8 @@ func TestDeploymentController_SyncStatus(t *testing.T) { } func TestDeploymentController_SetFailedChecks(t *testing.T) { - mocks := newDeploymentFixture() + depConfig := deploymentConfigs{name: "podinfo", label: "name", labelValue: "podinfo"} + mocks := newDeploymentFixture(depConfig) mocks.initializeCanary(t) err := mocks.controller.SetStatusFailedChecks(mocks.canary, 1) @@ -47,7 +49,8 @@ func TestDeploymentController_SetFailedChecks(t *testing.T) { } func TestDeploymentController_SetState(t *testing.T) { - mocks := newDeploymentFixture() + depConfig := deploymentConfigs{name: "podinfo", label: "name", labelValue: "podinfo"} + mocks := newDeploymentFixture(depConfig) mocks.initializeCanary(t) err := mocks.controller.SetStatusPhase(mocks.canary, flaggerv1.CanaryPhaseProgressing) From 364fd0db658afd58605d033e2896c696690c824d Mon Sep 17 00:00:00 2001 From: Forrest Thomas Date: Mon, 31 Aug 2020 16:21:07 -0700 Subject: [PATCH 08/47] setup daemonset tests to allow configurable name, label and selector --- pkg/canary/config_tracker_test.go | 14 +++++++------ pkg/canary/daemonset_controller_test.go | 23 +++++++++++++-------- pkg/canary/daemonset_fixture_test.go | 18 ++++++++++------ pkg/canary/daemonset_ready_test.go | 6 ++++-- pkg/canary/daemonset_status_test.go | 9 +++++--- pkg/canary/deployment_controller_test.go | 26 ++++++++++++------------ pkg/canary/deployment_ready_test.go | 8 ++++---- pkg/canary/deployment_status_test.go | 12 +++++------ 8 files changed, 68 insertions(+), 48 deletions(-) diff --git a/pkg/canary/config_tracker_test.go b/pkg/canary/config_tracker_test.go index fb8423f8..423befc7 100644 --- a/pkg/canary/config_tracker_test.go +++ b/pkg/canary/config_tracker_test.go @@ -25,8 +25,8 @@ func TestConfigIsDisabled(t *testing.T) { func TestConfigTracker_ConfigMaps(t *testing.T) { t.Run("deployment", func(t *testing.T) { - depConfig := deploymentConfigs{name: "podinfo", label: "name", labelValue: "podinfo"} - mocks := newDeploymentFixture(depConfig) + dc := deploymentConfigs{name: "podinfo", label: "name", labelValue: "podinfo"} + mocks := newDeploymentFixture(dc) configMap := newDeploymentControllerTestConfigMap() configMapProjected := newDeploymentControllerTestConfigProjected() @@ -90,7 +90,8 @@ func TestConfigTracker_ConfigMaps(t *testing.T) { }) t.Run("daemonset", func(t *testing.T) { - mocks := newDaemonSetFixture() + dc := daemonsetConfigs{name: "podinfo", label: "name", labelValue: "podinfo"} + mocks := newDaemonSetFixture(dc) configMap := newDaemonSetControllerTestConfigMap() configMapProjected := newDaemonSetControllerTestConfigProjected() @@ -157,8 +158,8 @@ func TestConfigTracker_ConfigMaps(t *testing.T) { func TestConfigTracker_Secrets(t *testing.T) { t.Run("deployment", func(t *testing.T) { - depConfig := deploymentConfigs{name: "podinfo", label: "name", labelValue: "podinfo"} - mocks := newDeploymentFixture(depConfig) + dc := deploymentConfigs{name: "podinfo", label: "name", labelValue: "podinfo"} + mocks := newDeploymentFixture(dc) secret := newDeploymentControllerTestSecret() secretProjected := newDeploymentControllerTestSecretProjected() @@ -222,7 +223,8 @@ func TestConfigTracker_Secrets(t *testing.T) { 
}) t.Run("daemonset", func(t *testing.T) { - mocks := newDaemonSetFixture() + dc := daemonsetConfigs{name: "podinfo", label: "name", labelValue: "podinfo"} + mocks := newDaemonSetFixture(dc) secret := newDaemonSetControllerTestSecret() secretProjected := newDaemonSetControllerTestSecretProjected() diff --git a/pkg/canary/daemonset_controller_test.go b/pkg/canary/daemonset_controller_test.go index d7c67e88..581d4c3f 100644 --- a/pkg/canary/daemonset_controller_test.go +++ b/pkg/canary/daemonset_controller_test.go @@ -15,21 +15,23 @@ import ( ) func TestDaemonSetController_Sync(t *testing.T) { - mocks := newDaemonSetFixture() + dc := daemonsetConfigs{name: "podinfo", label: "name", labelValue: "podinfo"} + mocks := newDaemonSetFixture(dc) err := mocks.controller.Initialize(mocks.canary) require.NoError(t, err) daePrimary, err := mocks.kubeClient.AppsV1().DaemonSets("default").Get(context.TODO(), "podinfo-primary", metav1.GetOptions{}) require.NoError(t, err) - dae := newDaemonSetControllerTestPodInfo() + dae := newDaemonSetControllerTestPodInfo(dc) primaryImage := daePrimary.Spec.Template.Spec.Containers[0].Image sourceImage := dae.Spec.Template.Spec.Containers[0].Image assert.Equal(t, primaryImage, sourceImage) } func TestDaemonSetController_Promote(t *testing.T) { - mocks := newDaemonSetFixture() + dc := daemonsetConfigs{name: "podinfo", label: "name", labelValue: "podinfo"} + mocks := newDaemonSetFixture(dc) err := mocks.controller.Initialize(mocks.canary) require.NoError(t, err) @@ -58,7 +60,8 @@ func TestDaemonSetController_Promote(t *testing.T) { } func TestDaemonSetController_NoConfigTracking(t *testing.T) { - mocks := newDaemonSetFixture() + dc := daemonsetConfigs{name: "podinfo", label: "name", labelValue: "podinfo"} + mocks := newDaemonSetFixture(dc) mocks.controller.configTracker = &NopTracker{} err := mocks.controller.Initialize(mocks.canary) @@ -75,7 +78,8 @@ func TestDaemonSetController_NoConfigTracking(t *testing.T) { } func TestDaemonSetController_HasTargetChanged(t *testing.T) { - mocks := newDaemonSetFixture() + dc := daemonsetConfigs{name: "podinfo", label: "name", labelValue: "podinfo"} + mocks := newDaemonSetFixture(dc) err := mocks.controller.Initialize(mocks.canary) require.NoError(t, err) @@ -163,7 +167,8 @@ func TestDaemonSetController_HasTargetChanged(t *testing.T) { func TestDaemonSetController_Scale(t *testing.T) { t.Run("ScaleToZero", func(t *testing.T) { - mocks := newDaemonSetFixture() + dc := daemonsetConfigs{name: "podinfo", label: "name", labelValue: "podinfo"} + mocks := newDaemonSetFixture(dc) err := mocks.controller.Initialize(mocks.canary) require.NoError(t, err) @@ -179,7 +184,8 @@ func TestDaemonSetController_Scale(t *testing.T) { } }) t.Run("ScaleFromZeo", func(t *testing.T) { - mocks := newDaemonSetFixture() + dc := daemonsetConfigs{name: "podinfo", label: "name", labelValue: "podinfo"} + mocks := newDaemonSetFixture(dc) err := mocks.controller.Initialize(mocks.canary) require.NoError(t, err) @@ -197,7 +203,8 @@ func TestDaemonSetController_Scale(t *testing.T) { } func TestDaemonSetController_Finalize(t *testing.T) { - mocks := newDaemonSetFixture() + dc := daemonsetConfigs{name: "podinfo", label: "name", labelValue: "podinfo"} + mocks := newDaemonSetFixture(dc) err := mocks.controller.Initialize(mocks.canary) require.NoError(t, err) diff --git a/pkg/canary/daemonset_fixture_test.go b/pkg/canary/daemonset_fixture_test.go index 4d0e8291..db474126 100644 --- a/pkg/canary/daemonset_fixture_test.go +++ b/pkg/canary/daemonset_fixture_test.go @@ -23,14 
+23,20 @@ type daemonSetControllerFixture struct { logger *zap.SugaredLogger } -func newDaemonSetFixture() daemonSetControllerFixture { +type daemonsetConfigs struct { + name string + labelValue string + label string +} + +func newDaemonSetFixture(dc daemonsetConfigs) daemonSetControllerFixture { // init canary canary := newDaemonSetControllerTestCanary() flaggerClient := fakeFlagger.NewSimpleClientset(canary) // init kube clientset and register mock objects kubeClient := fake.NewSimpleClientset( - newDaemonSetControllerTestPodInfo(), + newDaemonSetControllerTestPodInfo(dc), newDaemonSetControllerTestConfigMap(), newDaemonSetControllerTestConfigMapEnv(), newDaemonSetControllerTestConfigMapVol(), @@ -282,23 +288,23 @@ func newDaemonSetControllerTestCanary() *flaggerv1.Canary { return cd } -func newDaemonSetControllerTestPodInfo() *appsv1.DaemonSet { +func newDaemonSetControllerTestPodInfo(dc daemonsetConfigs) *appsv1.DaemonSet { d := &appsv1.DaemonSet{ TypeMeta: metav1.TypeMeta{APIVersion: appsv1.SchemeGroupVersion.String()}, ObjectMeta: metav1.ObjectMeta{ Namespace: "default", - Name: "podinfo", + Name: dc.name, }, Spec: appsv1.DaemonSetSpec{ Selector: &metav1.LabelSelector{ MatchLabels: map[string]string{ - "name": "podinfo", + dc.label: dc.labelValue, }, }, Template: corev1.PodTemplateSpec{ ObjectMeta: metav1.ObjectMeta{ Labels: map[string]string{ - "name": "podinfo", + dc.label: dc.labelValue, }, }, Spec: corev1.PodSpec{ diff --git a/pkg/canary/daemonset_ready_test.go b/pkg/canary/daemonset_ready_test.go index 99cbda2b..4638bed4 100644 --- a/pkg/canary/daemonset_ready_test.go +++ b/pkg/canary/daemonset_ready_test.go @@ -12,7 +12,8 @@ import ( ) func TestDaemonSetController_IsReady(t *testing.T) { - mocks := newDaemonSetFixture() + dc := daemonsetConfigs{name: "podinfo", label: "name", labelValue: "podinfo"} + mocks := newDaemonSetFixture(dc) err := mocks.controller.Initialize(mocks.canary) require.NoError(t, err) @@ -24,7 +25,8 @@ func TestDaemonSetController_IsReady(t *testing.T) { } func TestDaemonSetController_isDaemonSetReady(t *testing.T) { - mocks := newDaemonSetFixture() + dc := daemonsetConfigs{name: "podinfo", label: "name", labelValue: "podinfo"} + mocks := newDaemonSetFixture(dc) cd := &flaggerv1.Canary{} // observed generation is less than desired generation diff --git a/pkg/canary/daemonset_status_test.go b/pkg/canary/daemonset_status_test.go index 12f822d5..334f5c7b 100644 --- a/pkg/canary/daemonset_status_test.go +++ b/pkg/canary/daemonset_status_test.go @@ -12,7 +12,8 @@ import ( ) func TestDaemonSetController_SyncStatus(t *testing.T) { - mocks := newDaemonSetFixture() + dc := daemonsetConfigs{name: "podinfo", label: "name", labelValue: "podinfo"} + mocks := newDaemonSetFixture(dc) err := mocks.controller.Initialize(mocks.canary) require.NoError(t, err) @@ -36,7 +37,8 @@ func TestDaemonSetController_SyncStatus(t *testing.T) { } func TestDaemonSetController_SetFailedChecks(t *testing.T) { - mocks := newDaemonSetFixture() + dc := daemonsetConfigs{name: "podinfo", label: "name", labelValue: "podinfo"} + mocks := newDaemonSetFixture(dc) err := mocks.controller.Initialize(mocks.canary) require.NoError(t, err) @@ -49,7 +51,8 @@ func TestDaemonSetController_SetFailedChecks(t *testing.T) { } func TestDaemonSetController_SetState(t *testing.T) { - mocks := newDaemonSetFixture() + dc := daemonsetConfigs{name: "podinfo", label: "name", labelValue: "podinfo"} + mocks := newDaemonSetFixture(dc) err := mocks.controller.Initialize(mocks.canary) require.NoError(t, err) diff --git 
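
The fixture changes above let each test choose the mock workload's name, selector key, and selector value independently. A rough sketch of how such a config drives the generated selector; mockSelector is a hypothetical helper for illustration, while the real fixture builds a complete appsv1.DaemonSet or appsv1.Deployment:

```go
package main

import "fmt"

// daemonsetConfigs mirrors the fixture struct introduced above.
type daemonsetConfigs struct {
	name       string
	label      string
	labelValue string
}

// mockSelector is a hypothetical stand-in for the selector the fixture puts on
// the generated workload.
func mockSelector(dc daemonsetConfigs) map[string]string {
	return map[string]string{dc.label: dc.labelValue}
}

func main() {
	// Consistent naming: the target name matches the selector value.
	consistent := daemonsetConfigs{name: "podinfo", label: "name", labelValue: "podinfo"}
	// Inconsistent naming: the target name differs from the selector value,
	// which is the case these fixtures now let the tests cover.
	inconsistent := daemonsetConfigs{name: "podinfo-service", label: "name", labelValue: "podinfo"}

	fmt.Println(consistent.name, mockSelector(consistent))
	fmt.Println(inconsistent.name, mockSelector(inconsistent))
}
```
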
a/pkg/canary/deployment_controller_test.go b/pkg/canary/deployment_controller_test.go index ce0703f3..ec4b9b57 100644 --- a/pkg/canary/deployment_controller_test.go +++ b/pkg/canary/deployment_controller_test.go @@ -15,14 +15,14 @@ import ( ) func TestDeploymentController_Sync(t *testing.T) { - depConfig := deploymentConfigs{name: "podinfo", label: "name", labelValue: "podinfo"} - mocks := newDeploymentFixture(depConfig) + dc := deploymentConfigs{name: "podinfo", label: "name", labelValue: "podinfo"} + mocks := newDeploymentFixture(dc) mocks.initializeCanary(t) depPrimary, err := mocks.kubeClient.AppsV1().Deployments("default").Get(context.TODO(), "podinfo-primary", metav1.GetOptions{}) require.NoError(t, err) - dep := newDeploymentControllerTest(depConfig) + dep := newDeploymentControllerTest(dc) primaryImage := depPrimary.Spec.Template.Spec.Containers[0].Image sourceImage := dep.Spec.Template.Spec.Containers[0].Image assert.Equal(t, sourceImage, primaryImage) @@ -33,8 +33,8 @@ func TestDeploymentController_Sync(t *testing.T) { } func TestDeploymentController_Promote(t *testing.T) { - depConfig := deploymentConfigs{name: "podinfo", label: "name", labelValue: "podinfo"} - mocks := newDeploymentFixture(depConfig) + dc := deploymentConfigs{name: "podinfo", label: "name", labelValue: "podinfo"} + mocks := newDeploymentFixture(dc) mocks.initializeCanary(t) dep2 := newDeploymentControllerTestV2() @@ -74,8 +74,8 @@ func TestDeploymentController_Promote(t *testing.T) { } func TestDeploymentController_ScaleToZero(t *testing.T) { - depConfig := deploymentConfigs{name: "podinfo", label: "name", labelValue: "podinfo"} - mocks := newDeploymentFixture(depConfig) + dc := deploymentConfigs{name: "podinfo", label: "name", labelValue: "podinfo"} + mocks := newDeploymentFixture(dc) mocks.initializeCanary(t) err := mocks.controller.ScaleToZero(mocks.canary) @@ -87,8 +87,8 @@ func TestDeploymentController_ScaleToZero(t *testing.T) { } func TestDeploymentController_NoConfigTracking(t *testing.T) { - depConfig := deploymentConfigs{name: "podinfo", label: "name", labelValue: "podinfo"} - mocks := newDeploymentFixture(depConfig) + dc := deploymentConfigs{name: "podinfo", label: "name", labelValue: "podinfo"} + mocks := newDeploymentFixture(dc) mocks.controller.configTracker = &NopTracker{} mocks.initializeCanary(t) @@ -103,8 +103,8 @@ func TestDeploymentController_NoConfigTracking(t *testing.T) { } func TestDeploymentController_HasTargetChanged(t *testing.T) { - depConfig := deploymentConfigs{name: "podinfo", label: "name", labelValue: "podinfo"} - mocks := newDeploymentFixture(depConfig) + dc := deploymentConfigs{name: "podinfo", label: "name", labelValue: "podinfo"} + mocks := newDeploymentFixture(dc) mocks.initializeCanary(t) // save last applied hash @@ -190,8 +190,8 @@ func TestDeploymentController_HasTargetChanged(t *testing.T) { } func TestDeploymentController_Finalize(t *testing.T) { - depConfig := deploymentConfigs{name: "podinfo", label: "name", labelValue: "podinfo"} - mocks := newDeploymentFixture(depConfig) + dc := deploymentConfigs{name: "podinfo", label: "name", labelValue: "podinfo"} + mocks := newDeploymentFixture(dc) for _, tc := range []struct { mocks deploymentControllerFixture diff --git a/pkg/canary/deployment_ready_test.go b/pkg/canary/deployment_ready_test.go index 619a0378..48da6a47 100644 --- a/pkg/canary/deployment_ready_test.go +++ b/pkg/canary/deployment_ready_test.go @@ -12,8 +12,8 @@ import ( ) func TestDeploymentController_IsReady(t *testing.T) { - depConfig := 
deploymentConfigs{name: "podinfo", label: "name", labelValue: "podinfo"} - mocks := newDeploymentFixture(depConfig) + dc := deploymentConfigs{name: "podinfo", label: "name", labelValue: "podinfo"} + mocks := newDeploymentFixture(dc) mocks.controller.Initialize(mocks.canary) err := mocks.controller.IsPrimaryReady(mocks.canary) @@ -24,8 +24,8 @@ func TestDeploymentController_IsReady(t *testing.T) { } func TestDeploymentController_isDeploymentReady(t *testing.T) { - depConfig := deploymentConfigs{name: "podinfo", label: "name", labelValue: "podinfo"} - mocks := newDeploymentFixture(depConfig) + dc := deploymentConfigs{name: "podinfo", label: "name", labelValue: "podinfo"} + mocks := newDeploymentFixture(dc) // observed generation is less than desired generation dp := &appsv1.Deployment{Status: appsv1.DeploymentStatus{ObservedGeneration: -1}} diff --git a/pkg/canary/deployment_status_test.go b/pkg/canary/deployment_status_test.go index 99f172f6..c7364b57 100644 --- a/pkg/canary/deployment_status_test.go +++ b/pkg/canary/deployment_status_test.go @@ -12,8 +12,8 @@ import ( ) func TestDeploymentController_SyncStatus(t *testing.T) { - depConfig := deploymentConfigs{name: "podinfo", label: "name", labelValue: "podinfo"} - mocks := newDeploymentFixture(depConfig) + dc := deploymentConfigs{name: "podinfo", label: "name", labelValue: "podinfo"} + mocks := newDeploymentFixture(dc) mocks.initializeCanary(t) status := flaggerv1.CanaryStatus{ @@ -36,8 +36,8 @@ func TestDeploymentController_SyncStatus(t *testing.T) { } func TestDeploymentController_SetFailedChecks(t *testing.T) { - depConfig := deploymentConfigs{name: "podinfo", label: "name", labelValue: "podinfo"} - mocks := newDeploymentFixture(depConfig) + dc := deploymentConfigs{name: "podinfo", label: "name", labelValue: "podinfo"} + mocks := newDeploymentFixture(dc) mocks.initializeCanary(t) err := mocks.controller.SetStatusFailedChecks(mocks.canary, 1) @@ -49,8 +49,8 @@ func TestDeploymentController_SetFailedChecks(t *testing.T) { } func TestDeploymentController_SetState(t *testing.T) { - depConfig := deploymentConfigs{name: "podinfo", label: "name", labelValue: "podinfo"} - mocks := newDeploymentFixture(depConfig) + dc := deploymentConfigs{name: "podinfo", label: "name", labelValue: "podinfo"} + mocks := newDeploymentFixture(dc) mocks.initializeCanary(t) err := mocks.controller.SetStatusPhase(mocks.canary, flaggerv1.CanaryPhaseProgressing) From 1bd7ce4eedbeff6e29883373d4dbb811b6dac9de Mon Sep 17 00:00:00 2001 From: Forrest Thomas Date: Mon, 31 Aug 2020 17:13:18 -0700 Subject: [PATCH 09/47] add a small test for verifying the label selector is named as expected for deployments --- pkg/canary/deployment_controller_test.go | 31 ++++++++++++++++++++++-- pkg/canary/deployment_fixture_test.go | 11 ++++++--- 2 files changed, 37 insertions(+), 5 deletions(-) diff --git a/pkg/canary/deployment_controller_test.go b/pkg/canary/deployment_controller_test.go index ec4b9b57..f7918287 100644 --- a/pkg/canary/deployment_controller_test.go +++ b/pkg/canary/deployment_controller_test.go @@ -2,6 +2,7 @@ package canary import ( "context" + "fmt" "testing" "github.com/stretchr/testify/assert" @@ -14,12 +15,12 @@ import ( flaggerv1 "github.com/weaveworks/flagger/pkg/apis/flagger/v1beta1" ) -func TestDeploymentController_Sync(t *testing.T) { +func TestDeploymentController_Sync_ConsistentNaming(t *testing.T) { dc := deploymentConfigs{name: "podinfo", label: "name", labelValue: "podinfo"} mocks := newDeploymentFixture(dc) mocks.initializeCanary(t) - depPrimary, err := 
mocks.kubeClient.AppsV1().Deployments("default").Get(context.TODO(), "podinfo-primary", metav1.GetOptions{}) + depPrimary, err := mocks.kubeClient.AppsV1().Deployments("default").Get(context.TODO(), fmt.Sprintf("%s-primary", dc.name), metav1.GetOptions{}) require.NoError(t, err) dep := newDeploymentControllerTest(dc) @@ -27,6 +28,32 @@ func TestDeploymentController_Sync(t *testing.T) { sourceImage := dep.Spec.Template.Spec.Containers[0].Image assert.Equal(t, sourceImage, primaryImage) + primarySelectorValue := depPrimary.Spec.Selector.MatchLabels[dc.label] + sourceSelectorValue := dep.Spec.Selector.MatchLabels[dc.label] + assert.Equal(t, primarySelectorValue, fmt.Sprintf("%s-primary", sourceSelectorValue)) + + hpaPrimary, err := mocks.kubeClient.AutoscalingV2beta1().HorizontalPodAutoscalers("default").Get(context.TODO(), "podinfo-primary", metav1.GetOptions{}) + require.NoError(t, err) + assert.Equal(t, depPrimary.Name, hpaPrimary.Spec.ScaleTargetRef.Name) +} + +func TestDeploymentController_Sync_InconsistentNaming(t *testing.T) { + dc := deploymentConfigs{name: "podinfo-service", label: "name", labelValue: "podinfo"} + mocks := newDeploymentFixture(dc) + mocks.initializeCanary(t) + + depPrimary, err := mocks.kubeClient.AppsV1().Deployments("default").Get(context.TODO(), fmt.Sprintf("%s-primary", dc.name), metav1.GetOptions{}) + require.NoError(t, err) + + dep := newDeploymentControllerTest(dc) + primaryImage := depPrimary.Spec.Template.Spec.Containers[0].Image + sourceImage := dep.Spec.Template.Spec.Containers[0].Image + assert.Equal(t, sourceImage, primaryImage) + + primarySelectorValue := depPrimary.Spec.Selector.MatchLabels[dc.label] + sourceSelectorValue := dep.Spec.Selector.MatchLabels[dc.label] + assert.Equal(t, primarySelectorValue, fmt.Sprintf("%s-primary", sourceSelectorValue)) + hpaPrimary, err := mocks.kubeClient.AutoscalingV2beta1().HorizontalPodAutoscalers("default").Get(context.TODO(), "podinfo-primary", metav1.GetOptions{}) require.NoError(t, err) assert.Equal(t, depPrimary.Name, hpaPrimary.Spec.ScaleTargetRef.Name) diff --git a/pkg/canary/deployment_fixture_test.go b/pkg/canary/deployment_fixture_test.go index cf474a11..7cbe6bdc 100644 --- a/pkg/canary/deployment_fixture_test.go +++ b/pkg/canary/deployment_fixture_test.go @@ -29,6 +29,10 @@ type deploymentControllerFixture struct { logger *zap.SugaredLogger } +type canaryConfigs struct { + targetName string +} + type deploymentConfigs struct { name string labelValue string @@ -59,7 +63,8 @@ func (d deploymentControllerFixture) initializeCanary(t *testing.T) { func newDeploymentFixture(dc deploymentConfigs) deploymentControllerFixture { // init canary - canary := newDeploymentControllerTestCanary() + cc := canaryConfigs{targetName: dc.name} + canary := newDeploymentControllerTestCanary(cc) flaggerClient := fakeFlagger.NewSimpleClientset(canary) // init kube clientset and register mock objects @@ -299,7 +304,7 @@ func newDeploymentControllerTestSecretTrackerDisabled() *corev1.Secret { } } -func newDeploymentControllerTestCanary() *flaggerv1.Canary { +func newDeploymentControllerTestCanary(cc canaryConfigs) *flaggerv1.Canary { cd := &flaggerv1.Canary{ TypeMeta: metav1.TypeMeta{APIVersion: flaggerv1.SchemeGroupVersion.String()}, ObjectMeta: metav1.ObjectMeta{ @@ -308,7 +313,7 @@ func newDeploymentControllerTestCanary() *flaggerv1.Canary { }, Spec: flaggerv1.CanarySpec{ TargetRef: flaggerv1.CrossNamespaceObjectReference{ - Name: "podinfo", + Name: cc.targetName, APIVersion: "apps/v1", Kind: "Deployment", }, From 
ef57dcf75dc45d25be0aa8a678731d238a9ac2d0 Mon Sep 17 00:00:00 2001 From: Forrest Thomas Date: Tue, 1 Sep 2020 09:32:51 -0700 Subject: [PATCH 10/47] add a small test for verifying the label selector is named as expected for daemonsets --- pkg/canary/daemonset_controller_test.go | 28 +++++++++++++++++++++++-- pkg/canary/daemonset_fixture_test.go | 6 +++--- 2 files changed, 29 insertions(+), 5 deletions(-) diff --git a/pkg/canary/daemonset_controller_test.go b/pkg/canary/daemonset_controller_test.go index 581d4c3f..85bd2c2c 100644 --- a/pkg/canary/daemonset_controller_test.go +++ b/pkg/canary/daemonset_controller_test.go @@ -2,6 +2,7 @@ package canary import ( "context" + "fmt" "testing" "github.com/stretchr/testify/assert" @@ -14,19 +15,42 @@ import ( flaggerv1 "github.com/weaveworks/flagger/pkg/apis/flagger/v1beta1" ) -func TestDaemonSetController_Sync(t *testing.T) { +func TestDaemonSetController_Sync_ConsistentNaming(t *testing.T) { dc := daemonsetConfigs{name: "podinfo", label: "name", labelValue: "podinfo"} mocks := newDaemonSetFixture(dc) err := mocks.controller.Initialize(mocks.canary) require.NoError(t, err) - daePrimary, err := mocks.kubeClient.AppsV1().DaemonSets("default").Get(context.TODO(), "podinfo-primary", metav1.GetOptions{}) + daePrimary, err := mocks.kubeClient.AppsV1().DaemonSets("default").Get(context.TODO(), fmt.Sprintf("%s-primary", dc.name), metav1.GetOptions{}) require.NoError(t, err) dae := newDaemonSetControllerTestPodInfo(dc) primaryImage := daePrimary.Spec.Template.Spec.Containers[0].Image sourceImage := dae.Spec.Template.Spec.Containers[0].Image assert.Equal(t, primaryImage, sourceImage) + + primarySelectorValue := daePrimary.Spec.Selector.MatchLabels[dc.label] + sourceSelectorValue := dae.Spec.Selector.MatchLabels[dc.label] + assert.Equal(t, primarySelectorValue, fmt.Sprintf("%s-primary", sourceSelectorValue)) +} + +func TestDaemonSetController_Sync_InconsistentNaming(t *testing.T) { + dc := daemonsetConfigs{name: "podinfo-service", label: "name", labelValue: "podinfo"} + mocks := newDaemonSetFixture(dc) + err := mocks.controller.Initialize(mocks.canary) + require.NoError(t, err) + + daePrimary, err := mocks.kubeClient.AppsV1().DaemonSets("default").Get(context.TODO(), fmt.Sprintf("%s-primary", dc.name), metav1.GetOptions{}) + require.NoError(t, err) + + dae := newDaemonSetControllerTestPodInfo(dc) + primaryImage := daePrimary.Spec.Template.Spec.Containers[0].Image + sourceImage := dae.Spec.Template.Spec.Containers[0].Image + assert.Equal(t, primaryImage, sourceImage) + + primarySelectorValue := daePrimary.Spec.Selector.MatchLabels[dc.label] + sourceSelectorValue := dae.Spec.Selector.MatchLabels[dc.label] + assert.Equal(t, primarySelectorValue, fmt.Sprintf("%s-primary", sourceSelectorValue)) } func TestDaemonSetController_Promote(t *testing.T) { diff --git a/pkg/canary/daemonset_fixture_test.go b/pkg/canary/daemonset_fixture_test.go index db474126..a844c6ca 100644 --- a/pkg/canary/daemonset_fixture_test.go +++ b/pkg/canary/daemonset_fixture_test.go @@ -31,7 +31,7 @@ type daemonsetConfigs struct { func newDaemonSetFixture(dc daemonsetConfigs) daemonSetControllerFixture { // init canary - canary := newDaemonSetControllerTestCanary() + canary := newDaemonSetControllerTestCanary(dc) flaggerClient := fakeFlagger.NewSimpleClientset(canary) // init kube clientset and register mock objects @@ -270,7 +270,7 @@ func newDaemonSetControllerTestSecretTrackerDisabled() *corev1.Secret { } } -func newDaemonSetControllerTestCanary() *flaggerv1.Canary { +func 
newDaemonSetControllerTestCanary(dc daemonsetConfigs) *flaggerv1.Canary { cd := &flaggerv1.Canary{ TypeMeta: metav1.TypeMeta{APIVersion: flaggerv1.SchemeGroupVersion.String()}, ObjectMeta: metav1.ObjectMeta{ @@ -279,7 +279,7 @@ func newDaemonSetControllerTestCanary() *flaggerv1.Canary { }, Spec: flaggerv1.CanarySpec{ TargetRef: flaggerv1.CrossNamespaceObjectReference{ - Name: "podinfo", + Name: dc.name, APIVersion: "apps/v1", Kind: "DaemonSet", }, From 621150cce611a37f3487039ba581aeaa2038a14d Mon Sep 17 00:00:00 2001 From: Forrest Thomas Date: Tue, 1 Sep 2020 14:42:29 -0700 Subject: [PATCH 11/47] add e2e istio tests for inconsistent naming between service name and selector --- pkg/canary/deployment_controller_test.go | 6 +- test/e2e-istio-tests.sh | 78 +++++++++++++++++++++++- test/e2e-workload.yaml | 69 +++++++++++++++++++++ 3 files changed, 146 insertions(+), 7 deletions(-) diff --git a/pkg/canary/deployment_controller_test.go b/pkg/canary/deployment_controller_test.go index f7918287..5c5aef15 100644 --- a/pkg/canary/deployment_controller_test.go +++ b/pkg/canary/deployment_controller_test.go @@ -29,8 +29,7 @@ func TestDeploymentController_Sync_ConsistentNaming(t *testing.T) { assert.Equal(t, sourceImage, primaryImage) primarySelectorValue := depPrimary.Spec.Selector.MatchLabels[dc.label] - sourceSelectorValue := dep.Spec.Selector.MatchLabels[dc.label] - assert.Equal(t, primarySelectorValue, fmt.Sprintf("%s-primary", sourceSelectorValue)) + assert.Equal(t, primarySelectorValue, fmt.Sprintf("%s-primary", dc.labelValue)) hpaPrimary, err := mocks.kubeClient.AutoscalingV2beta1().HorizontalPodAutoscalers("default").Get(context.TODO(), "podinfo-primary", metav1.GetOptions{}) require.NoError(t, err) @@ -51,8 +50,7 @@ func TestDeploymentController_Sync_InconsistentNaming(t *testing.T) { assert.Equal(t, sourceImage, primaryImage) primarySelectorValue := depPrimary.Spec.Selector.MatchLabels[dc.label] - sourceSelectorValue := dep.Spec.Selector.MatchLabels[dc.label] - assert.Equal(t, primarySelectorValue, fmt.Sprintf("%s-primary", sourceSelectorValue)) + assert.Equal(t, primarySelectorValue, fmt.Sprintf("%s-primary", dc.labelValue)) hpaPrimary, err := mocks.kubeClient.AutoscalingV2beta1().HorizontalPodAutoscalers("default").Get(context.TODO(), "podinfo-primary", metav1.GetOptions{}) require.NoError(t, err) diff --git a/test/e2e-istio-tests.sh b/test/e2e-istio-tests.sh index 5dc4732c..58ea1052 100755 --- a/test/e2e-istio-tests.sh +++ b/test/e2e-istio-tests.sh @@ -44,7 +44,7 @@ spec: ) EOF -echo '>>> Initialising canary' +echo '>>> Initialising canaries' cat <>> Waiting for primary to be ready' retries=50 count=0 ok=false until ${ok}; do kubectl -n test get canary/podinfo | grep 'Initialized' && ok=true || ok=false + kubectl -n test get canary/podinfo-service | grep 'Initialized' && ok=true || ok=false sleep 5 count=$(($count + 1)) if [[ ${count} -eq ${retries} ]]; then @@ -115,8 +169,26 @@ done echo '✔ Canary initialization test passed' -kubectl -n test get svc/podinfo -oyaml | grep annotations-test -kubectl -n test get svc/podinfo -oyaml | grep labels-test +passed=$(kubectl -n test get svc/podinfo -oyaml 2>&1 | { grep annotations-test || true; }) +if [ -z "$passed" ]; then + echo -e '\u2716 podinfo annotations test failed' + exit 1 +fi +passed=$(kubectl -n test get svc/podinfo -oyaml 2>&1 | { grep labels-test || true; }) +if [ -z "$passed" ]; then + echo -e '\u2716 podinfo labels test failed' + exit 1 +fi +passed=$(kubectl -n test get svc/podinfo -o jsonpath='{.spec.selector.app}' 2>&1 | { grep 
podinfo-primary || true; }) +if [ -z "$passed" ]; then + echo -e '\u2716 podinfo selector test failed' + exit 1 +fi +passed=$(kubectl -n test get svc/podinfo-service -o jsonpath='{.spec.selector.app}' 2>&1 | { grep podinfo-primary || true; }) +if [ -z "$passed" ]; then + echo -e '\u2716 podinfo-service selector test failed' + exit 1 +fi echo '✔ Canary service custom metadata test passed' diff --git a/test/e2e-workload.yaml b/test/e2e-workload.yaml index 175803d9..8f0660c0 100644 --- a/test/e2e-workload.yaml +++ b/test/e2e-workload.yaml @@ -66,3 +66,72 @@ spec: requests: cpu: 1m memory: 16Mi +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: podinfo-service + namespace: test + labels: + app: podinfo +spec: + minReadySeconds: 5 + revisionHistoryLimit: 5 + progressDeadlineSeconds: 60 + strategy: + rollingUpdate: + maxUnavailable: 0 + type: RollingUpdate + selector: + matchLabels: + app: podinfo + template: + metadata: + annotations: + prometheus.io/scrape: "true" + prometheus.io/port: "9797" + labels: + app: podinfo + spec: + containers: + - name: podinfod + image: stefanprodan/podinfo:3.1.0 + imagePullPolicy: IfNotPresent + ports: + - name: http + containerPort: 9898 + protocol: TCP + - name: http-metrics + containerPort: 9797 + protocol: TCP + - name: grpc + containerPort: 9999 + protocol: TCP + command: + - ./podinfo + - --port=9898 + - --port-metrics=9797 + - --grpc-port=9999 + - --grpc-service-name=podinfo + - --level=info + - --random-delay=false + - --random-error=false + livenessProbe: + httpGet: + port: 9898 + path: /healthz + initialDelaySeconds: 5 + timeoutSeconds: 5 + readinessProbe: + httpGet: + port: 9898 + path: /readyz + initialDelaySeconds: 5 + timeoutSeconds: 5 + resources: + limits: + cpu: 1000m + memory: 128Mi + requests: + cpu: 1m + memory: 16Mi \ No newline at end of file From 2abfec05c9e7aaed9cf52999638d905c11071571 Mon Sep 17 00:00:00 2001 From: Forrest Thomas Date: Tue, 1 Sep 2020 15:53:09 -0700 Subject: [PATCH 12/47] add e2e contour tests for inconsistent naming between service name and selector --- test/e2e-contour-tests.sh | 66 +++++++++++++++++++++++++++++++++++++++ 1 file changed, 66 insertions(+) diff --git a/test/e2e-contour-tests.sh b/test/e2e-contour-tests.sh index 8c03498f..4b06f534 100755 --- a/test/e2e-contour-tests.sh +++ b/test/e2e-contour-tests.sh @@ -85,6 +85,59 @@ spec: logCmdOutput: "true" EOF +cat <>> Waiting for primary to be ready' retries=50 count=0 @@ -104,6 +157,19 @@ kubectl -n test get httpproxy podinfo -oyaml | grep 'projectcontour.io/ingress.c echo '✔ Canary initialization test passed' +passed=$(kubectl -n test get svc/podinfo -o jsonpath='{.spec.selector.app}' 2>&1 | { grep podinfo-primary || true; }) +if [ -z "$passed" ]; then + echo -e '\u2716 podinfo selector test failed' + exit 1 +fi +passed=$(kubectl -n test get svc/podinfo-service-canary -o jsonpath='{.spec.selector.app}' 2>&1 | { grep podinfo || true; }) +if [ -z "$passed" ]; then + echo -e '\u2716 podinfo-service selector test failed' + exit 1 +fi + +echo '✔ Canary service custom metadata test passed' + echo '>>> Triggering canary deployment' kubectl -n test set image deployment/podinfo podinfod=stefanprodan/podinfo:3.1.1 From 29c30569408e8fc5af7efc8db76133438120f3e8 Mon Sep 17 00:00:00 2001 From: Forrest Thomas Date: Wed, 2 Sep 2020 10:15:49 -0700 Subject: [PATCH 13/47] add e2e gloo tests for inconsistent naming between service name and selector --- test/e2e-gloo-tests.sh | 66 ++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 66 insertions(+) diff --git 
a/test/e2e-gloo-tests.sh b/test/e2e-gloo-tests.sh index 75bad44d..c2456ee6 100755 --- a/test/e2e-gloo-tests.sh +++ b/test/e2e-gloo-tests.sh @@ -88,6 +88,59 @@ spec: logCmdOutput: "true" EOF +cat <>> Waiting for primary to be ready' retries=50 count=0 @@ -105,6 +158,19 @@ done echo '✔ Canary initialization test passed' +passed=$(kubectl -n test get svc/podinfo -o jsonpath='{.spec.selector.app}' 2>&1 | { grep podinfo-primary || true; }) +if [ -z "$passed" ]; then + echo -e '\u2716 podinfo selector test failed' + exit 1 +fi +passed=$(kubectl -n test get svc/podinfo-service-canary -o jsonpath='{.spec.selector.app}' 2>&1 | { grep podinfo || true; }) +if [ -z "$passed" ]; then + echo -e '\u2716 podinfo-service selector test failed' + exit 1 +fi + +echo '✔ Canary service custom metadata test passed' + echo '>>> Triggering canary deployment' kubectl -n test set image deployment/podinfo podinfod=stefanprodan/podinfo:3.1.1 From 7ade97790e3334cf47902d27fb11a9ae7adf9f27 Mon Sep 17 00:00:00 2001 From: Forrest Thomas Date: Wed, 2 Sep 2020 10:16:41 -0700 Subject: [PATCH 14/47] update e2e istio test to query the canary service instead of the apex service --- test/e2e-istio-tests.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test/e2e-istio-tests.sh b/test/e2e-istio-tests.sh index 58ea1052..adde9c1e 100755 --- a/test/e2e-istio-tests.sh +++ b/test/e2e-istio-tests.sh @@ -184,7 +184,7 @@ if [ -z "$passed" ]; then echo -e '\u2716 podinfo selector test failed' exit 1 fi -passed=$(kubectl -n test get svc/podinfo-service -o jsonpath='{.spec.selector.app}' 2>&1 | { grep podinfo-primary || true; }) +passed=$(kubectl -n test get svc/podinfo-service-canary -o jsonpath='{.spec.selector.app}' 2>&1 | { grep podinfo || true; }) if [ -z "$passed" ]; then echo -e '\u2716 podinfo-service selector test failed' exit 1 From 930eb8919dc9ba6df0cb4cacaacfddae9e26c0fd Mon Sep 17 00:00:00 2001 From: Forrest Thomas Date: Wed, 2 Sep 2020 10:47:41 -0700 Subject: [PATCH 15/47] add e2e linkerd tests for inconsistent naming between service name and selector --- test/e2e-linkerd-tests.sh | 66 +++++++++++++++++++++++++++++++++++++++ 1 file changed, 66 insertions(+) diff --git a/test/e2e-linkerd-tests.sh b/test/e2e-linkerd-tests.sh index c93b3d73..d21a04fa 100755 --- a/test/e2e-linkerd-tests.sh +++ b/test/e2e-linkerd-tests.sh @@ -100,6 +100,59 @@ spec: logCmdOutput: "true" EOF +cat <>> Waiting for primary to be ready' retries=50 count=0 @@ -117,6 +170,19 @@ done echo '✔ Canary initialization test passed' +passed=$(kubectl -n test get svc/podinfo -o jsonpath='{.spec.selector.app}' 2>&1 | { grep podinfo-primary || true; }) +if [ -z "$passed" ]; then + echo -e '\u2716 podinfo selector test failed' + exit 1 +fi +passed=$(kubectl -n test get svc/podinfo-service-canary -o jsonpath='{.spec.selector.app}' 2>&1 | { grep podinfo || true; }) +if [ -z "$passed" ]; then + echo -e '\u2716 podinfo-service selector test failed' + exit 1 +fi + +echo '✔ Canary service custom metadata test passed' + echo '>>> Triggering canary deployment' kubectl -n test set image deployment/podinfo podinfod=stefanprodan/podinfo:3.1.1 From 7793f0b29d1c574210e1455e599728ebea38542a Mon Sep 17 00:00:00 2001 From: Forrest Thomas Date: Wed, 2 Sep 2020 12:35:53 -0700 Subject: [PATCH 16/47] add e2e nginx tests for inconsistent naming between service name and selector --- test/e2e-nginx-tests.sh | 66 +++++++++++++++++++++++++++++++++++++++++ 1 file changed, 66 insertions(+) diff --git a/test/e2e-nginx-tests.sh b/test/e2e-nginx-tests.sh index fe77002d..066d17dc 
100755 --- a/test/e2e-nginx-tests.sh +++ b/test/e2e-nginx-tests.sh @@ -124,6 +124,59 @@ spec: cmd: "hey -z 2m -q 10 -c 2 -host app.example.com http://nginx-ingress-controller.ingress-nginx" EOF +cat <>> Waiting for primary to be ready' retries=50 count=0 @@ -141,6 +194,19 @@ done echo '✔ Canary initialization test passed' +passed=$(kubectl -n test get svc/podinfo -o jsonpath='{.spec.selector.app}' | { grep podinfo-primary || true; }) +if [ -z "$passed" ]; then + echo -e '\u2716 podinfo selector test failed' + exit 1 +fi +passed=$(kubectl -n test get svc/podinfo-service-canary -o jsonpath='{.spec.selector.app}' | { grep podinfo || true; }) +if [ -z "$passed" ]; then + echo -e '\u2716 podinfo-service selector test failed' + exit 1 +fi + +echo '✔ Canary service custom metadata test passed' + echo '>>> Triggering canary deployment' kubectl -n test set image deployment/podinfo podinfod=stefanprodan/podinfo:3.1.1 From 6c35f7611b077e154b73bfaabd14ad634b4c3675 Mon Sep 17 00:00:00 2001 From: Forrest Thomas Date: Fri, 4 Sep 2020 09:35:11 -0700 Subject: [PATCH 17/47] address PR review comments and remove unnecessary configuration from Canary CR in e2e tests --- test/e2e-contour-tests.sh | 25 ------------------------- test/e2e-gloo-tests.sh | 25 ------------------------- test/e2e-istio-tests.sh | 25 ------------------------- test/e2e-linkerd-tests.sh | 25 ------------------------- test/e2e-nginx-tests.sh | 25 ------------------------- 5 files changed, 125 deletions(-) diff --git a/test/e2e-contour-tests.sh b/test/e2e-contour-tests.sh index 4b06f534..59877228 100755 --- a/test/e2e-contour-tests.sh +++ b/test/e2e-contour-tests.sh @@ -100,11 +100,6 @@ spec: service: port: 9898 portDiscovery: true - apex: - annotations: - test: "annotations-test" - labels: - test: "labels-test" headers: request: add: @@ -116,26 +111,6 @@ spec: threshold: 15 maxWeight: 30 stepWeight: 10 - metrics: - - name: request-success-rate - thresholdRange: - min: 99 - interval: 1m - - name: latency - templateRef: - name: latency - namespace: istio-system - thresholdRange: - max: 500 - interval: 1m - webhooks: - - name: load-test - url: http://flagger-loadtester.test/ - timeout: 5s - metadata: - type: cmd - cmd: "hey -z 10m -q 10 -c 2 http://podinfo.test:9898/" - logCmdOutput: "true" EOF echo '>>> Waiting for primary to be ready' diff --git a/test/e2e-gloo-tests.sh b/test/e2e-gloo-tests.sh index c2456ee6..a52b7221 100755 --- a/test/e2e-gloo-tests.sh +++ b/test/e2e-gloo-tests.sh @@ -103,11 +103,6 @@ spec: service: port: 9898 portDiscovery: true - apex: - annotations: - test: "annotations-test" - labels: - test: "labels-test" headers: request: add: @@ -119,26 +114,6 @@ spec: threshold: 15 maxWeight: 30 stepWeight: 10 - metrics: - - name: request-success-rate - thresholdRange: - min: 99 - interval: 1m - - name: latency - templateRef: - name: latency - namespace: istio-system - thresholdRange: - max: 500 - interval: 1m - webhooks: - - name: load-test - url: http://flagger-loadtester.test/ - timeout: 5s - metadata: - type: cmd - cmd: "hey -z 10m -q 10 -c 2 http://podinfo.test:9898/" - logCmdOutput: "true" EOF echo '>>> Waiting for primary to be ready' diff --git a/test/e2e-istio-tests.sh b/test/e2e-istio-tests.sh index adde9c1e..45e85382 100755 --- a/test/e2e-istio-tests.sh +++ b/test/e2e-istio-tests.sh @@ -113,11 +113,6 @@ spec: service: port: 9898 portDiscovery: true - apex: - annotations: - test: "annotations-test" - labels: - test: "labels-test" headers: request: add: @@ -129,26 +124,6 @@ spec: threshold: 15 maxWeight: 30 stepWeight: 
10 - metrics: - - name: request-success-rate - thresholdRange: - min: 99 - interval: 1m - - name: latency - templateRef: - name: latency - namespace: istio-system - thresholdRange: - max: 500 - interval: 1m - webhooks: - - name: load-test - url: http://flagger-loadtester.test/ - timeout: 5s - metadata: - type: cmd - cmd: "hey -z 10m -q 10 -c 2 http://podinfo.test:9898/" - logCmdOutput: "true" EOF echo '>>> Waiting for primary to be ready' diff --git a/test/e2e-linkerd-tests.sh b/test/e2e-linkerd-tests.sh index d21a04fa..d8d1d556 100755 --- a/test/e2e-linkerd-tests.sh +++ b/test/e2e-linkerd-tests.sh @@ -115,11 +115,6 @@ spec: service: port: 9898 portDiscovery: true - apex: - annotations: - test: "annotations-test" - labels: - test: "labels-test" headers: request: add: @@ -131,26 +126,6 @@ spec: threshold: 15 maxWeight: 30 stepWeight: 10 - metrics: - - name: request-success-rate - thresholdRange: - min: 99 - interval: 1m - - name: latency - templateRef: - name: latency - namespace: istio-system - thresholdRange: - max: 500 - interval: 1m - webhooks: - - name: load-test - url: http://flagger-loadtester.test/ - timeout: 5s - metadata: - type: cmd - cmd: "hey -z 10m -q 10 -c 2 http://podinfo.test:9898/" - logCmdOutput: "true" EOF echo '>>> Waiting for primary to be ready' diff --git a/test/e2e-nginx-tests.sh b/test/e2e-nginx-tests.sh index 066d17dc..da979601 100755 --- a/test/e2e-nginx-tests.sh +++ b/test/e2e-nginx-tests.sh @@ -139,11 +139,6 @@ spec: service: port: 9898 portDiscovery: true - apex: - annotations: - test: "annotations-test" - labels: - test: "labels-test" headers: request: add: @@ -155,26 +150,6 @@ spec: threshold: 15 maxWeight: 30 stepWeight: 10 - metrics: - - name: request-success-rate - thresholdRange: - min: 99 - interval: 1m - - name: latency - templateRef: - name: latency - namespace: istio-system - thresholdRange: - max: 500 - interval: 1m - webhooks: - - name: load-test - url: http://flagger-loadtester.test/ - timeout: 5s - metadata: - type: cmd - cmd: "hey -z 10m -q 10 -c 2 http://podinfo.test:9898/" - logCmdOutput: "true" EOF echo '>>> Waiting for primary to be ready' From 2c249e2a922296a53a54db0a5a53f131c8575764 Mon Sep 17 00:00:00 2001 From: Filip Petkovski Date: Wed, 9 Sep 2020 11:46:11 +0200 Subject: [PATCH 18/47] Add New Relic as a metrics provider --- pkg/metrics/providers/factory.go | 2 + pkg/metrics/providers/newrelic.go | 159 +++++++++++++++++++++++++ pkg/metrics/providers/newrelic_test.go | 126 ++++++++++++++++++++ 3 files changed, 287 insertions(+) create mode 100644 pkg/metrics/providers/newrelic.go create mode 100644 pkg/metrics/providers/newrelic_test.go diff --git a/pkg/metrics/providers/factory.go b/pkg/metrics/providers/factory.go index 55e87d91..a1c32650 100644 --- a/pkg/metrics/providers/factory.go +++ b/pkg/metrics/providers/factory.go @@ -18,6 +18,8 @@ func (factory Factory) Provider( return NewDatadogProvider(metricInterval, provider, credentials) case "cloudwatch": return NewCloudWatchProvider(metricInterval, provider) + case "newrelic": + return NewNewRelicProvider(metricInterval, provider, credentials) default: return NewPrometheusProvider(provider, credentials) } diff --git a/pkg/metrics/providers/newrelic.go b/pkg/metrics/providers/newrelic.go new file mode 100644 index 00000000..daa19977 --- /dev/null +++ b/pkg/metrics/providers/newrelic.go @@ -0,0 +1,159 @@ +package providers + +import ( + "context" + "encoding/json" + "fmt" + "io/ioutil" + "net/http" + "time" + + flaggerv1 "github.com/weaveworks/flagger/pkg/apis/flagger/v1beta1" +) + +const 
( + newrelicInsightsDefaultHost = "https://insights-api.newrelic.com" + + newrelicQueryKeySecretKey = "newrelic_query_key" + newrelicAccountIdSecretKey = "newrelic_account_id" + + newrelicQueryKeyHeaderKey = "X-Query-Key" +) + +// NewRelicProvider executes newrelic queries +type NewRelicProvider struct { + insightsQueryEndpoint string + + timeout time.Duration + queryKey string + fromDelta int64 +} + +type newRelicResponse struct { + Results []struct { + Result *float64 `json:"result"` + } `json:"results"` +} + +// NewNewRelicProvider takes a canary spec, a provider spec and the credentials map, and +// returns a NewRelic client ready to execute queries against the Insights API +func NewNewRelicProvider( + metricInterval string, + provider flaggerv1.MetricTemplateProvider, + credentials map[string][]byte, +) (*NewRelicProvider, error) { + address := provider.Address + if address == "" { + address = newrelicInsightsDefaultHost + } + + accountId, ok := credentials[newrelicAccountIdSecretKey] + if !ok { + return nil, fmt.Errorf("newrelic credentials does not contain " + newrelicAccountIdSecretKey) + } + + queryEndpoint := fmt.Sprintf("%s/v1/accounts/%s/query", address, accountId) + nr := NewRelicProvider{ + timeout: 5 * time.Second, + insightsQueryEndpoint: queryEndpoint, + } + + if b, ok := credentials[newrelicQueryKeySecretKey]; ok { + nr.queryKey = string(b) + } else { + return nil, fmt.Errorf("newrelic credentials does not contain " + newrelicQueryKeySecretKey) + } + + md, err := time.ParseDuration(metricInterval) + if err != nil { + return nil, fmt.Errorf("error parsing metric interval: %w", err) + } + + nr.fromDelta = int64(md.Seconds()) + return &nr, nil +} + +// RunQuery executes the new relic query against the New Relic Insights API +// and returns the the first result +func (p *NewRelicProvider) RunQuery(query string) (float64, error) { + req, err := p.newInsightsRequest(query) + if err != nil { + return 0, err + } + + ctx, cancel := context.WithTimeout(req.Context(), p.timeout) + defer cancel() + r, err := http.DefaultClient.Do(req.WithContext(ctx)) + if err != nil { + return 0, fmt.Errorf("request failed: %w", err) + } + + defer r.Body.Close() + b, err := ioutil.ReadAll(r.Body) + if err != nil { + return 0, fmt.Errorf("error reading body: %w", err) + } + + if r.StatusCode != http.StatusOK { + return 0, fmt.Errorf("error response: %s: %w", string(b), err) + } + + var res newRelicResponse + if err := json.Unmarshal(b, &res); err != nil { + return 0, fmt.Errorf("error unmarshaling result: %w, '%s'", err, string(b)) + } + + if len(res.Results) != 1 { + return 0, fmt.Errorf("invalid response: %s: %w", string(b), ErrNoValuesFound) + } + + if res.Results[0].Result == nil { + return 0, fmt.Errorf("invalid response: %s: %w", string(b), ErrNoValuesFound) + } + + return *res.Results[0].Result, nil +} + +// IsOnline calls the NewRelic's insights API with +// and returns an error if the request is rejected +func (p *NewRelicProvider) IsOnline() (bool, error) { + req, err := p.newInsightsRequest("SELECT * FROM Metric") + if err != nil { + return false, fmt.Errorf("error http.NewRequest: %w", err) + } + + ctx, cancel := context.WithTimeout(req.Context(), p.timeout) + defer cancel() + r, err := http.DefaultClient.Do(req.WithContext(ctx)) + if err != nil { + return false, fmt.Errorf("request failed: %w", err) + } + + defer r.Body.Close() + + b, err := ioutil.ReadAll(r.Body) + if err != nil { + return false, fmt.Errorf("error reading body: %w", err) + } + + if r.StatusCode != http.StatusOK { + 
return false, fmt.Errorf("error response: %s", string(b)) + } + + return true, nil +} + +func (p *NewRelicProvider) newInsightsRequest(query string) (*http.Request, error) { + req, err := http.NewRequest("GET", p.insightsQueryEndpoint, nil) + if err != nil { + return nil, fmt.Errorf("error http.NewRequest: %w", err) + } + + req.Header.Set(newrelicQueryKeyHeaderKey, p.queryKey) + + q := req.URL.Query() + q.Add("nrql", fmt.Sprintf("%s SINCE %d seconds ago", query, p.fromDelta)) + req.URL.RawQuery = q.Encode() + + return req, nil +} diff --git a/pkg/metrics/providers/newrelic_test.go b/pkg/metrics/providers/newrelic_test.go new file mode 100644 index 00000000..5552665b --- /dev/null +++ b/pkg/metrics/providers/newrelic_test.go @@ -0,0 +1,126 @@ +package providers + +import ( + "errors" + "fmt" + "net/http" + "net/http/httptest" + "testing" + "time" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + flaggerv1 "github.com/weaveworks/flagger/pkg/apis/flagger/v1beta1" +) + +func TestNewNewRelicProvider(t *testing.T) { + queryKey := "query-key" + accountId := "51312" + cs := map[string][]byte{ + "newrelic_query_key": []byte(queryKey), + "newrelic_account_id": []byte(accountId), + } + + duration := "100s" + secondsDuration, err := time.ParseDuration(duration) + require.NoError(t, err) + + nr, err := NewNewRelicProvider("100s", flaggerv1.MetricTemplateProvider{}, cs) + require.NoError(t, err) + assert.Equal(t, "https://insights-api.newrelic.com/v1/accounts/51312/query", nr.insightsQueryEndpoint) + assert.Equal(t, int64(secondsDuration.Seconds()), nr.fromDelta) + assert.Equal(t, queryKey, nr.queryKey) +} + +func TestNewRelicProvider_RunQuery(t *testing.T) { + queryKey := "query-key" + accountId := "51312" + t.Run("ok", func(t *testing.T) { + q := `SELECT sum(nginx_ingress_controller_requests) / 1 FROM Metric WHERE status = '200'` + eq := `SELECT sum(nginx_ingress_controller_requests) / 1 FROM Metric WHERE status = '200' SINCE 60 seconds ago` + er := 1.11111 + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + aq := r.URL.Query().Get("nrql") + assert.Equal(t, eq, aq) + assert.Equal(t, queryKey, r.Header.Get(newrelicQueryKeyHeaderKey)) + + json := fmt.Sprintf(`{"results":[{"result": %f}]}`, er) + w.Write([]byte(json)) + })) + defer ts.Close() + + nr, err := NewNewRelicProvider("1m", + flaggerv1.MetricTemplateProvider{ + Address: ts.URL, + }, + map[string][]byte{ + "newrelic_query_key": []byte(queryKey), + "newrelic_account_id": []byte(accountId), + }, + ) + require.NoError(t, err) + + f, err := nr.RunQuery(q) + assert.NoError(t, err) + assert.Equal(t, er, f) + }) + + t.Run("no values", func(t *testing.T) { + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + json := fmt.Sprintf(`{"results": []}`) + w.Write([]byte(json)) + })) + defer ts.Close() + + dp, err := NewNewRelicProvider( + "1m", + flaggerv1.MetricTemplateProvider{Address: ts.URL}, + map[string][]byte{ + "newrelic_query_key": []byte(queryKey), + "newrelic_account_id": []byte(accountId)}, + ) + require.NoError(t, err) + _, err = dp.RunQuery("") + require.True(t, errors.Is(err, ErrNoValuesFound)) + }) +} + +func TestNewReelicProvider_IsOnline(t *testing.T) { + for _, c := range []struct { + code int + errExpected bool + }{ + {code: http.StatusOK, errExpected: false}, + {code: http.StatusUnauthorized, errExpected: true}, + } { + t.Run(fmt.Sprintf("%d", c.code), func(t *testing.T) { + queryKey := "query-key" + accountId := "51312" + 
query := `SELECT * FROM Metric SINCE 60 seconds ago` + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + assert.Equal(t, queryKey, r.Header.Get(newrelicQueryKeyHeaderKey)) + assert.Equal(t, query, r.URL.Query().Get("nrql")) + w.WriteHeader(c.code) + })) + defer ts.Close() + + dp, err := NewNewRelicProvider( + "1m", + flaggerv1.MetricTemplateProvider{Address: ts.URL}, + map[string][]byte{ + "newrelic_query_key": []byte(queryKey), + "newrelic_account_id": []byte(accountId), + }, + ) + require.NoError(t, err) + + _, err = dp.IsOnline() + if c.errExpected { + require.Error(t, err) + } else { + require.NoError(t, err) + } + }) + } +} From 68e4e1cc68bf6077a884298c611dac762b1a7294 Mon Sep 17 00:00:00 2001 From: Filip Petkovski Date: Wed, 9 Sep 2020 13:51:27 +0200 Subject: [PATCH 19/47] Apply suggestions from code review Co-authored-by: Stefan Prodan --- pkg/metrics/providers/newrelic.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/pkg/metrics/providers/newrelic.go b/pkg/metrics/providers/newrelic.go index daa19977..6480adb8 100644 --- a/pkg/metrics/providers/newrelic.go +++ b/pkg/metrics/providers/newrelic.go @@ -49,7 +49,7 @@ func NewNewRelicProvider( accountId, ok := credentials[newrelicAccountIdSecretKey] if !ok { - return nil, fmt.Errorf("newrelic credentials does not contain " + newrelicAccountIdSecretKey) + return nil, fmt.Errorf("newrelic credentials does not contain the key '%s'", newrelicAccountIdSecretKey) } queryEndpoint := fmt.Sprintf("%s/v1/accounts/%s/query", address, accountId) @@ -61,7 +61,7 @@ func NewNewRelicProvider( if b, ok := credentials[newrelicQueryKeySecretKey]; ok { nr.queryKey = string(b) } else { - return nil, fmt.Errorf("newrelic credentials does not contain " + newrelicQueryKeySecretKey) + return nil, fmt.Errorf("newrelic credentials does not contain the key ''%s", newrelicQueryKeySecretKey) } md, err := time.ParseDuration(metricInterval) From c81e19c48a7007ff4caa356da7b529242e8a87bc Mon Sep 17 00:00:00 2001 From: Filip Petkovski Date: Wed, 9 Sep 2020 18:10:58 +0200 Subject: [PATCH 20/47] Add newrelic as to the provider type enum --- artifacts/flagger/crd.yaml | 1 + charts/flagger/crds/crd.yaml | 1 + kustomize/base/flagger/crd.yaml | 1 + 3 files changed, 3 insertions(+) diff --git a/artifacts/flagger/crd.yaml b/artifacts/flagger/crd.yaml index a56b9e1a..37311bab 100644 --- a/artifacts/flagger/crd.yaml +++ b/artifacts/flagger/crd.yaml @@ -796,6 +796,7 @@ spec: - influxdb - datadog - cloudwatch + - newrelic address: description: API address of this provider type: string diff --git a/charts/flagger/crds/crd.yaml b/charts/flagger/crds/crd.yaml index a56b9e1a..37311bab 100644 --- a/charts/flagger/crds/crd.yaml +++ b/charts/flagger/crds/crd.yaml @@ -796,6 +796,7 @@ spec: - influxdb - datadog - cloudwatch + - newrelic address: description: API address of this provider type: string diff --git a/kustomize/base/flagger/crd.yaml b/kustomize/base/flagger/crd.yaml index a56b9e1a..37311bab 100644 --- a/kustomize/base/flagger/crd.yaml +++ b/kustomize/base/flagger/crd.yaml @@ -796,6 +796,7 @@ spec: - influxdb - datadog - cloudwatch + - newrelic address: description: API address of this provider type: string From 563b1cd88d4a76ec95e5b7d7bd4e6c43726d0e1c Mon Sep 17 00:00:00 2001 From: Filip Petkovski Date: Thu, 10 Sep 2020 08:48:10 +0200 Subject: [PATCH 21/47] Add New Relic provider to the documentation --- docs/gitbook/usage/metrics.md | 53 +++++++++++++++++++++++++++++++++++ 1 file changed, 53 insertions(+) diff --git 
a/docs/gitbook/usage/metrics.md b/docs/gitbook/usage/metrics.md index 8ba9c8e7..0dc7ac21 100644 --- a/docs/gitbook/usage/metrics.md +++ b/docs/gitbook/usage/metrics.md @@ -314,3 +314,56 @@ Reference the template in the canary analysis: ``` **Note** that Flagger need AWS IAM permission to perform `cloudwatch:GetMetricData` to use this provider. + +### New Relic + +You can create custom metric checks using the New Relic provider. + +Create a secret with your New Relic Insights credentials: + +```yaml +apiVersion: v1 +kind: Secret +metadata: + name: newrelic + namespace: istio-system +data: + newrelic_account_id: your-account-id + newrelic_query_key: your-insights-query-key +``` + +New Relic template example: + +```yaml +apiVersion: flagger.app/v1beta1 +kind: MetricTemplate +metadata: + name: newrelic-error-rate + namespace: istio-system +spec: + provider: + type: newrelic + secretRef: + name: newrelic + query: | + SELECT + filter(sum(nginx_ingress_controller_requests), WHERE status >= '500') / + sum(nginx_ingress_controller_requests) * 100 + FROM Metric + WHERE metricName = 'nginx_ingress_controller_requests' + AND ingress = '{{ ingress }}' AND namespace = '{{ namespace }}' +``` + +Reference the template in the canary analysis: + +```yaml + analysis: + metrics: + - name: "error rate" + templateRef: + name: newrelic-error-rate + namespace: istio-system + thresholdRange: + max: 5 + interval: 1m +``` From 8b3296c065b6f7affbcb403bdf6cba9273065bcc Mon Sep 17 00:00:00 2001 From: Filip Petkovski Date: Thu, 10 Sep 2020 09:19:36 +0200 Subject: [PATCH 22/47] Apply suggestions from code review Co-authored-by: Stefan Prodan --- docs/gitbook/usage/metrics.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/docs/gitbook/usage/metrics.md b/docs/gitbook/usage/metrics.md index 0dc7ac21..84b68a35 100644 --- a/docs/gitbook/usage/metrics.md +++ b/docs/gitbook/usage/metrics.md @@ -339,7 +339,7 @@ apiVersion: flagger.app/v1beta1 kind: MetricTemplate metadata: name: newrelic-error-rate - namespace: istio-system + namespace: ingress-nginx spec: provider: type: newrelic @@ -362,7 +362,7 @@ Reference the template in the canary analysis: - name: "error rate" templateRef: name: newrelic-error-rate - namespace: istio-system + namespace: ingress-nginx thresholdRange: max: 5 interval: 1m From d62e7f678f0cd4bb53d706174b9f5039f98e5bdf Mon Sep 17 00:00:00 2001 From: Daniel Haarhoff Date: Thu, 10 Sep 2020 12:22:05 +0100 Subject: [PATCH 23/47] Add eLife to orgs using flagger --- README.md | 1 + 1 file changed, 1 insertion(+) diff --git a/README.md b/README.md index 8a931f81..3bf35853 100644 --- a/README.md +++ b/README.md @@ -50,6 +50,7 @@ List of organizations using Flagger: * [MediaMarktSaturn](https://www.mediamarktsaturn.com) * [Weaveworks](https://weave.works) * [Jumia Group](https://group.jumia.com) +* [eLife](https://elifesciences.org/) If you are using Flagger, please submit a PR to add your organization to the list! 
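As a quick sanity check of the New Relic provider added above, the Insights client can be exercised directly from Go, much as its unit tests do. The sketch below is illustrative only: the account id, query key and NRQL query are placeholders, and it assumes nothing beyond the constructor and `RunQuery` signatures shown in the patch.

```go
package main

import (
	"fmt"
	"log"

	flaggerv1 "github.com/weaveworks/flagger/pkg/apis/flagger/v1beta1"
	"github.com/weaveworks/flagger/pkg/metrics/providers"
)

func main() {
	// credentials use the same secret keys documented above (values are placeholders)
	credentials := map[string][]byte{
		"newrelic_account_id": []byte("51312"),
		"newrelic_query_key":  []byte("your-insights-query-key"),
	}

	// an empty Address falls back to https://insights-api.newrelic.com;
	// the "1m" interval becomes the "SINCE 60 seconds ago" suffix appended to the NRQL query
	nr, err := providers.NewNewRelicProvider("1m", flaggerv1.MetricTemplateProvider{}, credentials)
	if err != nil {
		log.Fatalf("provider setup failed: %v", err)
	}

	value, err := nr.RunQuery(`SELECT sum(nginx_ingress_controller_requests) FROM Metric`)
	if err != nil {
		log.Fatalf("query failed: %v", err)
	}
	fmt.Printf("metric value: %f\n", value)
}
```
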
From 29075264526234d057f191bc11c9da51781f75d5 Mon Sep 17 00:00:00 2001 From: Daniel Albuquerque Date: Mon, 14 Sep 2020 19:46:35 +0100 Subject: [PATCH 24/47] Do not promote when not ready on skip analysis --- pkg/controller/scheduler.go | 13 +++++++++++-- 1 file changed, 11 insertions(+), 2 deletions(-) diff --git a/pkg/controller/scheduler.go b/pkg/controller/scheduler.go index 11fac989..db9edfa1 100644 --- a/pkg/controller/scheduler.go +++ b/pkg/controller/scheduler.go @@ -236,7 +236,7 @@ func (c *Controller) advanceCanary(name string, namespace string) { } // check if analysis should be skipped - if skip := c.shouldSkipAnalysis(cd, canaryController, meshRouter); skip { + if skip := c.shouldSkipAnalysis(cd, canaryController, meshRouter, err, retriable); skip { return } @@ -616,11 +616,20 @@ func (c *Controller) runAnalysis(canary *flaggerv1.Canary) bool { return true } -func (c *Controller) shouldSkipAnalysis(canary *flaggerv1.Canary, canaryController canary.Controller, meshRouter router.Interface) bool { +func (c *Controller) shouldSkipAnalysis(canary *flaggerv1.Canary, canaryController canary.Controller, meshRouter router.Interface, err error, retriable bool) bool { if !canary.SkipAnalysis() { return false } + // regardless if analysis is being skipped, rollback if canary failed to progress + if !retriable || canary.Status.FailedChecks >= canary.GetAnalysisThreshold() { + c.recordEventWarningf(canary, "Rolling back %s.%s progress deadline exceeded %v", canary.Name, canary.Namespace, err) + c.alert(canary, fmt.Sprintf("Progress deadline exceeded %v", err), false, flaggerv1.SeverityError) + c.rollback(canary, canaryController, meshRouter) + + return true + } + // route all traffic to primary primaryWeight := 100 canaryWeight := 0 From 013949a9f45576b63995bcd8a6afe80226e468f6 Mon Sep 17 00:00:00 2001 From: Daniel Albuquerque Date: Fri, 18 Sep 2020 17:59:16 +0100 Subject: [PATCH 25/47] Add tests for when canary analysis is skipped --- .circleci/config.yml | 1 + pkg/apis/flagger/v1beta1/canary.go | 4 +- test/e2e-istio-tests-skip-analysis.sh | 192 ++++++++++++++++++++++++++ 3 files changed, 196 insertions(+), 1 deletion(-) create mode 100755 test/e2e-istio-tests-skip-analysis.sh diff --git a/.circleci/config.yml b/.circleci/config.yml index 09c6a598..1ab3bd28 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -95,6 +95,7 @@ jobs: - run: test/e2e-kind.sh v1.18.2 - run: test/e2e-istio.sh - run: test/e2e-istio-tests.sh + - run: test/e2e-istio-tests-skip-analysis.sh e2e-gloo-testing: machine: true diff --git a/pkg/apis/flagger/v1beta1/canary.go b/pkg/apis/flagger/v1beta1/canary.go index 3451e796..52bc64a4 100644 --- a/pkg/apis/flagger/v1beta1/canary.go +++ b/pkg/apis/flagger/v1beta1/canary.go @@ -428,7 +428,9 @@ func (c *Canary) GetMetricInterval() string { // SkipAnalysis returns true if the analysis is nil // or if spec.SkipAnalysis is true func (c *Canary) SkipAnalysis() bool { - if c.Spec.Analysis == nil && c.Spec.CanaryAnalysis == nil { + // log.Printf("#1 SkipAnalysis, analysis=%v canaryanalysis=%v", c.Spec.Analysis, c.Spec.CanaryAnalysis) + + if c.Spec.Analysis == nil || c.Spec.CanaryAnalysis == nil { return true } return c.Spec.SkipAnalysis diff --git a/test/e2e-istio-tests-skip-analysis.sh b/test/e2e-istio-tests-skip-analysis.sh new file mode 100755 index 00000000..7ad419dd --- /dev/null +++ b/test/e2e-istio-tests-skip-analysis.sh @@ -0,0 +1,192 @@ +#!/usr/bin/env bash + +# This script runs e2e tests for when the canary analysis is skipped +# Prerequisites: Kubernetes Kind 
and Istio + +set -o errexit + +REPO_ROOT=$(git rev-parse --show-toplevel) + +echo '>>> Creating test namespace' +kubectl create namespace test +kubectl label namespace test istio-injection=enabled + +echo '>>> Installing the load tester' +kubectl apply -k ${REPO_ROOT}/kustomize/tester +kubectl -n test rollout status deployment/flagger-loadtester + +echo '>>> Deploy podinfo' +kubectl apply -f ${REPO_ROOT}/test/e2e-workload.yaml + +echo '>>> Create latency metric template' +cat <>> Initialising canary' +cat <>> Waiting for primary to be ready' +retries=50 +count=0 +ok=false +until ${ok}; do + kubectl -n test get canary/podinfo | grep 'Initialized' && ok=true || ok=false + sleep 5 + count=$(($count + 1)) + if [[ ${count} -eq ${retries} ]]; then + kubectl -n istio-system logs deployment/flagger + echo "No more retries left" + exit 1 + fi +done + +echo '✔ Canary initialization test passed' + +kubectl -n test get svc/podinfo -oyaml | grep annotations-test +kubectl -n test get svc/podinfo -oyaml | grep labels-test + +echo '✔ Canary service custom metadata test passed' + +echo '>>> Triggering canary deployment' +kubectl -n test set image deployment/podinfo podinfod=stefanprodan/podinfo:3.1.1 + +echo '>>> Waiting for canary promotion' +retries=50 +count=0 +ok=false +until ${ok}; do + kubectl -n test describe deployment/podinfo-primary | grep '3.1.1' && ok=true || ok=false + sleep 10 + kubectl -n istio-system logs deployment/flagger --tail 1 + count=$(($count + 1)) + if [[ ${count} -eq ${retries} ]]; then + kubectl -n test describe deployment/podinfo + kubectl -n test describe deployment/podinfo-primary + kubectl -n istio-system logs deployment/flagger + echo "No more retries left" + exit 1 + fi +done + +echo '>>> Waiting for canary finalization' +retries=50 +count=0 +ok=false +until ${ok}; do + kubectl -n test get canary/podinfo | grep 'Succeeded' && ok=true || ok=false + sleep 5 + count=$(($count + 1)) + if [[ ${count} -eq ${retries} ]]; then + kubectl -n istio-system logs deployment/flagger + echo "No more retries left" + exit 1 + fi +done + +echo '✔ Canary promotion test passed' + +if [[ "$1" = "canary" ]]; then + exit 0 +fi + +echo '>>> Triggering canary deployment with a bad release (unknown docker image)' +kubectl -n test set image deployment/podinfo podinfod=stefanprodan/potato:1.0.0 + +echo '>>> Waiting for canary to fail' +retries=50 +count=0 +ok=false +until ${ok}; do + kubectl get canary/podinfo -n test -o=jsonpath='{.status.phase}' | grep 'Failed' && ok=true || ok=false + sleep 10 + kubectl -n istio-system logs deployment/flagger --tail 1 + count=$(($count + 1)) + if [[ ${count} -eq ${retries} ]]; then + kubectl -n test describe deployment/podinfo + kubectl -n test describe deployment/podinfo-primary + kubectl -n istio-system logs deployment/flagger + echo "No more retries left" + exit 1 + fi +done + +echo '>>> Confirm primary pod still running correct version' +retries=50 +count=0 +ok=false +until ${ok}; do + kubectl -n test describe deployment/podinfo-primary | grep '3.1.1' && ok=true || ok=false + sleep 5 + count=$(($count + 1)) + if [[ ${count} -eq ${retries} ]]; then + kubectl -n istio-system logs deployment/flagger + echo "No more retries left" + exit 1 + fi +done + +kubectl -n istio-system logs deployment/flagger + +echo '✔ All tests passed' From 8119acb40a7fcc314bf01ac62a0344be4a02b59a Mon Sep 17 00:00:00 2001 From: Daniel Albuquerque Date: Fri, 18 Sep 2020 18:00:38 +0100 Subject: [PATCH 26/47] Remove comment :) --- pkg/apis/flagger/v1beta1/canary.go | 2 -- 1 file changed, 2 
deletions(-) diff --git a/pkg/apis/flagger/v1beta1/canary.go b/pkg/apis/flagger/v1beta1/canary.go index 52bc64a4..703c4405 100644 --- a/pkg/apis/flagger/v1beta1/canary.go +++ b/pkg/apis/flagger/v1beta1/canary.go @@ -428,8 +428,6 @@ func (c *Canary) GetMetricInterval() string { // SkipAnalysis returns true if the analysis is nil // or if spec.SkipAnalysis is true func (c *Canary) SkipAnalysis() bool { - // log.Printf("#1 SkipAnalysis, analysis=%v canaryanalysis=%v", c.Spec.Analysis, c.Spec.CanaryAnalysis) - if c.Spec.Analysis == nil || c.Spec.CanaryAnalysis == nil { return true } From 4b098cc7a259d3e1044b306b30df66f7a7743a92 Mon Sep 17 00:00:00 2001 From: Daniel Albuquerque Date: Fri, 18 Sep 2020 18:17:50 +0100 Subject: [PATCH 27/47] Better assertion for new tests --- test/e2e-istio-tests-skip-analysis.sh | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/test/e2e-istio-tests-skip-analysis.sh b/test/e2e-istio-tests-skip-analysis.sh index 7ad419dd..66cc9e71 100755 --- a/test/e2e-istio-tests-skip-analysis.sh +++ b/test/e2e-istio-tests-skip-analysis.sh @@ -151,7 +151,7 @@ if [[ "$1" = "canary" ]]; then exit 0 fi -echo '>>> Triggering canary deployment with a bad release (unknown docker image)' +echo '>>> Triggering canary deployment with a bad release (non existent docker image)' kubectl -n test set image deployment/podinfo podinfod=stefanprodan/potato:1.0.0 echo '>>> Waiting for canary to fail' @@ -177,7 +177,7 @@ retries=50 count=0 ok=false until ${ok}; do - kubectl -n test describe deployment/podinfo-primary | grep '3.1.1' && ok=true || ok=false + kubectl get deployment podinfo-primary -n test -o jsonpath='{.spec.replicas}' | grep 1 && ok=true || ok=false sleep 5 count=$(($count + 1)) if [[ ${count} -eq ${retries} ]]; then From 0eee5b74023cdb320ae5984187ea6becf936c5d3 Mon Sep 17 00:00:00 2001 From: Daniel Albuquerque Date: Fri, 18 Sep 2020 18:43:27 +0100 Subject: [PATCH 28/47] Revert changes in skip analysis condition --- pkg/apis/flagger/v1beta1/canary.go | 2 +- test/e2e-istio-tests-skip-analysis.sh | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/pkg/apis/flagger/v1beta1/canary.go b/pkg/apis/flagger/v1beta1/canary.go index 703c4405..3451e796 100644 --- a/pkg/apis/flagger/v1beta1/canary.go +++ b/pkg/apis/flagger/v1beta1/canary.go @@ -428,7 +428,7 @@ func (c *Canary) GetMetricInterval() string { // SkipAnalysis returns true if the analysis is nil // or if spec.SkipAnalysis is true func (c *Canary) SkipAnalysis() bool { - if c.Spec.Analysis == nil || c.Spec.CanaryAnalysis == nil { + if c.Spec.Analysis == nil && c.Spec.CanaryAnalysis == nil { return true } return c.Spec.SkipAnalysis diff --git a/test/e2e-istio-tests-skip-analysis.sh b/test/e2e-istio-tests-skip-analysis.sh index 66cc9e71..c6f83db6 100755 --- a/test/e2e-istio-tests-skip-analysis.sh +++ b/test/e2e-istio-tests-skip-analysis.sh @@ -71,8 +71,8 @@ spec: x-envoy-upstream-rq-timeout-ms: "15000" x-envoy-max-retries: "10" x-envoy-retry-on: "gateway-error,connect-failure,refused-stream" + skipAnalysis: true analysis: - skipAnalysis: true interval: 15s threshold: 15 maxWeight: 30 From 26d53dcd44d140e26ba011566e6a301990353121 Mon Sep 17 00:00:00 2001 From: Daniel Albuquerque Date: Fri, 18 Sep 2020 19:05:45 +0100 Subject: [PATCH 29/47] diff test stucture for istio --- .circleci/config.yml | 1 + test/e2e-istio-dependencies.sh | 19 +++++++++++++++++++ test/e2e-istio-tests-skip-analysis.sh | 15 --------------- test/e2e-istio-tests.sh | 15 --------------- 4 files changed, 20 insertions(+), 30 deletions(-) 
create mode 100755 test/e2e-istio-dependencies.sh diff --git a/.circleci/config.yml b/.circleci/config.yml index 1ab3bd28..047423cb 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -94,6 +94,7 @@ jobs: - run: test/container-build.sh - run: test/e2e-kind.sh v1.18.2 - run: test/e2e-istio.sh + - run: test/e2e-istio-dependencies.sh - run: test/e2e-istio-tests.sh - run: test/e2e-istio-tests-skip-analysis.sh diff --git a/test/e2e-istio-dependencies.sh b/test/e2e-istio-dependencies.sh new file mode 100755 index 00000000..7bfe93cb --- /dev/null +++ b/test/e2e-istio-dependencies.sh @@ -0,0 +1,19 @@ +#!/usr/bin/env bash + +# This script setups the scenarios for istio tests by creating a Kubernetes namespace, installing the load tester and a test workload (podinfo) +# Prerequisites: Kubernetes Kind and Istio + +set -o errexit + +REPO_ROOT=$(git rev-parse --show-toplevel) + +echo '>>> Creating test namespace' +kubectl create namespace test +kubectl label namespace test istio-injection=enabled + +echo '>>> Installing the load tester' +kubectl apply -k ${REPO_ROOT}/kustomize/tester +kubectl -n test rollout status deployment/flagger-loadtester + +echo '>>> Deploy podinfo' +kubectl apply -f ${REPO_ROOT}/test/e2e-workload.yaml diff --git a/test/e2e-istio-tests-skip-analysis.sh b/test/e2e-istio-tests-skip-analysis.sh index c6f83db6..6cada2eb 100755 --- a/test/e2e-istio-tests-skip-analysis.sh +++ b/test/e2e-istio-tests-skip-analysis.sh @@ -3,21 +3,6 @@ # This script runs e2e tests for when the canary analysis is skipped # Prerequisites: Kubernetes Kind and Istio -set -o errexit - -REPO_ROOT=$(git rev-parse --show-toplevel) - -echo '>>> Creating test namespace' -kubectl create namespace test -kubectl label namespace test istio-injection=enabled - -echo '>>> Installing the load tester' -kubectl apply -k ${REPO_ROOT}/kustomize/tester -kubectl -n test rollout status deployment/flagger-loadtester - -echo '>>> Deploy podinfo' -kubectl apply -f ${REPO_ROOT}/test/e2e-workload.yaml - echo '>>> Create latency metric template' cat <>> Creating test namespace' -kubectl create namespace test -kubectl label namespace test istio-injection=enabled - -echo '>>> Installing the load tester' -kubectl apply -k ${REPO_ROOT}/kustomize/tester -kubectl -n test rollout status deployment/flagger-loadtester - -echo '>>> Deploy podinfo' -kubectl apply -f ${REPO_ROOT}/test/e2e-workload.yaml - echo '>>> Create latency metric template' cat < Date: Fri, 18 Sep 2020 19:51:03 +0100 Subject: [PATCH 30/47] Remove custom metrics (not needed for tests) --- test/e2e-istio-tests-skip-analysis.sh | 26 -------------------------- 1 file changed, 26 deletions(-) diff --git a/test/e2e-istio-tests-skip-analysis.sh b/test/e2e-istio-tests-skip-analysis.sh index 6cada2eb..5b94026b 100755 --- a/test/e2e-istio-tests-skip-analysis.sh +++ b/test/e2e-istio-tests-skip-analysis.sh @@ -3,32 +3,6 @@ # This script runs e2e tests for when the canary analysis is skipped # Prerequisites: Kubernetes Kind and Istio -echo '>>> Create latency metric template' -cat <>> Initialising canary' cat < Date: Sat, 19 Sep 2020 15:15:39 +0100 Subject: [PATCH 31/47] Add set -o errexit --- test/e2e-istio-tests-skip-analysis.sh | 2 ++ test/e2e-istio-tests.sh | 2 ++ 2 files changed, 4 insertions(+) diff --git a/test/e2e-istio-tests-skip-analysis.sh b/test/e2e-istio-tests-skip-analysis.sh index 5b94026b..f6f1937e 100755 --- a/test/e2e-istio-tests-skip-analysis.sh +++ b/test/e2e-istio-tests-skip-analysis.sh @@ -3,6 +3,8 @@ # This script runs e2e tests for when the canary 
analysis is skipped # Prerequisites: Kubernetes Kind and Istio +set -o errexit + echo '>>> Initialising canary' cat <>> Create latency metric template' cat < Date: Sat, 19 Sep 2020 17:39:54 +0100 Subject: [PATCH 32/47] Remove metadata tests (unrelated to skip analysis) --- test/e2e-istio-tests-skip-analysis.sh | 23 ++++------------------- 1 file changed, 4 insertions(+), 19 deletions(-) diff --git a/test/e2e-istio-tests-skip-analysis.sh b/test/e2e-istio-tests-skip-analysis.sh index f6f1937e..621fe285 100755 --- a/test/e2e-istio-tests-skip-analysis.sh +++ b/test/e2e-istio-tests-skip-analysis.sh @@ -21,17 +21,6 @@ spec: service: port: 9898 portDiscovery: true - apex: - annotations: - test: "annotations-test" - labels: - test: "labels-test" - headers: - request: - add: - x-envoy-upstream-rq-timeout-ms: "15000" - x-envoy-max-retries: "10" - x-envoy-retry-on: "gateway-error,connect-failure,refused-stream" skipAnalysis: true analysis: interval: 15s @@ -65,11 +54,6 @@ done echo '✔ Canary initialization test passed' -kubectl -n test get svc/podinfo -oyaml | grep annotations-test -kubectl -n test get svc/podinfo -oyaml | grep labels-test - -echo '✔ Canary service custom metadata test passed' - echo '>>> Triggering canary deployment' kubectl -n test set image deployment/podinfo podinfod=stefanprodan/podinfo:3.1.1 @@ -133,12 +117,13 @@ until ${ok}; do fi done -echo '>>> Confirm primary pod still running correct version' +echo '>>> Confirm primary pod is still running and with correct version' retries=50 count=0 ok=false -until ${ok}; do - kubectl get deployment podinfo-primary -n test -o jsonpath='{.spec.replicas}' | grep 1 && ok=true || ok=false +until ${okImage} && ${okRunning}; do + kubectl get deployment podinfo-primary -n test -o jsonpath='{.spec.replicas}' | grep 1 && okRunning=true || okRunning=false + kubectl -n test describe deployment/podinfo-primary | grep '3.1.3' && okImage=true || okImage=false sleep 5 count=$(($count + 1)) if [[ ${count} -eq ${retries} ]]; then From f2608e627c5fc48b3554bc0f88748bd8dbb202c2 Mon Sep 17 00:00:00 2001 From: stefanprodan Date: Tue, 29 Sep 2020 09:06:47 +0300 Subject: [PATCH 33/47] Release v1.2.0 --- CHANGELOG.md | 21 +++++++++++++++++++++ artifacts/flagger/deployment.yaml | 2 +- charts/flagger/Chart.yaml | 4 ++-- charts/flagger/values.yaml | 4 ++-- charts/grafana/Chart.yaml | 4 ++-- charts/grafana/values.yaml | 4 ++-- charts/podinfo/Chart.yaml | 4 ++-- charts/podinfo/values.yaml | 4 ++-- kustomize/base/flagger/kustomization.yaml | 2 +- kustomize/base/prometheus/deployment.yaml | 2 +- pkg/version/version.go | 2 +- 11 files changed, 37 insertions(+), 16 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 8d36f224..b34576ca 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -2,6 +2,27 @@ All notable changes to this project are documented in this file. 
+## 1.2.0 (2020-10-29) + +Add support for New Relic metrics + +#### Features + +- Add New Relic as a metrics provider + [#691](https://github.com/weaveworks/flagger/pull/691) + +#### Improvements + +- Derive the label selector value from the target matchLabel + [#685](https://github.com/weaveworks/flagger/pull/685) +- Preserve Skipper predicates + [#681](https://github.com/weaveworks/flagger/pull/681) + +#### Fixes + +- Do not promote when not ready on skip analysis + [#695](https://github.com/weaveworks/flagger/pull/695) + ## 1.1.0 (2020-08-18) Add support for Skipper ingress controller diff --git a/artifacts/flagger/deployment.yaml b/artifacts/flagger/deployment.yaml index a82da6b8..1915fd03 100644 --- a/artifacts/flagger/deployment.yaml +++ b/artifacts/flagger/deployment.yaml @@ -22,7 +22,7 @@ spec: serviceAccountName: flagger containers: - name: flagger - image: weaveworks/flagger:1.1.0 + image: weaveworks/flagger:1.2.0 imagePullPolicy: IfNotPresent ports: - name: http diff --git a/charts/flagger/Chart.yaml b/charts/flagger/Chart.yaml index 36dd58aa..9003ff25 100644 --- a/charts/flagger/Chart.yaml +++ b/charts/flagger/Chart.yaml @@ -1,7 +1,7 @@ apiVersion: v1 name: flagger -version: 1.1.0 -appVersion: 1.1.0 +version: 1.2.0 +appVersion: 1.2.0 kubeVersion: ">=1.11.0-0" engine: gotpl description: Flagger is a progressive delivery operator for Kubernetes diff --git a/charts/flagger/values.yaml b/charts/flagger/values.yaml index 3956f5ff..3b72e870 100644 --- a/charts/flagger/values.yaml +++ b/charts/flagger/values.yaml @@ -2,7 +2,7 @@ image: repository: weaveworks/flagger - tag: 1.1.0 + tag: 1.2.0 pullPolicy: IfNotPresent pullSecret: @@ -124,7 +124,7 @@ tolerations: [] prometheus: # to be used with ingress controllers install: false - image: docker.io/prom/prometheus:v2.19.0 + image: docker.io/prom/prometheus:v2.21.0 retention: 2h # Istio multi-cluster service mesh (shared control plane single-network) diff --git a/charts/grafana/Chart.yaml b/charts/grafana/Chart.yaml index d24d3947..b9fe9c3c 100644 --- a/charts/grafana/Chart.yaml +++ b/charts/grafana/Chart.yaml @@ -1,7 +1,7 @@ apiVersion: v1 name: grafana -version: 1.4.0 -appVersion: 6.5.1 +version: 1.5.0 +appVersion: 7.2.0 description: Grafana dashboards for monitoring Flagger canary deployments icon: https://raw.githubusercontent.com/weaveworks/flagger/master/docs/logo/weaveworks.png home: https://flagger.app diff --git a/charts/grafana/values.yaml b/charts/grafana/values.yaml index ddc90973..5f4cdc0c 100644 --- a/charts/grafana/values.yaml +++ b/charts/grafana/values.yaml @@ -6,7 +6,7 @@ replicaCount: 1 image: repository: grafana/grafana - tag: 6.5.1 + tag: 7.2.0 pullPolicy: IfNotPresent podAnnotations: {} @@ -32,7 +32,7 @@ affinity: {} user: admin password: -# Istio Prometheus instance +# Prometheus instance url: http://prometheus:9090 # Weave Cloud instance token diff --git a/charts/podinfo/Chart.yaml b/charts/podinfo/Chart.yaml index 0d52ce60..4bdab13e 100644 --- a/charts/podinfo/Chart.yaml +++ b/charts/podinfo/Chart.yaml @@ -1,6 +1,6 @@ apiVersion: v1 -version: 3.1.1 -appVersion: 3.1.0 +version: 5.0.0 +appVersion: 5.0.0 name: podinfo engine: gotpl description: Flagger canary deployment demo application diff --git a/charts/podinfo/values.yaml b/charts/podinfo/values.yaml index afe2d86a..83dbc7ac 100644 --- a/charts/podinfo/values.yaml +++ b/charts/podinfo/values.yaml @@ -1,7 +1,7 @@ # Default values for podinfo. 
image: - repository: stefanprodan/podinfo - tag: 3.1.0 + repository: ghcr.io/stefanprodan/podinfo + tag: 5.0.0 pullPolicy: IfNotPresent podAnnotations: {} diff --git a/kustomize/base/flagger/kustomization.yaml b/kustomize/base/flagger/kustomization.yaml index b4859564..82172e70 100644 --- a/kustomize/base/flagger/kustomization.yaml +++ b/kustomize/base/flagger/kustomization.yaml @@ -8,4 +8,4 @@ resources: - deployment.yaml images: - name: weaveworks/flagger - newTag: 1.1.0 + newTag: 1.2.0 diff --git a/kustomize/base/prometheus/deployment.yaml b/kustomize/base/prometheus/deployment.yaml index 2cdbff5b..a23b8158 100644 --- a/kustomize/base/prometheus/deployment.yaml +++ b/kustomize/base/prometheus/deployment.yaml @@ -19,7 +19,7 @@ spec: serviceAccountName: flagger-prometheus containers: - name: prometheus - image: prom/prometheus:v2.19.0 + image: prom/prometheus:v2.21.0 imagePullPolicy: IfNotPresent args: - '--storage.tsdb.retention=2h' diff --git a/pkg/version/version.go b/pkg/version/version.go index 1bd77e47..060592fd 100644 --- a/pkg/version/version.go +++ b/pkg/version/version.go @@ -1,4 +1,4 @@ package version -var VERSION = "1.1.0" +var VERSION = "1.2.0" var REVISION = "unknown" From 79f0381c5279fb0c3c4f64137ea512940f5ba3a5 Mon Sep 17 00:00:00 2001 From: Nate Tranel Date: Thu, 1 Oct 2020 08:06:39 -0600 Subject: [PATCH 34/47] fix spelling of template --- pkg/controller/scheduler_metrics.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pkg/controller/scheduler_metrics.go b/pkg/controller/scheduler_metrics.go index 2f30bc98..8e15f887 100644 --- a/pkg/controller/scheduler_metrics.go +++ b/pkg/controller/scheduler_metrics.go @@ -65,7 +65,7 @@ func (c *Controller) checkMetricProviderAvailability(canary *flaggerv1.Canary) e } if ok, err := provider.IsOnline(); !ok || err != nil { - return fmt.Errorf("%v in metric tempalte %s.%s not avaiable: %v", template.Spec.Provider.Type, + return fmt.Errorf("%v in metric template %s.%s not avaiable: %v", template.Spec.Provider.Type, template.Name, template.Namespace, err) } } From 23e59168af547d1d9010a65eec8362d6cde61708 Mon Sep 17 00:00:00 2001 From: Daniel Albuquerque Date: Sun, 11 Oct 2020 14:10:16 +0100 Subject: [PATCH 35/47] Exclude controller labels by prefix --- charts/flagger/README.md | 1 + charts/flagger/templates/deployment.yaml | 3 ++ cmd/flagger/main.go | 6 ++- pkg/canary/daemonset_controller.go | 22 ++++++----- pkg/canary/deployment_controller.go | 24 ++++++------ pkg/canary/factory.go | 34 +++++++++-------- pkg/canary/util.go | 19 ++++++++++ pkg/canary/util_test.go | 38 +++++++++++++++++++ .../scheduler_daemonset_fixture_test.go | 2 +- .../scheduler_deployment_fixture_test.go | 2 +- 10 files changed, 112 insertions(+), 39 deletions(-) create mode 100644 pkg/canary/util_test.go diff --git a/charts/flagger/README.md b/charts/flagger/README.md index fa5589ae..b82e27c8 100644 --- a/charts/flagger/README.md +++ b/charts/flagger/README.md @@ -125,6 +125,7 @@ Parameter | Description | Default `serviceAccount.name` | The name of the service account to create or use. 
If not set and `serviceAccount.create` is `true`, a name is generated using the Flagger fullname | `""` `serviceAccount.annotations` | Annotations for service account | `{}` `ingressAnnotationsPrefix` | Annotations prefix for ingresses | `custom.ingress.kubernetes.io` +`excludedLabelsPrefixes` | List of prefixes of labels that are excluded when creating primary controllers | `"fluxcd,jenkins"` `rbac.create` | If `true`, create and use RBAC resources | `true` `rbac.pspEnabled` | If `true`, create and use a restricted pod security policy | `false` `crd.create` | If `true`, create Flagger's CRDs (should be enabled for Helm v2 only) | `false` diff --git a/charts/flagger/templates/deployment.yaml b/charts/flagger/templates/deployment.yaml index cb6ebd58..d67b4273 100644 --- a/charts/flagger/templates/deployment.yaml +++ b/charts/flagger/templates/deployment.yaml @@ -106,6 +106,9 @@ spec: {{- if .Values.ingressAnnotationsPrefix }} - -ingress-annotations-prefix={{ .Values.ingressAnnotationsPrefix }} {{- end }} + {{- if .Values.excludedLabelsPrefixes }} + - -excluded-labels-prefixes={{ .Values.excludedLabelsPrefixes }} + {{- end }} {{- if .Values.ingressClass }} - -ingress-class={{ .Values.ingressClass }} {{- end }} diff --git a/cmd/flagger/main.go b/cmd/flagger/main.go index 0ce5c76c..217d5b70 100644 --- a/cmd/flagger/main.go +++ b/cmd/flagger/main.go @@ -43,6 +43,7 @@ var ( logLevel string port string msteamsURL string + excludedLabelsPrefixes string slackURL string slackUser string slackChannel string @@ -74,6 +75,7 @@ func init() { flag.StringVar(&slackChannel, "slack-channel", "", "Slack channel.") flag.StringVar(&eventWebhook, "event-webhook", "", "Webhook for publishing flagger events") flag.StringVar(&msteamsURL, "msteams-url", "", "MS Teams incoming webhook URL.") + flag.StringVar(&excludedLabelsPrefixes, "excluded-labels-prefixes", "fluxcd,jenkins", "List of prefixes of labels that are excluded when creating primary controllers.") flag.IntVar(&threadiness, "threadiness", 2, "Worker concurrency.") flag.BoolVar(&zapReplaceGlobals, "zap-replace-globals", false, "Whether to change the logging level of the global zap logger.") flag.StringVar(&zapEncoding, "zap-encoding", "json", "Zap logger encoding.") @@ -184,7 +186,9 @@ func main() { configTracker = &canary.NopTracker{} } - canaryFactory := canary.NewFactory(kubeClient, flaggerClient, configTracker, labels, logger) + excludedLabelsPrefixesArray := strings.Split(excludedLabelsPrefixes, ",") + + canaryFactory := canary.NewFactory(kubeClient, flaggerClient, configTracker, labels, excludedLabelsPrefixesArray, logger) c := controller.NewController( kubeClient, diff --git a/pkg/canary/daemonset_controller.go b/pkg/canary/daemonset_controller.go index fdd656c1..d7582845 100644 --- a/pkg/canary/daemonset_controller.go +++ b/pkg/canary/daemonset_controller.go @@ -22,11 +22,12 @@ var ( // DaemonSetController is managing the operations for Kubernetes DaemonSet kind type DaemonSetController struct { - kubeClient kubernetes.Interface - flaggerClient clientset.Interface - logger *zap.SugaredLogger - configTracker Tracker - labels []string + kubeClient kubernetes.Interface + flaggerClient clientset.Interface + logger *zap.SugaredLogger + configTracker Tracker + labels []string + excludedLabelsPrefixes []string } func (c *DaemonSetController) ScaleToZero(cd *flaggerv1.Canary) error { @@ -76,7 +77,7 @@ func (c *DaemonSetController) ScaleFromZero(cd *flaggerv1.Canary) error { // Initialize creates the primary DaemonSet, scales down the canary DaemonSet, // 
and returns the pod selector label and container ports func (c *DaemonSetController) Initialize(cd *flaggerv1.Canary) (err error) { - err = c.createPrimaryDaemonSet(cd) + err = c.createPrimaryDaemonSet(cd, c.excludedLabelsPrefixes) if err != nil { return fmt.Errorf("createPrimaryDaemonSet failed: %w", err) } @@ -200,7 +201,7 @@ func (c *DaemonSetController) GetMetadata(cd *flaggerv1.Canary) (string, string, return label, labelValue, ports, nil } -func (c *DaemonSetController) createPrimaryDaemonSet(cd *flaggerv1.Canary) error { +func (c *DaemonSetController) createPrimaryDaemonSet(cd *flaggerv1.Canary, excludedLabelsPrefixes []string) error { targetName := cd.Spec.TargetRef.Name primaryName := fmt.Sprintf("%s-primary", cd.Spec.TargetRef.Name) @@ -215,6 +216,9 @@ func (c *DaemonSetController) createPrimaryDaemonSet(cd *flaggerv1.Canary) error targetName, cd.Namespace, canaryDae.Spec.UpdateStrategy.Type) } + // Create the labels map but filter unwanted labels + labels := excludeLabelsByPrefix(canaryDae.Labels, excludedLabelsPrefixes) + label, labelValue, err := c.getSelectorLabel(canaryDae) primaryLabelValue := fmt.Sprintf("%s-primary", labelValue) if err != nil { @@ -241,9 +245,7 @@ func (c *DaemonSetController) createPrimaryDaemonSet(cd *flaggerv1.Canary) error ObjectMeta: metav1.ObjectMeta{ Name: primaryName, Namespace: cd.Namespace, - Labels: map[string]string{ - label: primaryLabelValue, - }, + Labels: labels, OwnerReferences: []metav1.OwnerReference{ *metav1.NewControllerRef(cd, schema.GroupVersionKind{ Group: flaggerv1.SchemeGroupVersion.Group, diff --git a/pkg/canary/deployment_controller.go b/pkg/canary/deployment_controller.go index 40f3ba91..f0fa37ef 100644 --- a/pkg/canary/deployment_controller.go +++ b/pkg/canary/deployment_controller.go @@ -20,18 +20,19 @@ import ( // DeploymentController is managing the operations for Kubernetes Deployment kind type DeploymentController struct { - kubeClient kubernetes.Interface - flaggerClient clientset.Interface - logger *zap.SugaredLogger - configTracker Tracker - labels []string + kubeClient kubernetes.Interface + flaggerClient clientset.Interface + logger *zap.SugaredLogger + configTracker Tracker + labels []string + excludedLabelsPrefixes []string } // Initialize creates the primary deployment, hpa, // scales to zero the canary deployment and returns the pod selector label and container ports func (c *DeploymentController) Initialize(cd *flaggerv1.Canary) (err error) { primaryName := fmt.Sprintf("%s-primary", cd.Spec.TargetRef.Name) - if err := c.createPrimaryDeployment(cd); err != nil { + if err := c.createPrimaryDeployment(cd, c.excludedLabelsPrefixes); err != nil { return fmt.Errorf("createPrimaryDeployment failed: %w", err) } @@ -202,15 +203,18 @@ func (c *DeploymentController) GetMetadata(cd *flaggerv1.Canary) (string, string return label, labelValue, ports, nil } -func (c *DeploymentController) createPrimaryDeployment(cd *flaggerv1.Canary) error { +func (c *DeploymentController) createPrimaryDeployment(cd *flaggerv1.Canary, excludedLabelsPrefixes []string) error { targetName := cd.Spec.TargetRef.Name primaryName := fmt.Sprintf("%s-primary", cd.Spec.TargetRef.Name) canaryDep, err := c.kubeClient.AppsV1().Deployments(cd.Namespace).Get(context.TODO(), targetName, metav1.GetOptions{}) if err != nil { - return fmt.Errorf("deplyoment %s.%s get query error: %w", targetName, cd.Namespace, err) + return fmt.Errorf("deployment %s.%s get query error: %w", targetName, cd.Namespace, err) } + // Create the labels map but filter unwanted labels + 
labels := excludeLabelsByPrefix(canaryDep.Labels, excludedLabelsPrefixes) + label, labelValue, err := c.getSelectorLabel(canaryDep) primaryLabelValue := fmt.Sprintf("%s-primary", labelValue) if err != nil { @@ -242,9 +246,7 @@ func (c *DeploymentController) createPrimaryDeployment(cd *flaggerv1.Canary) err ObjectMeta: metav1.ObjectMeta{ Name: primaryName, Namespace: cd.Namespace, - Labels: map[string]string{ - label: primaryLabelValue, - }, + Labels: labels, OwnerReferences: []metav1.OwnerReference{ *metav1.NewControllerRef(cd, schema.GroupVersionKind{ Group: flaggerv1.SchemeGroupVersion.Group, diff --git a/pkg/canary/factory.go b/pkg/canary/factory.go index 95f243b8..99db8041 100644 --- a/pkg/canary/factory.go +++ b/pkg/canary/factory.go @@ -8,34 +8,38 @@ import ( ) type Factory struct { - kubeClient kubernetes.Interface - flaggerClient clientset.Interface - logger *zap.SugaredLogger - configTracker Tracker - labels []string + kubeClient kubernetes.Interface + flaggerClient clientset.Interface + logger *zap.SugaredLogger + configTracker Tracker + labels []string + excludedLabelsPrefixes []string } func NewFactory(kubeClient kubernetes.Interface, flaggerClient clientset.Interface, configTracker Tracker, labels []string, + excludedLabelsPrefixes []string, logger *zap.SugaredLogger) *Factory { return &Factory{ - kubeClient: kubeClient, - flaggerClient: flaggerClient, - logger: logger, - configTracker: configTracker, - labels: labels, + kubeClient: kubeClient, + flaggerClient: flaggerClient, + logger: logger, + configTracker: configTracker, + labels: labels, + excludedLabelsPrefixes: excludedLabelsPrefixes, } } func (factory *Factory) Controller(kind string) Controller { deploymentCtrl := &DeploymentController{ - logger: factory.logger, - kubeClient: factory.kubeClient, - flaggerClient: factory.flaggerClient, - labels: factory.labels, - configTracker: factory.configTracker, + logger: factory.logger, + kubeClient: factory.kubeClient, + flaggerClient: factory.flaggerClient, + labels: factory.labels, + configTracker: factory.configTracker, + excludedLabelsPrefixes: factory.excludedLabelsPrefixes, } daemonSetCtrl := &DaemonSetController{ logger: factory.logger, diff --git a/pkg/canary/util.go b/pkg/canary/util.go index 254cd38a..d3554a93 100644 --- a/pkg/canary/util.go +++ b/pkg/canary/util.go @@ -4,6 +4,7 @@ import ( "crypto/rand" "fmt" "io" + "strings" corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/util/intstr" @@ -75,6 +76,24 @@ func makeAnnotations(annotations map[string]string) (map[string]string, error) { return res, nil } +func excludeLabelsByPrefix(labels map[string]string, excludedLabelsPrefixes []string) map[string]string { + filteredLabels := make(map[string]string) + for key, value := range labels { + isPrefixExcluded := false + for _, excludeLabelPrefix := range excludedLabelsPrefixes { + if strings.HasPrefix(key, excludeLabelPrefix) { + isPrefixExcluded = true + break + } + } + if !isPrefixExcluded { + filteredLabels[key] = value + } + } + + return filteredLabels +} + func makePrimaryLabels(labels map[string]string, labelValue string, label string) map[string]string { res := make(map[string]string) for k, v := range labels { diff --git a/pkg/canary/util_test.go b/pkg/canary/util_test.go new file mode 100644 index 00000000..f4858f27 --- /dev/null +++ b/pkg/canary/util_test.go @@ -0,0 +1,38 @@ +package canary + +import ( + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestExcludeLabelsByPrefix(t *testing.T) { + labels := map[string]string{ + "foo": "bar", + 
"jenkins": "foo", + "flux123": "bar", + } + excludedLabelsPrefixes := []string{"jenkins", "flux"} + + filteredLabels := excludeLabelsByPrefix(labels, excludedLabelsPrefixes) + + assert.Equal(t, filteredLabels, map[string]string{ + "foo": "bar", + // jenkins excluded + // and flux123 also excluded + }) +} + +func TestMakePrimaryLabels(t *testing.T) { + labels := map[string]string{ + "lorem": "ipsum", + "foo": "old-bar", + } + + primaryLabels := makePrimaryLabels(labels, "new-bar", "foo") + + assert.Equal(t, primaryLabels, map[string]string{ + "lorem": "ipsum", // values from old map + "foo": "new-bar", // overriden value for a specific label + }) +} diff --git a/pkg/controller/scheduler_daemonset_fixture_test.go b/pkg/controller/scheduler_daemonset_fixture_test.go index 3c85484b..81c7574e 100644 --- a/pkg/controller/scheduler_daemonset_fixture_test.go +++ b/pkg/controller/scheduler_daemonset_fixture_test.go @@ -87,7 +87,7 @@ func newDaemonSetFixture(c *flaggerv1.Canary) daemonSetFixture { KubeClient: kubeClient, FlaggerClient: flaggerClient, } - canaryFactory := canary.NewFactory(kubeClient, flaggerClient, configTracker, []string{"app", "name"}, logger) + canaryFactory := canary.NewFactory(kubeClient, flaggerClient, configTracker, []string{"app", "name"}, []string{"jenkins"}, logger) ctrl := &Controller{ kubeClient: kubeClient, diff --git a/pkg/controller/scheduler_deployment_fixture_test.go b/pkg/controller/scheduler_deployment_fixture_test.go index e0a95758..73d59d2e 100644 --- a/pkg/controller/scheduler_deployment_fixture_test.go +++ b/pkg/controller/scheduler_deployment_fixture_test.go @@ -115,7 +115,7 @@ func newDeploymentFixture(c *flaggerv1.Canary) fixture { KubeClient: kubeClient, FlaggerClient: flaggerClient, } - canaryFactory := canary.NewFactory(kubeClient, flaggerClient, configTracker, []string{"app", "name"}, logger) + canaryFactory := canary.NewFactory(kubeClient, flaggerClient, configTracker, []string{"app", "name"}, []string{"jenkins"}, logger) ctrl := &Controller{ kubeClient: kubeClient, From 6ec377181afc953aa22355046d65a2b4271cc840 Mon Sep 17 00:00:00 2001 From: Daniel Albuquerque Date: Tue, 13 Oct 2020 21:58:47 +0100 Subject: [PATCH 36/47] Change from exclude labels to include labels --- charts/flagger/README.md | 2 +- cmd/flagger/main.go | 8 +++--- pkg/canary/daemonset_controller.go | 20 +++++++-------- pkg/canary/deployment_controller.go | 20 +++++++-------- pkg/canary/factory.go | 38 ++++++++++++++--------------- pkg/canary/util.go | 12 +++------ pkg/canary/util_test.go | 35 +++++++++++++++++++------- 7 files changed, 74 insertions(+), 61 deletions(-) diff --git a/charts/flagger/README.md b/charts/flagger/README.md index b82e27c8..0c684acc 100644 --- a/charts/flagger/README.md +++ b/charts/flagger/README.md @@ -125,7 +125,7 @@ Parameter | Description | Default `serviceAccount.name` | The name of the service account to create or use. If not set and `serviceAccount.create` is `true`, a name is generated using the Flagger fullname | `""` `serviceAccount.annotations` | Annotations for service account | `{}` `ingressAnnotationsPrefix` | Annotations prefix for ingresses | `custom.ingress.kubernetes.io` -`excludedLabelsPrefixes` | List of prefixes of labels that are excluded when creating primary controllers | `"fluxcd,jenkins"` +`includeLabelPrefix` | List of prefixes of labels that are copied when creating primary deployments or daemonsets. 
Use * to include all | `""` `rbac.create` | If `true`, create and use RBAC resources | `true` `rbac.pspEnabled` | If `true`, create and use a restricted pod security policy | `false` `crd.create` | If `true`, create Flagger's CRDs (should be enabled for Helm v2 only) | `false` diff --git a/cmd/flagger/main.go b/cmd/flagger/main.go index 217d5b70..5267f08e 100644 --- a/cmd/flagger/main.go +++ b/cmd/flagger/main.go @@ -43,7 +43,7 @@ var ( logLevel string port string msteamsURL string - excludedLabelsPrefixes string + includeLabelPrefix string slackURL string slackUser string slackChannel string @@ -75,7 +75,7 @@ func init() { flag.StringVar(&slackChannel, "slack-channel", "", "Slack channel.") flag.StringVar(&eventWebhook, "event-webhook", "", "Webhook for publishing flagger events") flag.StringVar(&msteamsURL, "msteams-url", "", "MS Teams incoming webhook URL.") - flag.StringVar(&excludedLabelsPrefixes, "excluded-labels-prefixes", "fluxcd,jenkins", "List of prefixes of labels that are excluded when creating primary controllers.") + flag.StringVar(&includeLabelPrefix, "include-label-prefix", "", "List of prefixes of labels that are copied when creating primary deployments or daemonsets. Use * to include all.") flag.IntVar(&threadiness, "threadiness", 2, "Worker concurrency.") flag.BoolVar(&zapReplaceGlobals, "zap-replace-globals", false, "Whether to change the logging level of the global zap logger.") flag.StringVar(&zapEncoding, "zap-encoding", "json", "Zap logger encoding.") @@ -186,9 +186,9 @@ func main() { configTracker = &canary.NopTracker{} } - excludedLabelsPrefixesArray := strings.Split(excludedLabelsPrefixes, ",") + includeLabelPrefixArray := strings.Split(includeLabelPrefix, ",") - canaryFactory := canary.NewFactory(kubeClient, flaggerClient, configTracker, labels, excludedLabelsPrefixesArray, logger) + canaryFactory := canary.NewFactory(kubeClient, flaggerClient, configTracker, labels, includeLabelPrefixArray, logger) c := controller.NewController( kubeClient, diff --git a/pkg/canary/daemonset_controller.go b/pkg/canary/daemonset_controller.go index d7582845..faa35cd3 100644 --- a/pkg/canary/daemonset_controller.go +++ b/pkg/canary/daemonset_controller.go @@ -22,12 +22,12 @@ var ( // DaemonSetController is managing the operations for Kubernetes DaemonSet kind type DaemonSetController struct { - kubeClient kubernetes.Interface - flaggerClient clientset.Interface - logger *zap.SugaredLogger - configTracker Tracker - labels []string - excludedLabelsPrefixes []string + kubeClient kubernetes.Interface + flaggerClient clientset.Interface + logger *zap.SugaredLogger + configTracker Tracker + labels []string + includeLabelPrefix []string } func (c *DaemonSetController) ScaleToZero(cd *flaggerv1.Canary) error { @@ -77,7 +77,7 @@ func (c *DaemonSetController) ScaleFromZero(cd *flaggerv1.Canary) error { // Initialize creates the primary DaemonSet, scales down the canary DaemonSet, // and returns the pod selector label and container ports func (c *DaemonSetController) Initialize(cd *flaggerv1.Canary) (err error) { - err = c.createPrimaryDaemonSet(cd, c.excludedLabelsPrefixes) + err = c.createPrimaryDaemonSet(cd, c.includeLabelPrefix) if err != nil { return fmt.Errorf("createPrimaryDaemonSet failed: %w", err) } @@ -201,7 +201,7 @@ func (c *DaemonSetController) GetMetadata(cd *flaggerv1.Canary) (string, string, return label, labelValue, ports, nil } -func (c *DaemonSetController) createPrimaryDaemonSet(cd *flaggerv1.Canary, excludedLabelsPrefixes []string) error { +func (c *DaemonSetController) 
createPrimaryDaemonSet(cd *flaggerv1.Canary, includeLabelPrefix []string) error { targetName := cd.Spec.TargetRef.Name primaryName := fmt.Sprintf("%s-primary", cd.Spec.TargetRef.Name) @@ -217,7 +217,7 @@ func (c *DaemonSetController) createPrimaryDaemonSet(cd *flaggerv1.Canary, exclu } // Create the labels map but filter unwanted labels - labels := excludeLabelsByPrefix(canaryDae.Labels, excludedLabelsPrefixes) + labels := includeLabelsByPrefix(canaryDae.Labels, includeLabelPrefix) label, labelValue, err := c.getSelectorLabel(canaryDae) primaryLabelValue := fmt.Sprintf("%s-primary", labelValue) @@ -245,7 +245,7 @@ func (c *DaemonSetController) createPrimaryDaemonSet(cd *flaggerv1.Canary, exclu ObjectMeta: metav1.ObjectMeta{ Name: primaryName, Namespace: cd.Namespace, - Labels: labels, + Labels: makePrimaryLabels(labels, primaryLabelValue, label), OwnerReferences: []metav1.OwnerReference{ *metav1.NewControllerRef(cd, schema.GroupVersionKind{ Group: flaggerv1.SchemeGroupVersion.Group, diff --git a/pkg/canary/deployment_controller.go b/pkg/canary/deployment_controller.go index f0fa37ef..030ae808 100644 --- a/pkg/canary/deployment_controller.go +++ b/pkg/canary/deployment_controller.go @@ -20,19 +20,19 @@ import ( // DeploymentController is managing the operations for Kubernetes Deployment kind type DeploymentController struct { - kubeClient kubernetes.Interface - flaggerClient clientset.Interface - logger *zap.SugaredLogger - configTracker Tracker - labels []string - excludedLabelsPrefixes []string + kubeClient kubernetes.Interface + flaggerClient clientset.Interface + logger *zap.SugaredLogger + configTracker Tracker + labels []string + includeLabelPrefix []string } // Initialize creates the primary deployment, hpa, // scales to zero the canary deployment and returns the pod selector label and container ports func (c *DeploymentController) Initialize(cd *flaggerv1.Canary) (err error) { primaryName := fmt.Sprintf("%s-primary", cd.Spec.TargetRef.Name) - if err := c.createPrimaryDeployment(cd, c.excludedLabelsPrefixes); err != nil { + if err := c.createPrimaryDeployment(cd, c.includeLabelPrefix); err != nil { return fmt.Errorf("createPrimaryDeployment failed: %w", err) } @@ -203,7 +203,7 @@ func (c *DeploymentController) GetMetadata(cd *flaggerv1.Canary) (string, string return label, labelValue, ports, nil } -func (c *DeploymentController) createPrimaryDeployment(cd *flaggerv1.Canary, excludedLabelsPrefixes []string) error { +func (c *DeploymentController) createPrimaryDeployment(cd *flaggerv1.Canary, includeLabelPrefix []string) error { targetName := cd.Spec.TargetRef.Name primaryName := fmt.Sprintf("%s-primary", cd.Spec.TargetRef.Name) @@ -213,7 +213,7 @@ func (c *DeploymentController) createPrimaryDeployment(cd *flaggerv1.Canary, exc } // Create the labels map but filter unwanted labels - labels := excludeLabelsByPrefix(canaryDep.Labels, excludedLabelsPrefixes) + labels := includeLabelsByPrefix(canaryDep.Labels, includeLabelPrefix) label, labelValue, err := c.getSelectorLabel(canaryDep) primaryLabelValue := fmt.Sprintf("%s-primary", labelValue) @@ -246,7 +246,7 @@ func (c *DeploymentController) createPrimaryDeployment(cd *flaggerv1.Canary, exc ObjectMeta: metav1.ObjectMeta{ Name: primaryName, Namespace: cd.Namespace, - Labels: labels, + Labels: makePrimaryLabels(labels, primaryLabelValue, label), OwnerReferences: []metav1.OwnerReference{ *metav1.NewControllerRef(cd, schema.GroupVersionKind{ Group: flaggerv1.SchemeGroupVersion.Group, diff --git a/pkg/canary/factory.go b/pkg/canary/factory.go 
index 99db8041..9c121b2b 100644 --- a/pkg/canary/factory.go +++ b/pkg/canary/factory.go @@ -8,38 +8,38 @@ import ( ) type Factory struct { - kubeClient kubernetes.Interface - flaggerClient clientset.Interface - logger *zap.SugaredLogger - configTracker Tracker - labels []string - excludedLabelsPrefixes []string + kubeClient kubernetes.Interface + flaggerClient clientset.Interface + logger *zap.SugaredLogger + configTracker Tracker + labels []string + includeLabelPrefix []string } func NewFactory(kubeClient kubernetes.Interface, flaggerClient clientset.Interface, configTracker Tracker, labels []string, - excludedLabelsPrefixes []string, + includeLabelPrefix []string, logger *zap.SugaredLogger) *Factory { return &Factory{ - kubeClient: kubeClient, - flaggerClient: flaggerClient, - logger: logger, - configTracker: configTracker, - labels: labels, - excludedLabelsPrefixes: excludedLabelsPrefixes, + kubeClient: kubeClient, + flaggerClient: flaggerClient, + logger: logger, + configTracker: configTracker, + labels: labels, + includeLabelPrefix: includeLabelPrefix, } } func (factory *Factory) Controller(kind string) Controller { deploymentCtrl := &DeploymentController{ - logger: factory.logger, - kubeClient: factory.kubeClient, - flaggerClient: factory.flaggerClient, - labels: factory.labels, - configTracker: factory.configTracker, - excludedLabelsPrefixes: factory.excludedLabelsPrefixes, + logger: factory.logger, + kubeClient: factory.kubeClient, + flaggerClient: factory.flaggerClient, + labels: factory.labels, + configTracker: factory.configTracker, + includeLabelPrefix: factory.includeLabelPrefix, } daemonSetCtrl := &DaemonSetController{ logger: factory.logger, diff --git a/pkg/canary/util.go b/pkg/canary/util.go index d3554a93..77b6fdfa 100644 --- a/pkg/canary/util.go +++ b/pkg/canary/util.go @@ -76,19 +76,15 @@ func makeAnnotations(annotations map[string]string) (map[string]string, error) { return res, nil } -func excludeLabelsByPrefix(labels map[string]string, excludedLabelsPrefixes []string) map[string]string { +func includeLabelsByPrefix(labels map[string]string, includeLabelPrefixes []string) map[string]string { filteredLabels := make(map[string]string) for key, value := range labels { - isPrefixExcluded := false - for _, excludeLabelPrefix := range excludedLabelsPrefixes { - if strings.HasPrefix(key, excludeLabelPrefix) { - isPrefixExcluded = true + for _, includeLabelPrefix := range includeLabelPrefixes { + if key == "*" || strings.HasPrefix(key, includeLabelPrefix) { + filteredLabels[key] = value break } } - if !isPrefixExcluded { - filteredLabels[key] = value - } } return filteredLabels diff --git a/pkg/canary/util_test.go b/pkg/canary/util_test.go index f4858f27..e8e36f01 100644 --- a/pkg/canary/util_test.go +++ b/pkg/canary/util_test.go @@ -6,20 +6,37 @@ import ( "github.com/stretchr/testify/assert" ) -func TestExcludeLabelsByPrefix(t *testing.T) { +func TestIncludeLabelsByPrefix(t *testing.T) { labels := map[string]string{ - "foo": "bar", - "jenkins": "foo", - "flux123": "bar", + "foo": "foo-value", + "bar": "bar-value", + "lorem": "ipsum", } - excludedLabelsPrefixes := []string{"jenkins", "flux"} + includeLabelPrefix := []string{"foo", "lor"} - filteredLabels := excludeLabelsByPrefix(labels, excludedLabelsPrefixes) + filteredLabels := includeLabelsByPrefix(labels, includeLabelPrefix) assert.Equal(t, filteredLabels, map[string]string{ - "foo": "bar", - // jenkins excluded - // and flux123 also excluded + "foo": "foo-value", + "lorem": "ipsum", + // bar excluded + }) +} + +func 
TestIncludeLabelsByPrefixWithWildcard(t *testing.T) { + labels := map[string]string{ + "foo": "foo-value", + "bar": "bar-value", + "lorem": "ipsum", + } + includeLabelPrefix := []string{"*"} + + filteredLabels := includeLabelsByPrefix(labels, includeLabelPrefix) + + assert.Equal(t, filteredLabels, map[string]string{ + "foo": "foo-value", + "bar": "bar-value", + "lorem": "ipsum", }) } From 8b87cf1757dc6fe86511b9e36c66d0883808e9be Mon Sep 17 00:00:00 2001 From: Daniel Albuquerque Date: Tue, 13 Oct 2020 21:59:26 +0100 Subject: [PATCH 37/47] MIssing commit --- charts/flagger/templates/deployment.yaml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/charts/flagger/templates/deployment.yaml b/charts/flagger/templates/deployment.yaml index d67b4273..edb88ae6 100644 --- a/charts/flagger/templates/deployment.yaml +++ b/charts/flagger/templates/deployment.yaml @@ -106,8 +106,8 @@ spec: {{- if .Values.ingressAnnotationsPrefix }} - -ingress-annotations-prefix={{ .Values.ingressAnnotationsPrefix }} {{- end }} - {{- if .Values.excludedLabelsPrefixes }} - - -excluded-labels-prefixes={{ .Values.excludedLabelsPrefixes }} + {{- if .Values.includeLabelPrefix }} + - -excluded-labels-prefixes={{ .Values.includeLabelPrefix }} {{- end }} {{- if .Values.ingressClass }} - -ingress-class={{ .Values.ingressClass }} From bef02d8e1f39c466e85fb7ce084469e4d3c1a5bb Mon Sep 17 00:00:00 2001 From: Daniel Albuquerque Date: Tue, 13 Oct 2020 22:00:31 +0100 Subject: [PATCH 38/47] Rename proprty from exclude to include --- charts/flagger/templates/deployment.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/charts/flagger/templates/deployment.yaml b/charts/flagger/templates/deployment.yaml index edb88ae6..363c18ad 100644 --- a/charts/flagger/templates/deployment.yaml +++ b/charts/flagger/templates/deployment.yaml @@ -107,7 +107,7 @@ spec: - -ingress-annotations-prefix={{ .Values.ingressAnnotationsPrefix }} {{- end }} {{- if .Values.includeLabelPrefix }} - - -excluded-labels-prefixes={{ .Values.includeLabelPrefix }} + - -include-label-prefix={{ .Values.includeLabelPrefix }} {{- end }} {{- if .Values.ingressClass }} - -ingress-class={{ .Values.ingressClass }} From 5ca5647faba6b0aaeae3a2563dc0c7501164b63f Mon Sep 17 00:00:00 2001 From: Daniel Albuquerque Date: Tue, 13 Oct 2020 22:01:49 +0100 Subject: [PATCH 39/47] Remove refs to jenkins --- pkg/controller/scheduler_daemonset_fixture_test.go | 2 +- pkg/controller/scheduler_deployment_fixture_test.go | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/pkg/controller/scheduler_daemonset_fixture_test.go b/pkg/controller/scheduler_daemonset_fixture_test.go index 81c7574e..b4d2f52a 100644 --- a/pkg/controller/scheduler_daemonset_fixture_test.go +++ b/pkg/controller/scheduler_daemonset_fixture_test.go @@ -87,7 +87,7 @@ func newDaemonSetFixture(c *flaggerv1.Canary) daemonSetFixture { KubeClient: kubeClient, FlaggerClient: flaggerClient, } - canaryFactory := canary.NewFactory(kubeClient, flaggerClient, configTracker, []string{"app", "name"}, []string{"jenkins"}, logger) + canaryFactory := canary.NewFactory(kubeClient, flaggerClient, configTracker, []string{"app", "name"}, []string{""}, logger) ctrl := &Controller{ kubeClient: kubeClient, diff --git a/pkg/controller/scheduler_deployment_fixture_test.go b/pkg/controller/scheduler_deployment_fixture_test.go index 73d59d2e..3b84e220 100644 --- a/pkg/controller/scheduler_deployment_fixture_test.go +++ b/pkg/controller/scheduler_deployment_fixture_test.go @@ -115,7 +115,7 @@ func 
newDeploymentFixture(c *flaggerv1.Canary) fixture { KubeClient: kubeClient, FlaggerClient: flaggerClient, } - canaryFactory := canary.NewFactory(kubeClient, flaggerClient, configTracker, []string{"app", "name"}, []string{"jenkins"}, logger) + canaryFactory := canary.NewFactory(kubeClient, flaggerClient, configTracker, []string{"app", "name"}, []string{""}, logger) ctrl := &Controller{ kubeClient: kubeClient, From bd536b689faf3fd38bc602577b44b37adba78fd9 Mon Sep 17 00:00:00 2001 From: Daniel Albuquerque Date: Wed, 14 Oct 2020 15:20:15 +0100 Subject: [PATCH 40/47] Fix filtering of labels --- pkg/canary/util.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pkg/canary/util.go b/pkg/canary/util.go index 77b6fdfa..75d789fe 100644 --- a/pkg/canary/util.go +++ b/pkg/canary/util.go @@ -80,7 +80,7 @@ func includeLabelsByPrefix(labels map[string]string, includeLabelPrefixes []stri filteredLabels := make(map[string]string) for key, value := range labels { for _, includeLabelPrefix := range includeLabelPrefixes { - if key == "*" || strings.HasPrefix(key, includeLabelPrefix) { + if includeLabelPrefix == "*" || strings.HasPrefix(key, includeLabelPrefix) { filteredLabels[key] = value break } From d3e855ac86e4d33e25af3560d11d84100fc69656 Mon Sep 17 00:00:00 2001 From: Stefan Prodan Date: Wed, 21 Oct 2020 15:20:56 +0300 Subject: [PATCH 41/47] Add GitOps Toolkit integration to roadmap Signed-off-by: Stefan Prodan --- README.md | 16 +++++++++++++--- 1 file changed, 13 insertions(+), 3 deletions(-) diff --git a/README.md b/README.md index 3bf35853..100d943a 100644 --- a/README.md +++ b/README.md @@ -211,10 +211,20 @@ For more details on how the canary analysis and promotion works please [read the ### Roadmap +#### [GitOps Toolkit](https://github.com/fluxcd/toolkit) compatibility + +* Migrate Flagger to Kubernetes controller-runtime and [kubebuilder](https://github.com/kubernetes-sigs/kubebuilder) +* Make the Canary status compatible with [kstatus](https://github.com/kubernetes-sigs/cli-utils) +* Make Flagger emit Kubernetes events compatible with Flux v2 notification API +* Migrate CI to GitHub Actions and publish AMD64, ARM64 and ARMv7 container images +* Integrate Flagger into Flux v2 as the progressive delivery component + +#### Integrations + * Add support for Kubernetes [Ingress v2](https://github.com/kubernetes-sigs/service-apis) -* Integrate with other service mesh like Consul Connect and ingress controllers like HAProxy, ALB -* Integrate with other metrics providers like InfluxDB, Stackdriver, SignalFX -* Add support for comparing the canary metrics to the primary ones and do the validation based on the derivation between the two +* Add support for SMI compatible service mesh solutions like Open Service Mesh and Consul Connect +* Add support for ingress controllers like HAProxy and ALB +* Add support for metrics providers like InfluxDB, Stackdriver, SignalFX ### Contributing From fbece964e0ad47992dfa2905c80569d331358954 Mon Sep 17 00:00:00 2001 From: Daniel Albuquerque Date: Wed, 21 Oct 2020 14:20:09 +0100 Subject: [PATCH 42/47] Copy annotations to deployment and daemonset --- pkg/canary/daemonset_controller.go | 7 ++++--- pkg/canary/deployment_controller.go | 7 ++++--- 2 files changed, 8 insertions(+), 6 deletions(-) diff --git a/pkg/canary/daemonset_controller.go b/pkg/canary/daemonset_controller.go index faa35cd3..b3c39143 100644 --- a/pkg/canary/daemonset_controller.go +++ b/pkg/canary/daemonset_controller.go @@ -243,9 +243,10 @@ func (c *DaemonSetController) 
createPrimaryDaemonSet(cd *flaggerv1.Canary, inclu // create primary daemonset primaryDae = &appsv1.DaemonSet{ ObjectMeta: metav1.ObjectMeta{ - Name: primaryName, - Namespace: cd.Namespace, - Labels: makePrimaryLabels(labels, primaryLabelValue, label), + Name: primaryName, + Namespace: cd.Namespace, + Labels: makePrimaryLabels(labels, primaryLabelValue, label), + Annotations: canaryDae.Annotations, OwnerReferences: []metav1.OwnerReference{ *metav1.NewControllerRef(cd, schema.GroupVersionKind{ Group: flaggerv1.SchemeGroupVersion.Group, diff --git a/pkg/canary/deployment_controller.go b/pkg/canary/deployment_controller.go index 030ae808..10064acc 100644 --- a/pkg/canary/deployment_controller.go +++ b/pkg/canary/deployment_controller.go @@ -244,9 +244,10 @@ func (c *DeploymentController) createPrimaryDeployment(cd *flaggerv1.Canary, inc // create primary deployment primaryDep = &appsv1.Deployment{ ObjectMeta: metav1.ObjectMeta{ - Name: primaryName, - Namespace: cd.Namespace, - Labels: makePrimaryLabels(labels, primaryLabelValue, label), + Name: primaryName, + Namespace: cd.Namespace, + Labels: makePrimaryLabels(labels, primaryLabelValue, label), + Annotations: canaryDep.Annotations, OwnerReferences: []metav1.OwnerReference{ *metav1.NewControllerRef(cd, schema.GroupVersionKind{ Group: flaggerv1.SchemeGroupVersion.Group, From 1c58301fd71c84427d41ec9491a7a3b276a205b1 Mon Sep 17 00:00:00 2001 From: Sean Liao Date: Tue, 27 Oct 2020 19:47:07 +0100 Subject: [PATCH 43/47] fix release date --- CHANGELOG.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index b34576ca..158d483b 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -2,7 +2,7 @@ All notable changes to this project are documented in this file. -## 1.2.0 (2020-10-29) +## 1.2.0 (2020-09-29) Add support for New Relic metrics From a624a2977e14697dbfaae6e43ac7b8f936821366 Mon Sep 17 00:00:00 2001 From: Kazuki Nitta Date: Wed, 28 Oct 2020 18:38:54 +0900 Subject: [PATCH 44/47] Add support for Istio VirtualService delegation (#715) Add support for Istio VirtualService delegation --- .circleci/config.yml | 3 + artifacts/flagger/crd.yaml | 3 + charts/flagger/crds/crd.yaml | 3 + docs/gitbook/faq.md | 79 +++++++++++++++ kustomize/base/flagger/crd.yaml | 3 + pkg/apis/flagger/v1beta1/canary.go | 6 ++ pkg/router/istio.go | 13 +++ pkg/router/istio_test.go | 47 +++++++++ test/e2e-istio-tests-delegate.sh | 150 +++++++++++++++++++++++++++++ test/e2e-istio.sh | 7 +- 10 files changed, 312 insertions(+), 2 deletions(-) create mode 100755 test/e2e-istio-tests-delegate.sh diff --git a/.circleci/config.yml b/.circleci/config.yml index 047423cb..0e2ab403 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -97,6 +97,9 @@ jobs: - run: test/e2e-istio-dependencies.sh - run: test/e2e-istio-tests.sh - run: test/e2e-istio-tests-skip-analysis.sh + - run: test/e2e-kubernetes-cleanup.sh + - run: test/e2e-istio-dependencies.sh + - run: test/e2e-istio-tests-delegate.sh e2e-gloo-testing: machine: true diff --git a/artifacts/flagger/crd.yaml b/artifacts/flagger/crd.yaml index 37311bab..6fd69629 100644 --- a/artifacts/flagger/crd.yaml +++ b/artifacts/flagger/crd.yaml @@ -156,6 +156,9 @@ spec: type: array items: type: string + delegation: + description: enable behaving as a delegate VirtualService + type: boolean match: description: URI match conditions type: array diff --git a/charts/flagger/crds/crd.yaml b/charts/flagger/crds/crd.yaml index 37311bab..6fd69629 100644 --- a/charts/flagger/crds/crd.yaml +++ 
b/charts/flagger/crds/crd.yaml @@ -156,6 +156,9 @@ spec: type: array items: type: string + delegation: + description: enable behaving as a delegate VirtualService + type: boolean match: description: URI match conditions type: array diff --git a/docs/gitbook/faq.md b/docs/gitbook/faq.md index 5deb2cec..081f06ff 100644 --- a/docs/gitbook/faq.md +++ b/docs/gitbook/faq.md @@ -554,6 +554,85 @@ spec: Flagger works for user facing apps exposed outside the cluster via an ingress gateway and for backend HTTP APIs that are accessible only from inside the mesh. +If `Delegation` is enabled, Flagger would generate Istio VirtualService without hosts and gateway, +making the service compatible with Istio delegation. + +```yaml +apiVersion: flagger.app/v1beta1 +kind: Canary +metadata: + name: backend + namespace: test +spec: + service: + delegation: true + port: 9898 + targetRef: + apiVersion: v1 + kind: Deployment + name: podinfo + analysis: + interval: 15s + threshold: 15 + maxWeight: 30 + stepWeight: 10 +``` + +Based on the above spec, Flagger will create the following virtual service: + +```yaml +apiVersion: networking.istio.io/v1alpha3 +kind: VirtualService +metadata: + name: backend + namespace: test + ownerReferences: + - apiVersion: flagger.app/v1beta1 + blockOwnerDeletion: true + controller: true + kind: Canary + name: backend + uid: 58562662-5e10-4512-b269-2b789c1b30fe +spec: + http: + - route: + - destination: + host: podinfo-primary + weight: 100 + - destination: + host: podinfo-canary + weight: 0 +``` + +Therefore, The following virtual service forward the traffic to `/podinfo` by the above delegate VirtualService. + +```yaml +apiVersion: networking.istio.io/v1alpha3 +kind: VirtualService +metadata: + name: frontend + namespace: test +spec: + gateways: + - public-gateway.istio-system.svc.cluster.local + - mesh + hosts: + - frontend.example.com + - frontend + http: + - match: + - uri: + prefix: /podinfo + rewrite: + uri: / + delegate: + name: backend + namespace: test +``` + +Note that pilot env `PILOT_ENABLE_VIRTUAL_SERVICE_DELEGATE` must also be set. +(For the use of Istio Delegation, you can refer to the documentation of [Virtual Service](https://istio.io/latest/docs/reference/config/networking/virtual-service/#Delegate) and [pilot environment variables](https://istio.io/latest/docs/reference/commands/pilot-discovery/#envvars).) + ### Istio Ingress Gateway **How can I expose multiple canaries on the same external domain?** diff --git a/kustomize/base/flagger/crd.yaml b/kustomize/base/flagger/crd.yaml index 37311bab..6fd69629 100644 --- a/kustomize/base/flagger/crd.yaml +++ b/kustomize/base/flagger/crd.yaml @@ -156,6 +156,9 @@ spec: type: array items: type: string + delegation: + description: enable behaving as a delegate VirtualService + type: boolean match: description: URI match conditions type: array diff --git a/pkg/apis/flagger/v1beta1/canary.go b/pkg/apis/flagger/v1beta1/canary.go index 3451e796..12cd6052 100644 --- a/pkg/apis/flagger/v1beta1/canary.go +++ b/pkg/apis/flagger/v1beta1/canary.go @@ -137,6 +137,12 @@ type CanaryService struct { // +optional Hosts []string `json:"hosts,omitempty"` + // If enabled, Flagger would generate Istio VirtualServices without hosts and gateway, + // making the service compatible with Istio delegation. Note that pilot env + // `PILOT_ENABLE_VIRTUAL_SERVICE_DELEGATE` must also be set. 
+ // +optional + Delegation bool `json:"delegation,omitempty"` + // TrafficPolicy attached to the generated Istio destination rules // +optional TrafficPolicy *istiov1alpha3.TrafficPolicy `json:"trafficPolicy,omitempty"` diff --git a/pkg/router/istio.go b/pkg/router/istio.go index bb15c4b6..93cc1139 100644 --- a/pkg/router/istio.go +++ b/pkg/router/istio.go @@ -98,6 +98,13 @@ func (ir *IstioRouter) reconcileDestinationRule(canary *flaggerv1.Canary, name s func (ir *IstioRouter) reconcileVirtualService(canary *flaggerv1.Canary) error { apexName, primaryName, canaryName := canary.GetServiceNames() + if canary.Spec.Service.Delegation { + if len(canary.Spec.Service.Hosts) > 0 || len(canary.Spec.Service.Gateways) > 0 { + // delegate VirtualService cannot have hosts and gateways. + return fmt.Errorf("VirtualService %s.%s cannot have hosts and gateways when delegation enabled", apexName, canary.Namespace) + } + } + // set hosts and add the ClusterIP service host if it doesn't exists hosts := canary.Spec.Service.Hosts var hasServiceHost bool @@ -132,6 +139,12 @@ func (ir *IstioRouter) reconcileVirtualService(canary *flaggerv1.Canary) error { makeDestination(canary, canaryName, 0), } + if canary.Spec.Service.Delegation { + // delegate VirtualService requires the hosts and gateway empty. + hosts = []string{} + gateways = []string{} + } + newSpec := istiov1alpha3.VirtualServiceSpec{ Hosts: hosts, Gateways: gateways, diff --git a/pkg/router/istio_test.go b/pkg/router/istio_test.go index b3457ad1..08b3b7cc 100644 --- a/pkg/router/istio_test.go +++ b/pkg/router/istio_test.go @@ -333,6 +333,53 @@ func TestIstioRouter_GatewayPort(t *testing.T) { assert.Equal(t, uint32(mocks.canary.Spec.Service.Port), port) } +func TestIstioRouter_Delegate(t *testing.T) { + t.Run("ok", func(t *testing.T) { + mocks := newFixture(nil) + mocks.canary.Spec.Service.Hosts = []string{} + mocks.canary.Spec.Service.Gateways = []string{} + mocks.canary.Spec.Service.Delegation = true + + router := &IstioRouter{ + logger: mocks.logger, + flaggerClient: mocks.flaggerClient, + istioClient: mocks.meshClient, + kubeClient: mocks.kubeClient, + } + + err := router.Reconcile(mocks.canary) + require.NoError(t, err) + + vs, err := mocks.meshClient.NetworkingV1alpha3().VirtualServices("default").Get(context.TODO(), "podinfo", metav1.GetOptions{}) + require.NoError(t, err) + + assert.Equal(t, 0, len(vs.Spec.Hosts)) + assert.Equal(t, 0, len(vs.Spec.Gateways)) + }) + + t.Run("invalid", func(t *testing.T) { + mocks := newFixture(nil) + if len(mocks.canary.Spec.Service.Gateways) == 0 { + // in this case, the gateways or hosts should not be not empty because it requires to cause an error. 
+ mocks.canary.Spec.Service.Gateways = []string{ + "public-gateway.istio", + "mesh", + } + } + mocks.canary.Spec.Service.Delegation = true + + router := &IstioRouter{ + logger: mocks.logger, + flaggerClient: mocks.flaggerClient, + istioClient: mocks.meshClient, + kubeClient: mocks.kubeClient, + } + + err := router.Reconcile(mocks.canary) + require.Error(t, err) + }) +} + func TestIstioRouter_Finalize(t *testing.T) { mocks := newFixture(nil) router := &IstioRouter{ diff --git a/test/e2e-istio-tests-delegate.sh b/test/e2e-istio-tests-delegate.sh new file mode 100755 index 00000000..9f934c15 --- /dev/null +++ b/test/e2e-istio-tests-delegate.sh @@ -0,0 +1,150 @@ +#!/usr/bin/env bash + +# This script runs e2e tests for when the canary delegation is enabled +# Prerequisites: Kubernetes Kind and Istio + +set -o errexit + +echo '>>> Set pilot env to enable virtual service delegate' +kubectl -n istio-system set env deploy istiod PILOT_ENABLE_VIRTUAL_SERVICE_DELEGATE=true +kubectl -n istio-system rollout status deploy istiod + +echo '>>> Initialising Gateway' +cat <>> Initialising root virtual service' +cat <>> Initialising canary for delegate' +cat <>> Waiting for primary to be ready' +retries=50 +count=0 +ok=false +until ${ok}; do + kubectl -n test get canary/podinfo | grep 'Initialized' && ok=true || ok=false + sleep 5 + count=$(($count + 1)) + if [[ ${count} -eq ${retries} ]]; then + kubectl -n istio-system logs deployment/flagger + echo "No more retries left" + exit 1 + fi +done + +echo '✔ Canary initialization test passed' + +echo '>>> Triggering canary deployment' +kubectl -n test set image deployment/podinfo podinfod=stefanprodan/podinfo:3.1.1 + +echo '>>> Waiting for canary promotion' +retries=50 +count=0 +ok=false +until ${ok}; do + kubectl -n test describe deployment/podinfo-primary | grep '3.1.1' && ok=true || ok=false + sleep 10 + kubectl -n istio-system logs deployment/flagger --tail 1 + count=$(($count + 1)) + if [[ ${count} -eq ${retries} ]]; then + kubectl -n test describe deployment/podinfo + kubectl -n test describe deployment/podinfo-primary + kubectl -n istio-system logs deployment/flagger + echo "No more retries left" + exit 1 + fi +done + +echo '>>> Waiting for canary finalization' +retries=50 +count=0 +ok=false +until ${ok}; do + kubectl -n test get canary/podinfo | grep 'Succeeded' && ok=true || ok=false + sleep 5 + count=$(($count + 1)) + if [[ ${count} -eq ${retries} ]]; then + kubectl -n istio-system logs deployment/flagger + echo "No more retries left" + exit 1 + fi +done + +echo '>>> Set pilot env to disable virtual service delegate' +kubectl -n istio-system set env deploy istiod PILOT_ENABLE_VIRTUAL_SERVICE_DELEGATE=false +kubectl -n istio-system rollout status deploy istiod + +echo '✔ Canary promotion test passed' + +if [[ "$1" = "canary" ]]; then + exit 0 +fi diff --git a/test/e2e-istio.sh b/test/e2e-istio.sh index 20c1c5c7..c32ec1fb 100755 --- a/test/e2e-istio.sh +++ b/test/e2e-istio.sh @@ -2,7 +2,7 @@ set -o errexit -ISTIO_VER="1.6.7" +ISTIO_VER="1.7.3" REPO_ROOT=$(git rev-parse --show-toplevel) echo ">>> Downloading Istio ${ISTIO_VER}" @@ -10,8 +10,11 @@ cd ${REPO_ROOT}/bin && \ curl -L https://istio.io/downloadIstio | ISTIO_VERSION=${ISTIO_VER} sh - echo ">>> Installing Istio ${ISTIO_VER}" -${REPO_ROOT}/bin/istio-${ISTIO_VER}/bin/istioctl manifest apply --set profile=default +${REPO_ROOT}/bin/istio-${ISTIO_VER}/bin/istioctl manifest install --set profile=default \ + --set values.pilot.resources.requests.cpu=100m \ + --set 
values.pilot.resources.requests.memory=100Mi +kubectl apply -f https://raw.githubusercontent.com/istio/istio/release-1.7/samples/addons/prometheus.yaml kubectl -n istio-system rollout status deployment/prometheus kubectl -n istio-system get all From f51629d6b599f76705b2fb263fdd40bc5417ec3b Mon Sep 17 00:00:00 2001 From: Kingdon Barrett Date: Tue, 3 Nov 2020 18:11:13 -0500 Subject: [PATCH 45/47] Fix Typo in nginx-progressive-delivery.md "exmaple" -> example --- docs/gitbook/tutorials/nginx-progressive-delivery.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/gitbook/tutorials/nginx-progressive-delivery.md b/docs/gitbook/tutorials/nginx-progressive-delivery.md index e18d91ff..136f180c 100644 --- a/docs/gitbook/tutorials/nginx-progressive-delivery.md +++ b/docs/gitbook/tutorials/nginx-progressive-delivery.md @@ -345,7 +345,7 @@ podinfod=stefanprodan/podinfo:3.1.3 Generate high response latency: ```bash -watch curl http://app.exmaple.com/delay/2 +watch curl http://app.example.com/delay/2 ``` Watch Flagger logs: From 3abeea43d0d4650e2ea2636c8b0638454ca9b5ed Mon Sep 17 00:00:00 2001 From: Kingdon Barrett Date: Tue, 3 Nov 2020 18:11:59 -0500 Subject: [PATCH 46/47] Fix Typo in skipper-progressive-delivery.md "exmaple" -> example --- docs/gitbook/tutorials/skipper-progressive-delivery.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/gitbook/tutorials/skipper-progressive-delivery.md b/docs/gitbook/tutorials/skipper-progressive-delivery.md index 3cc3d17f..2df8a2f4 100644 --- a/docs/gitbook/tutorials/skipper-progressive-delivery.md +++ b/docs/gitbook/tutorials/skipper-progressive-delivery.md @@ -351,7 +351,7 @@ podinfod=stefanprodan/podinfo:4.0.6 Generate high response latency: ```bash -watch curl http://app.exmaple.com/delay/2 +watch curl http://app.example.com/delay/2 ``` Watch Flagger logs: From 4db9701c62c7f88cd2196098a99808f166a84579 Mon Sep 17 00:00:00 2001 From: Henrique Fernandes Date: Wed, 11 Nov 2020 17:48:27 -0300 Subject: [PATCH 47/47] Add QPS and Burst configs for kubernetes client Implemented as requested in PR723 supersedes: https://github.com/weaveworks/flagger/pull/723 fixes: https://github.com/weaveworks/flagger/issues/638 --- charts/flagger/templates/deployment.yaml | 6 ++++++ charts/flagger/values.yaml | 3 +++ cmd/flagger/main.go | 10 ++++++++++ 3 files changed, 19 insertions(+) diff --git a/charts/flagger/templates/deployment.yaml b/charts/flagger/templates/deployment.yaml index 363c18ad..3d8b246c 100644 --- a/charts/flagger/templates/deployment.yaml +++ b/charts/flagger/templates/deployment.yaml @@ -115,6 +115,12 @@ spec: {{- if .Values.eventWebhook }} - -event-webhook={{ .Values.eventWebhook }} {{- end }} + {{- if .Values.kubeconfigQPS }} + - -kubeconfig-qps={{ .Values.kubeconfigQPS }} + {{- end }} + {{- if .Values.kubeconfigBurst }} + - -kubeconfig-burst={{ .Values.kubeconfigBurst }} + {{- end }} {{- if .Values.istio.kubeconfig.secretName }} - -kubeconfig-service-mesh=/tmp/istio-host/{{ .Values.istio.kubeconfig.key }} {{- end }} diff --git a/charts/flagger/values.yaml b/charts/flagger/values.yaml index 3b72e870..15cd45d1 100644 --- a/charts/flagger/values.yaml +++ b/charts/flagger/values.yaml @@ -127,6 +127,9 @@ prometheus: image: docker.io/prom/prometheus:v2.21.0 retention: 2h +kubeconfigQPS: "" +kubeconfigBurst: "" + # Istio multi-cluster service mesh (shared control plane single-network) # https://istio.io/docs/setup/install/multicluster/shared-vpn/ istio: diff --git a/cmd/flagger/main.go b/cmd/flagger/main.go index 
5267f08e..d0c54d77 100644 --- a/cmd/flagger/main.go +++ b/cmd/flagger/main.go @@ -38,6 +38,8 @@ import ( var ( masterURL string kubeconfig string + kubeconfigQPS int + kubeconfigBurst int metricsServer string controlLoopInterval time.Duration logLevel string @@ -65,6 +67,8 @@ var ( func init() { flag.StringVar(&kubeconfig, "kubeconfig", "", "Path to a kubeconfig. Only required if out-of-cluster.") + flag.IntVar(&kubeconfigQPS, "kubeconfig-qps", 100, "Set QPS for kubeconfig.") + flag.IntVar(&kubeconfigBurst, "kubeconfig-burst", 250, "Set Burst for kubeconfig.") flag.StringVar(&masterURL, "master", "", "The address of the Kubernetes API server. Overrides any value in kubeconfig. Only required if out-of-cluster.") flag.StringVar(&metricsServer, "metrics-server", "http://prometheus:9090", "Prometheus URL.") flag.DurationVar(&controlLoopInterval, "control-loop-interval", 10*time.Second, "Kubernetes API sync interval.") @@ -118,6 +122,9 @@ func main() { logger.Fatalf("Error building kubeconfig: %v", err) } + cfg.QPS = float32(kubeconfigQPS) + cfg.Burst = kubeconfigBurst + kubeClient, err := kubernetes.NewForConfig(cfg) if err != nil { logger.Fatalf("Error building kubernetes clientset: %v", err) @@ -137,6 +144,9 @@ func main() { logger.Fatalf("Error building host kubeconfig: %v", err) } + cfgHost.QPS = float32(kubeconfigQPS) + cfgHost.Burst = kubeconfigBurst + meshClient, err := clientset.NewForConfig(cfgHost) if err != nil { logger.Fatalf("Error building mesh clientset: %v", err)
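
A minimal usage sketch (not part of the patch series itself): with the `kubeconfigQPS` and `kubeconfigBurst` chart values introduced above, the Kubernetes client rate limits can be raised at install time. The release name, namespace, and numeric values below are illustrative assumptions, not taken from the patches; the chart maps these values to the new `-kubeconfig-qps` and `-kubeconfig-burst` flags.

```bash
# Install or upgrade Flagger with higher Kubernetes client rate limits.
# Leaving the values unset keeps the flag defaults from cmd/flagger/main.go
# (QPS 100, Burst 250).
helm upgrade -i flagger flagger/flagger \
  --namespace istio-system \
  --set kubeconfigQPS=200 \
  --set kubeconfigBurst=500
```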