Merge pull request #1216 from aryan9600/keda-scaled-objects

Add support for KEDA ScaledObjects as an auto scaler
This commit is contained in:
Sanskar Jaiswal 2022-07-08 19:21:22 +05:30 committed by GitHub
commit 76bac5d971
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
46 changed files with 2593 additions and 24 deletions

View File

@ -32,6 +32,7 @@ jobs:
- skipper
- kubernetes
- gatewayapi
- keda
steps:
- name: Checkout
uses: actions/checkout@v2

View File

@ -226,6 +226,19 @@ rules:
- update
- patch
- delete
- apiGroups:
- keda.sh
resources:
- scaledobjects
- scaledobjects/finalizers
verbs:
- get
- list
- watch
- create
- update
- patch
- delete
- nonResourceURLs:
- /version
verbs:

View File

@ -104,7 +104,7 @@ spec:
name:
type: string
autoscalerRef:
description: HPA selector
description: Scaler selector
type: object
required: ["apiVersion", "kind", "name"]
properties:
@ -114,8 +114,13 @@ spec:
type: string
enum:
- HorizontalPodAutoscaler
- ScaledObject
name:
type: string
primaryScalerQueries:
type: object
additionalProperties:
type: string
ingressRef:
description: Ingress selector
type: object

View File

@ -104,7 +104,7 @@ spec:
name:
type: string
autoscalerRef:
description: HPA selector
description: Scaler selector
type: object
required: ["apiVersion", "kind", "name"]
properties:
@ -114,8 +114,13 @@ spec:
type: string
enum:
- HorizontalPodAutoscaler
- ScaledObject
name:
type: string
primaryScalerQueries:
type: object
additionalProperties:
type: string
ingressRef:
description: Ingress selector
type: object

View File

@ -234,6 +234,19 @@ rules:
- update
- patch
- delete
- apiGroups:
- keda.sh
resources:
- scaledobjects
- scaledobjects/finalizers
verbs:
- get
- list
- watch
- create
- update
- patch
- delete
- nonResourceURLs:
- /version
verbs:

View File

@ -35,6 +35,7 @@
* [Gateway API Canary Deployments](tutorials/gatewayapi-progressive-delivery.md)
* [Blue/Green Deployments](tutorials/kubernetes-blue-green.md)
* [Canary analysis with Prometheus Operator](tutorials/prometheus-operator.md)
* [Canary analysis with KEDA ScaledObjects](tutorials/keda-scaledobject.md)
* [Zero downtime deployments](tutorials/zero-downtime-deployments.md)
## Dev

View File

@ -0,0 +1,234 @@
# Canary analysis with KEDA ScaledObjects
This guide shows you how to use Flagger with KEDA ScaledObjects to autoscale workloads during a Canary analysis run.
We will be using a Blue/Green deployment strategy with the Kubernetes provider for the sake of this tutorial, but
you can use any deployment strategy combined with any supported provider.
## Prerequisites
Flagger requires a Kubernetes cluster **v1.16** or newer. For this tutorial, we'll need KEDA **2.7.1** or newer.
Install KEDA:
```bash
helm repo add kedacore https://kedacore.github.io/charts
kubectl create namespace keda
helm install keda kedacore/keda --namespace keda
```
Install Flagger:
```bash
helm repo add flagger https://flagger.app
helm upgrade -i flagger flagger/flagger \
--namespace flagger \
--set prometheus.install=true \
--set meshProvider=kubernetes
```
## Bootstrap
Flagger takes a Kubernetes deployment and a KEDA ScaledObject targeting the deployment. It then creates a series of objects
(Kubernetes deployments, ClusterIP services and another KEDA ScaledObject targeting the created Deployment).
These objects expose the application inside the mesh and drive the Canary analysis and Blue/Green promotion.
Create a test namespace:
```bash
kubectl create ns test
```
Create a deployment named `podinfo`:
```bash
kubectl apply -n test -f https://raw.githubusercontent.com/fluxcd/flagger/main/kustomize/podinfo/deployment.yaml
```
Deploy the load testing service to generate traffic during the analysis:
```bash
kubectl apply -k https://github.com/fluxcd/flagger//kustomize/tester?ref=main
```
Create a ScaledObject which targets the `podinfo` deployment and uses Prometheus as a trigger:
```yaml
apiVersion: keda.sh/v1alpha1
kind: ScaledObject
metadata:
name: podinfo-so
namespace: test
spec:
scaleTargetRef:
name: podinfo
pollingInterval: 10
cooldownPeriod: 20
minReplicaCount: 1
maxReplicaCount: 3
triggers:
- type: prometheus
metadata:
name: prom-trigger
serverAddress: http://flagger-prometheus.flagger-system:9090
metricName: http_requests_total
query: sum(rate(http_requests_total{ app="podinfo" }[30s]))
threshold: '5'
```
Create a canary custom resource for the `podinfo` deployment:
```yaml
apiVersion: flagger.app/v1beta1
kind: Canary
metadata:
name: podinfo
namespace: test
spec:
provider: kubernetes
# deployment reference
targetRef:
apiVersion: apps/v1
kind: Deployment
name: podinfo
# Scaler reference
autoscalerRef:
apiVersion: keda.sh/v1alpha1
kind: ScaledObject
# ScaledObject targeting the canary deployment
name: podinfo-so
# Mapping between trigger names and the related query to use for the generated
# ScaledObject targeting the primary deployment. (Optional)
primaryScalerQueries:
prom-trigger: sum(rate(http_requests_total{ app="podinfo-primary" }[30s]))
# the maximum time in seconds for the canary deployment
# to make progress before rollback (default 600s)
progressDeadlineSeconds: 60
service:
port: 80
targetPort: 9898
name: podinfo-svc
portDiscovery: true
analysis:
# schedule interval (default 60s)
interval: 15s
# max number of failed checks before rollback
threshold: 5
# number of checks to run before promotion
iterations: 5
# Prometheus checks based on
# http_request_duration_seconds histogram
metrics:
- name: request-success-rate
interval: 1m
thresholdRange:
min: 99
- name: request-duration
interval: 30s
thresholdRange:
max: 500
# load testing hooks
webhooks:
- name: load-test
url: http://flagger-loadtester.test/
timeout: 5s
metadata:
type: cmd
cmd: "hey -z 2m -q 20 -c 2 http://podinfo-svc-canary.test/"
```
Save the above resource as `podinfo-canary.yaml` and then apply it:
```bash
kubectl apply -f ./podinfo-canary.yaml
```
After a couple of seconds Flagger will create the canary objects:
```bash
# applied
deployment.apps/podinfo
scaledobject.keda.sh/podinfo-so
canary.flagger.app/podinfo
# generated
deployment.apps/podinfo-primary
horizontalpodautoscaler.autoscaling/podinfo-primary
service/podinfo
service/podinfo-canary
service/podinfo-primary
scaledobject.keda.sh/podinfo-so-primary
```
We refer to our ScaledObject for the canary deployment using `.spec.autoscalerRef`. Flagger will use this to generate a ScaledObject which will scale the primary deployment.
By default, Flagger will try to guess the query to use for the primary ScaledObject, by replacing all mentions of `{.spec.targetRef.Name}` and `{.spec.targetRef.Name}-canary`
with `{.spec.targetRef.Name}-primary`, for all triggers.
For example, if your ScaledObject has a trigger query defined as: `sum(rate(http_requests_total{ app="podinfo" }[30s]))` or `sum(rate(http_requests_total{ app="podinfo-primary" }[30s]))`, then the primary ScaledObject will have the same trigger with a query defined as `sum(rate(http_requests_total{ app="podinfo-primary" }[30s]))`.
If the generated query does not meet your requirements, you can specify the query for autoscaling the primary deployment explicitly using
`.spec.autoscalerRef.primaryScalerQueries`, which lets you define a query for each trigger. Please note that, your ScaledObject's `.spec.triggers[@].name` must
not be blank, as Flagger needs that to identify each trigger uniquely.
After the bootstrap, the podinfo deployment will be scaled to zero and the traffic to `podinfo.test` will be routed to the primary pods. To keep the podinfo deployment
at 0 replicas and pause auto scaling, Flagger will add an annotation to your ScaledObject: `autoscaling.keda.sh/paused-replicas: 0`.
During the canary analysis, the annotation is removed, to enable auto scaling for the podinfo deployment.
The `podinfo-canary.test` address can be used to target directly the canary pods.
When the canary analysis starts, Flagger will call the pre-rollout webhooks before routing traffic to the canary. The Blue/Green deployment will run for five iterations while validating the HTTP metrics and rollout hooks every 15 seconds.
## Automated Blue/Green promotion
Trigger a deployment by updating the container image:
```bash
kubectl -n test set image deployment/podinfo \
podinfod=ghcr.io/stefanprodan/podinfo:6.0.1
```
Flagger detects that the deployment revision changed and starts a new rollout:
```text
kubectl -n test describe canary/podinfo
Events:
New revision detected podinfo.test
Waiting for podinfo.test rollout to finish: 0 of 1 updated replicas are available
Pre-rollout check acceptance-test passed
Advance podinfo.test canary iteration 1/5
Advance podinfo.test canary iteration 2/5
Advance podinfo.test canary iteration 3/5
Advance podinfo.test canary iteration 4/5
Advance podinfo.test canary iteration 5/5
Copying podinfo.test template spec to podinfo-primary.test
Waiting for podinfo-primary.test rollout to finish: 1 of 2 updated replicas are available
Promotion completed! Scaling down podinfo.test
```
**Note** that if you apply new changes to the deployment during the canary analysis, Flagger will restart the analysis.
You can monitor all canaries with:
```bash
watch kubectl get canaries --all-namespaces
NAMESPACE NAME STATUS WEIGHT LASTTRANSITIONTIME
test podinfo Progressing 100 2019-06-16T14:05:07Z
```
You can monitor the scaling of the deployments with:
```bash
watch kubectl -n test get deploy podinfo
NAME READY UP-TO-DATE AVAILABLE AGE
flagger-loadtester 1/1 1 1 4m21s
podinfo 3/3 3 3 4m28s
podinfo-primary 3/3 3 3 3m14s
```
You can monitor how Flagger edits the annotations of your ScaledObject with:
```bash
watch "kubectl get -n test scaledobjects podinfo-so -o=jsonpath='{.metadata.annotations}'"
```

View File

@ -30,7 +30,7 @@ chmod +x ${CODEGEN_PKG}/generate-groups.sh
${CODEGEN_PKG}/generate-groups.sh all \
github.com/fluxcd/flagger/pkg/client github.com/fluxcd/flagger/pkg/apis \
"flagger:v1beta1 appmesh:v1beta2 appmesh:v1beta1 istio:v1alpha3 smi:v1alpha1 smi:v1alpha2 smi:v1alpha3 gloo/gloo:v1 gloo/gateway:v1 projectcontour:v1 traefik:v1alpha1 kuma:v1alpha1 gatewayapi:v1alpha2" \
"flagger:v1beta1 appmesh:v1beta2 appmesh:v1beta1 istio:v1alpha3 smi:v1alpha1 smi:v1alpha2 smi:v1alpha3 gloo/gloo:v1 gloo/gateway:v1 projectcontour:v1 traefik:v1alpha1 kuma:v1alpha1 gatewayapi:v1alpha2 keda:v1alpha1" \
--output-base "${TEMP_DIR}" \
--go-header-file ${SCRIPT_ROOT}/hack/boilerplate.go.txt

View File

@ -104,7 +104,7 @@ spec:
name:
type: string
autoscalerRef:
description: HPA selector
description: Scaler selector
type: object
required: ["apiVersion", "kind", "name"]
properties:
@ -114,8 +114,13 @@ spec:
type: string
enum:
- HorizontalPodAutoscaler
- ScaledObject
name:
type: string
primaryScalerQueries:
type: object
additionalProperties:
type: string
ingressRef:
description: Ingress selector
type: object

View File

@ -216,6 +216,19 @@ rules:
- update
- patch
- delete
- apiGroups:
- keda.sh
resources:
- scaledobjects
- scaledobjects/finalizers
verbs:
- get
- list
- watch
- create
- update
- patch
- delete
- nonResourceURLs:
- /version
verbs:

View File

@ -74,7 +74,7 @@ type CanarySpec struct {
// AutoscalerRef references an autoscaling resource
// +optional
AutoscalerRef *LocalObjectReference `json:"autoscalerRef,omitempty"`
AutoscalerRef *AutoscalerRefernce `json:"autoscalerRef,omitempty"`
// Reference to NGINX ingress resource
// +optional
@ -413,6 +413,24 @@ type LocalObjectReference struct {
Name string `json:"name"`
}
type AutoscalerRefernce struct {
// API version of the scaler
// +optional
APIVersion string `json:"apiVersion,omitempty"`
// Kind of the scaler
// +optional
Kind string `json:"kind,omitempty"`
// Name of the scaler
Name string `json:"name"`
// PrimaryScalerQueries maps a unique id to a query for the primary
// scaler, if a scaler supports scaling using queries.
// +optional
PrimaryScalerQueries map[string]string `json:"primaryScalerQueries"`
}
// CustomMetadata holds labels and annotations to set on generated objects.
type CustomMetadata struct {
Labels map[string]string `json:"labels,omitempty"`

View File

@ -151,6 +151,29 @@ func (in *AlertProviderStatus) DeepCopy() *AlertProviderStatus {
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *AutoscalerRefernce) DeepCopyInto(out *AutoscalerRefernce) {
*out = *in
if in.PrimaryScalerQueries != nil {
in, out := &in.PrimaryScalerQueries, &out.PrimaryScalerQueries
*out = make(map[string]string, len(*in))
for key, val := range *in {
(*out)[key] = val
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AutoscalerRefernce.
func (in *AutoscalerRefernce) DeepCopy() *AutoscalerRefernce {
if in == nil {
return nil
}
out := new(AutoscalerRefernce)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *Canary) DeepCopyInto(out *Canary) {
*out = *in
@ -422,8 +445,8 @@ func (in *CanarySpec) DeepCopyInto(out *CanarySpec) {
out.TargetRef = in.TargetRef
if in.AutoscalerRef != nil {
in, out := &in.AutoscalerRef, &out.AutoscalerRef
*out = new(LocalObjectReference)
**out = **in
*out = new(AutoscalerRefernce)
(*in).DeepCopyInto(*out)
}
if in.IngressRef != nil {
in, out := &in.IngressRef, &out.IngressRef

View File

@ -0,0 +1,5 @@
// Package keda holds constants shared by the KEDA API packages.
package keda

const (
	// GroupName is the API group name for KEDA resources.
	GroupName = "keda.sh"
)

View File

@ -0,0 +1,5 @@
// +k8s:deepcopy-gen=package

// Package v1alpha1 is the v1alpha1 version of the KEDA API.
// +groupName=keda.sh
package v1alpha1

View File

@ -0,0 +1,36 @@
package v1alpha1

import (
	"github.com/fluxcd/flagger/pkg/apis/keda"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/runtime/schema"
)

// SchemeGroupVersion is the GroupVersion for the KEDA API
var SchemeGroupVersion = schema.GroupVersion{Group: keda.GroupName, Version: "v1alpha1"}

// Kind takes an unqualified kind and returns back a Group qualified GroupKind
func Kind(kind string) schema.GroupKind {
	return SchemeGroupVersion.WithKind(kind).GroupKind()
}

// Resource gets a KEDA GroupResource for a specified resource
func Resource(resource string) schema.GroupResource {
	return SchemeGroupVersion.WithResource(resource).GroupResource()
}

var (
	// SchemeBuilder collects the functions that register KEDA types with a scheme.
	SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes)
	// AddToScheme applies all stored registration functions to a scheme.
	AddToScheme = SchemeBuilder.AddToScheme
)

// addKnownTypes adds the list of known types to Scheme.
func addKnownTypes(scheme *runtime.Scheme) error {
	scheme.AddKnownTypes(SchemeGroupVersion,
		&ScaledObject{},
		&ScaledObjectList{},
	)
	metav1.AddToGroupVersion(scheme, SchemeGroupVersion)
	return nil
}

View File

@ -0,0 +1,226 @@
/*
Copyright 2021 The KEDA Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1alpha1
import (
autoscalingv2beta2 "k8s.io/api/autoscaling/v2beta2"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
// +genclient
// +kubebuilder:object:root=true
// +kubebuilder:subresource:status
// +kubebuilder:resource:path=scaledobjects,scope=Namespaced,shortName=so
// +kubebuilder:printcolumn:name="ScaleTargetKind",type="string",JSONPath=".status.scaleTargetKind"
// +kubebuilder:printcolumn:name="ScaleTargetName",type="string",JSONPath=".spec.scaleTargetRef.name"
// +kubebuilder:printcolumn:name="Min",type="integer",JSONPath=".spec.minReplicaCount"
// +kubebuilder:printcolumn:name="Max",type="integer",JSONPath=".spec.maxReplicaCount"
// +kubebuilder:printcolumn:name="Triggers",type="string",JSONPath=".spec.triggers[*].type"
// +kubebuilder:printcolumn:name="Authentication",type="string",JSONPath=".spec.triggers[*].authenticationRef.name"
// +kubebuilder:printcolumn:name="Ready",type="string",JSONPath=".status.conditions[?(@.type==\"Ready\")].status"
// +kubebuilder:printcolumn:name="Active",type="string",JSONPath=".status.conditions[?(@.type==\"Active\")].status"
// +kubebuilder:printcolumn:name="Fallback",type="string",JSONPath=".status.conditions[?(@.type==\"Fallback\")].status"
// +kubebuilder:printcolumn:name="Age",type="date",JSONPath=".metadata.creationTimestamp"
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// ScaledObject is a specification for a ScaledObject resource
type ScaledObject struct {
metav1.TypeMeta `json:",inline"`
metav1.ObjectMeta `json:"metadata,omitempty"`
Spec ScaledObjectSpec `json:"spec"`
// +optional
Status ScaledObjectStatus `json:"status,omitempty"`
}
// HealthStatus is the status for a ScaledObject's health
type HealthStatus struct {
// +optional
NumberOfFailures *int32 `json:"numberOfFailures,omitempty"`
// +optional
Status HealthStatusType `json:"status,omitempty"`
}
// HealthStatusType is an indication of whether the health status is happy or failing
type HealthStatusType string
const (
// HealthStatusHappy means the status of the health object is happy
HealthStatusHappy HealthStatusType = "Happy"
// HealthStatusFailing means the status of the health object is failing
HealthStatusFailing HealthStatusType = "Failing"
)
// ScaledObjectSpec is the spec for a ScaledObject resource
type ScaledObjectSpec struct {
ScaleTargetRef *ScaleTarget `json:"scaleTargetRef"`
// +optional
PollingInterval *int32 `json:"pollingInterval,omitempty"`
// +optional
CooldownPeriod *int32 `json:"cooldownPeriod,omitempty"`
// +optional
IdleReplicaCount *int32 `json:"idleReplicaCount,omitempty"`
// +optional
MinReplicaCount *int32 `json:"minReplicaCount,omitempty"`
// +optional
MaxReplicaCount *int32 `json:"maxReplicaCount,omitempty"`
// +optional
Advanced *AdvancedConfig `json:"advanced,omitempty"`
Triggers []ScaleTriggers `json:"triggers"`
// +optional
Fallback *Fallback `json:"fallback,omitempty"`
}
// Fallback is the spec for fallback options
type Fallback struct {
FailureThreshold int32 `json:"failureThreshold"`
Replicas int32 `json:"replicas"`
}
// AdvancedConfig specifies advance scaling options
type AdvancedConfig struct {
// +optional
HorizontalPodAutoscalerConfig *HorizontalPodAutoscalerConfig `json:"horizontalPodAutoscalerConfig,omitempty"`
// +optional
RestoreToOriginalReplicaCount bool `json:"restoreToOriginalReplicaCount,omitempty"`
}
// HorizontalPodAutoscalerConfig specifies horizontal scale config
type HorizontalPodAutoscalerConfig struct {
// +optional
Behavior *autoscalingv2beta2.HorizontalPodAutoscalerBehavior `json:"behavior,omitempty"`
}
// ScaleTarget holds a reference to the scale target Object
type ScaleTarget struct {
	// Name of the workload to scale.
	Name string `json:"name"`
	// +optional
	APIVersion string `json:"apiVersion,omitempty"`
	// +optional
	Kind string `json:"kind,omitempty"`
	// +optional
	EnvSourceContainerName string `json:"envSourceContainerName,omitempty"`
}
// ScaleTriggers reference the scaler that will be used
type ScaleTriggers struct {
Type string `json:"type"`
// +optional
Name string `json:"name,omitempty"`
Metadata map[string]string `json:"metadata"`
// +optional
AuthenticationRef *ScaledObjectAuthRef `json:"authenticationRef,omitempty"`
// +optional
FallbackReplicas *int32 `json:"fallback,omitempty"`
}
// +k8s:openapi-gen=true
// ScaledObjectStatus is the status for a ScaledObject resource
// +optional
type ScaledObjectStatus struct {
// +optional
ScaleTargetKind string `json:"scaleTargetKind,omitempty"`
// +optional
ScaleTargetGVKR *GroupVersionKindResource `json:"scaleTargetGVKR,omitempty"`
// +optional
OriginalReplicaCount *int32 `json:"originalReplicaCount,omitempty"`
// +optional
LastActiveTime *metav1.Time `json:"lastActiveTime,omitempty"`
// +optional
ExternalMetricNames []string `json:"externalMetricNames,omitempty"`
// +optional
ResourceMetricNames []string `json:"resourceMetricNames,omitempty"`
// +optional
Conditions Conditions `json:"conditions,omitempty"`
// +optional
Health map[string]HealthStatus `json:"health,omitempty"`
}
// +kubebuilder:object:root=true
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// ScaledObjectList is a list of ScaledObject resources
type ScaledObjectList struct {
metav1.TypeMeta `json:",inline"`
metav1.ListMeta `json:"metadata"`
Items []ScaledObject `json:"items"`
}
// ScaledObjectAuthRef points to the TriggerAuthentication or ClusterTriggerAuthentication object that
// is used to authenticate the scaler with the environment
type ScaledObjectAuthRef struct {
Name string `json:"name"`
// Kind of the resource being referred to. Defaults to TriggerAuthentication.
// +optional
Kind string `json:"kind,omitempty"`
}
// GroupVersionKindResource provides unified structure for schema.GroupVersionKind and Resource
type GroupVersionKindResource struct {
Group string `json:"group"`
Version string `json:"version"`
Kind string `json:"kind"`
Resource string `json:"resource"`
}
// ConditionType specifies the available conditions for the resource
type ConditionType string
const (
// ConditionReady specifies that the resource is ready.
// For long-running resources.
ConditionReady ConditionType = "Ready"
// ConditionActive specifies that the resource has finished.
// For resource which run to completion.
ConditionActive ConditionType = "Active"
// ConditionFallback specifies that the resource has a fallback active.
ConditionFallback ConditionType = "Fallback"
)
const (
// ScaledObjectConditionReadySucccesReason defines the default Reason for correct ScaledObject
ScaledObjectConditionReadySucccesReason = "ScaledObjectReady"
// ScaledObjectConditionReadySuccessMessage defines the default Message for correct ScaledObject
ScaledObjectConditionReadySuccessMessage = "ScaledObject is defined correctly and is ready for scaling"
)
// Condition to store the condition state
type Condition struct {
// Type of condition
// +required
Type ConditionType `json:"type" description:"type of status condition"`
// Status of the condition, one of True, False, Unknown.
// +required
Status metav1.ConditionStatus `json:"status" description:"status of the condition, one of True, False, Unknown"`
// The reason for the condition's last transition.
// +optional
Reason string `json:"reason,omitempty" description:"one-word CamelCase reason for the condition's last transition"`
// A human readable message indicating details about the transition.
// +optional
Message string `json:"message,omitempty" description:"human-readable message indicating details about last transition"`
}
// Conditions an array representation to store multiple Conditions
type Conditions []Condition
const PausedReplicasAnnotation = "autoscaling.keda.sh/paused-replicas"

View File

@ -0,0 +1,399 @@
//go:build !ignore_autogenerated
// +build !ignore_autogenerated
/*
Copyright 2020 The Flux authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by deepcopy-gen. DO NOT EDIT.
package v1alpha1
import (
v2beta2 "k8s.io/api/autoscaling/v2beta2"
runtime "k8s.io/apimachinery/pkg/runtime"
)
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *AdvancedConfig) DeepCopyInto(out *AdvancedConfig) {
*out = *in
if in.HorizontalPodAutoscalerConfig != nil {
in, out := &in.HorizontalPodAutoscalerConfig, &out.HorizontalPodAutoscalerConfig
*out = new(HorizontalPodAutoscalerConfig)
(*in).DeepCopyInto(*out)
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AdvancedConfig.
func (in *AdvancedConfig) DeepCopy() *AdvancedConfig {
if in == nil {
return nil
}
out := new(AdvancedConfig)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *Condition) DeepCopyInto(out *Condition) {
*out = *in
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Condition.
func (in *Condition) DeepCopy() *Condition {
if in == nil {
return nil
}
out := new(Condition)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in Conditions) DeepCopyInto(out *Conditions) {
{
in := &in
*out = make(Conditions, len(*in))
copy(*out, *in)
return
}
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Conditions.
func (in Conditions) DeepCopy() Conditions {
if in == nil {
return nil
}
out := new(Conditions)
in.DeepCopyInto(out)
return *out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *Fallback) DeepCopyInto(out *Fallback) {
*out = *in
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Fallback.
func (in *Fallback) DeepCopy() *Fallback {
if in == nil {
return nil
}
out := new(Fallback)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *GroupVersionKindResource) DeepCopyInto(out *GroupVersionKindResource) {
*out = *in
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GroupVersionKindResource.
func (in *GroupVersionKindResource) DeepCopy() *GroupVersionKindResource {
if in == nil {
return nil
}
out := new(GroupVersionKindResource)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *HealthStatus) DeepCopyInto(out *HealthStatus) {
*out = *in
if in.NumberOfFailures != nil {
in, out := &in.NumberOfFailures, &out.NumberOfFailures
*out = new(int32)
**out = **in
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HealthStatus.
func (in *HealthStatus) DeepCopy() *HealthStatus {
if in == nil {
return nil
}
out := new(HealthStatus)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *HorizontalPodAutoscalerConfig) DeepCopyInto(out *HorizontalPodAutoscalerConfig) {
*out = *in
if in.Behavior != nil {
in, out := &in.Behavior, &out.Behavior
*out = new(v2beta2.HorizontalPodAutoscalerBehavior)
(*in).DeepCopyInto(*out)
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HorizontalPodAutoscalerConfig.
func (in *HorizontalPodAutoscalerConfig) DeepCopy() *HorizontalPodAutoscalerConfig {
if in == nil {
return nil
}
out := new(HorizontalPodAutoscalerConfig)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ScaleTarget) DeepCopyInto(out *ScaleTarget) {
*out = *in
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ScaleTarget.
func (in *ScaleTarget) DeepCopy() *ScaleTarget {
if in == nil {
return nil
}
out := new(ScaleTarget)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ScaleTriggers) DeepCopyInto(out *ScaleTriggers) {
*out = *in
if in.Metadata != nil {
in, out := &in.Metadata, &out.Metadata
*out = make(map[string]string, len(*in))
for key, val := range *in {
(*out)[key] = val
}
}
if in.AuthenticationRef != nil {
in, out := &in.AuthenticationRef, &out.AuthenticationRef
*out = new(ScaledObjectAuthRef)
**out = **in
}
if in.FallbackReplicas != nil {
in, out := &in.FallbackReplicas, &out.FallbackReplicas
*out = new(int32)
**out = **in
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ScaleTriggers.
func (in *ScaleTriggers) DeepCopy() *ScaleTriggers {
if in == nil {
return nil
}
out := new(ScaleTriggers)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ScaledObject) DeepCopyInto(out *ScaledObject) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
in.Spec.DeepCopyInto(&out.Spec)
in.Status.DeepCopyInto(&out.Status)
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ScaledObject.
func (in *ScaledObject) DeepCopy() *ScaledObject {
if in == nil {
return nil
}
out := new(ScaledObject)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *ScaledObject) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ScaledObjectAuthRef) DeepCopyInto(out *ScaledObjectAuthRef) {
*out = *in
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ScaledObjectAuthRef.
func (in *ScaledObjectAuthRef) DeepCopy() *ScaledObjectAuthRef {
if in == nil {
return nil
}
out := new(ScaledObjectAuthRef)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ScaledObjectList) DeepCopyInto(out *ScaledObjectList) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ListMeta.DeepCopyInto(&out.ListMeta)
if in.Items != nil {
in, out := &in.Items, &out.Items
*out = make([]ScaledObject, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ScaledObjectList.
func (in *ScaledObjectList) DeepCopy() *ScaledObjectList {
if in == nil {
return nil
}
out := new(ScaledObjectList)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *ScaledObjectList) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ScaledObjectSpec) DeepCopyInto(out *ScaledObjectSpec) {
*out = *in
if in.ScaleTargetRef != nil {
in, out := &in.ScaleTargetRef, &out.ScaleTargetRef
*out = new(ScaleTarget)
**out = **in
}
if in.PollingInterval != nil {
in, out := &in.PollingInterval, &out.PollingInterval
*out = new(int32)
**out = **in
}
if in.CooldownPeriod != nil {
in, out := &in.CooldownPeriod, &out.CooldownPeriod
*out = new(int32)
**out = **in
}
if in.IdleReplicaCount != nil {
in, out := &in.IdleReplicaCount, &out.IdleReplicaCount
*out = new(int32)
**out = **in
}
if in.MinReplicaCount != nil {
in, out := &in.MinReplicaCount, &out.MinReplicaCount
*out = new(int32)
**out = **in
}
if in.MaxReplicaCount != nil {
in, out := &in.MaxReplicaCount, &out.MaxReplicaCount
*out = new(int32)
**out = **in
}
if in.Advanced != nil {
in, out := &in.Advanced, &out.Advanced
*out = new(AdvancedConfig)
(*in).DeepCopyInto(*out)
}
if in.Triggers != nil {
in, out := &in.Triggers, &out.Triggers
*out = make([]ScaleTriggers, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
if in.Fallback != nil {
in, out := &in.Fallback, &out.Fallback
*out = new(Fallback)
**out = **in
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ScaledObjectSpec.
func (in *ScaledObjectSpec) DeepCopy() *ScaledObjectSpec {
	// A nil receiver yields a nil copy so callers can chain without nil checks.
	if in == nil {
		return nil
	}
	out := new(ScaledObjectSpec)
	in.DeepCopyInto(out)
	return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ScaledObjectStatus) DeepCopyInto(out *ScaledObjectStatus) {
	*out = *in
	// Pointer, slice and map fields are re-allocated below so the copy shares no memory with the receiver.
	if in.ScaleTargetGVKR != nil {
		in, out := &in.ScaleTargetGVKR, &out.ScaleTargetGVKR
		*out = new(GroupVersionKindResource)
		**out = **in
	}
	if in.OriginalReplicaCount != nil {
		in, out := &in.OriginalReplicaCount, &out.OriginalReplicaCount
		*out = new(int32)
		**out = **in
	}
	if in.LastActiveTime != nil {
		in, out := &in.LastActiveTime, &out.LastActiveTime
		*out = (*in).DeepCopy()
	}
	if in.ExternalMetricNames != nil {
		in, out := &in.ExternalMetricNames, &out.ExternalMetricNames
		*out = make([]string, len(*in))
		copy(*out, *in)
	}
	if in.ResourceMetricNames != nil {
		in, out := &in.ResourceMetricNames, &out.ResourceMetricNames
		*out = make([]string, len(*in))
		copy(*out, *in)
	}
	if in.Conditions != nil {
		in, out := &in.Conditions, &out.Conditions
		*out = make(Conditions, len(*in))
		copy(*out, *in)
	}
	if in.Health != nil {
		in, out := &in.Health, &out.Health
		*out = make(map[string]HealthStatus, len(*in))
		for key, val := range *in {
			(*out)[key] = *val.DeepCopy()
		}
	}
	return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ScaledObjectStatus.
func (in *ScaledObjectStatus) DeepCopy() *ScaledObjectStatus {
	// A nil receiver yields a nil copy so callers can chain without nil checks.
	if in == nil {
		return nil
	}
	out := new(ScaledObjectStatus)
	in.DeepCopyInto(out)
	return out
}

View File

@ -395,7 +395,7 @@ func newDeploymentControllerTestCanary(cc canaryConfigs) *flaggerv1.Canary {
APIVersion: "apps/v1",
Kind: "Deployment",
},
AutoscalerRef: &flaggerv1.LocalObjectReference{
AutoscalerRef: &flaggerv1.AutoscalerRefernce{
Name: "podinfo",
APIVersion: "autoscaling/v2beta2",
Kind: "HorizontalPodAutoscaler",

View File

@ -92,9 +92,18 @@ func (factory *Factory) ScalerReconciler(kind string) ScalerReconciler {
includeLabelPrefix: factory.includeLabelPrefix,
}
soReconciler := &ScaledObjectReconciler{
logger: factory.logger,
kubeClient: factory.kubeClient,
flaggerClient: factory.flaggerClient,
includeLabelPrefix: factory.includeLabelPrefix,
}
switch kind {
case "HorizontalPodAutoscaler":
return hpaReconciler
case "ScaledObject":
return soReconciler
default:
return nil
}

View File

@ -0,0 +1,198 @@
package canary
import (
"context"
"fmt"
"math/rand"
"strings"
"time"
"go.uber.org/zap"
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/util/retry"
flaggerv1 "github.com/fluxcd/flagger/pkg/apis/flagger/v1beta1"
keda "github.com/fluxcd/flagger/pkg/apis/keda/v1alpha1"
clientset "github.com/fluxcd/flagger/pkg/client/clientset/versioned"
"github.com/google/go-cmp/cmp"
"k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
// ScaledObjectReconciler is a ScalerReconciler that reconciles KEDA ScaledObjects.
type ScaledObjectReconciler struct {
	kubeClient         kubernetes.Interface // core Kubernetes API access
	flaggerClient      clientset.Interface  // gives access to the keda.sh/v1alpha1 ScaledObject API
	logger             *zap.SugaredLogger
	includeLabelPrefix []string // label/annotation prefixes carried over to the primary ScaledObject
}
// ReconcilePrimaryScaler creates or updates the primary ScaledObject for the
// canary when the canary references an autoscaler; it is a no-op otherwise.
func (sor *ScaledObjectReconciler) ReconcilePrimaryScaler(cd *flaggerv1.Canary, init bool) error {
	// Nothing to reconcile when no autoscaler is referenced.
	if cd.Spec.AutoscalerRef == nil {
		return nil
	}
	return sor.reconcilePrimaryScaler(cd, init)
}
// reconcilePrimaryScaler mirrors the ScaledObject referenced by the canary into
// a "<autoscaler-name>-primary" ScaledObject that targets the primary workload.
// When init is true an existing primary ScaledObject is left untouched;
// otherwise it is updated (with conflict retries) whenever its spec drifts
// from the desired spec derived from the canary's ScaledObject.
func (sor *ScaledObjectReconciler) reconcilePrimaryScaler(cd *flaggerv1.Canary, init bool) error {
	primaryName := fmt.Sprintf("%s-primary", cd.Spec.TargetRef.Name)
	targetSo, err := sor.flaggerClient.KedaV1alpha1().ScaledObjects(cd.Namespace).Get(context.TODO(), cd.Spec.AutoscalerRef.Name, metav1.GetOptions{})
	if err != nil {
		return fmt.Errorf("Keda ScaledObject %s.%s get query error: %w",
			cd.Spec.AutoscalerRef.Name, cd.Namespace, err)
	}
	targetSoClone := targetSo.DeepCopy()

	// Rewrite the trigger queries on the clone so the primary scaler watches
	// the primary workload's metrics instead of the canary's.
	setPrimaryScaledObjectQueries(cd, targetSoClone.Spec.Triggers)

	soSpec := keda.ScaledObjectSpec{
		ScaleTargetRef: &keda.ScaleTarget{
			Name:                   primaryName,
			Kind:                   targetSoClone.Spec.ScaleTargetRef.Kind,
			APIVersion:             targetSoClone.Spec.ScaleTargetRef.APIVersion,
			EnvSourceContainerName: targetSoClone.Spec.ScaleTargetRef.EnvSourceContainerName,
		},
		PollingInterval:  targetSoClone.Spec.PollingInterval,
		CooldownPeriod:   targetSoClone.Spec.CooldownPeriod,
		MinReplicaCount:  targetSoClone.Spec.MinReplicaCount,
		MaxReplicaCount:  targetSoClone.Spec.MaxReplicaCount,
		Advanced:         targetSoClone.Spec.Advanced,
		Triggers:         targetSoClone.Spec.Triggers,
		Fallback:         targetSoClone.Spec.Fallback,
		IdleReplicaCount: targetSoClone.Spec.IdleReplicaCount,
	}

	primarySoName := fmt.Sprintf("%s-primary", cd.Spec.AutoscalerRef.Name)
	primarySo, err := sor.flaggerClient.KedaV1alpha1().ScaledObjects(cd.Namespace).Get(context.TODO(), primarySoName, metav1.GetOptions{})
	if errors.IsNotFound(err) {
		primarySo = &keda.ScaledObject{
			ObjectMeta: makeObjectMeta(primarySoName, targetSoClone.Labels, cd),
			Spec:       soSpec,
		}
		_, err = sor.flaggerClient.KedaV1alpha1().ScaledObjects(cd.Namespace).Create(context.TODO(), primarySo, metav1.CreateOptions{})
		if err != nil {
			return fmt.Errorf("creating Keda ScaledObject %s.%s failed: %w",
				primarySo.Name, primarySo.Namespace, err)
		}
		sor.logger.With("canary", fmt.Sprintf("%s.%s", cd.Name, cd.Namespace)).Infof(
			"Keda ScaledObject %s.%s created", primarySo.GetName(), cd.Namespace)
		return nil
	} else if err != nil {
		// Bugfix: when the get fails, primarySo carries no metadata, so the
		// message must be built from the requested name and canary namespace
		// (previously primarySo.Name/primarySo.Namespace, both empty here).
		return fmt.Errorf("Keda ScaledObject %s.%s get query failed: %w",
			primarySoName, cd.Namespace, err)
	}

	if primarySo != nil && !init {
		// Only issue an update when the desired spec differs from the live one.
		if diff := cmp.Diff(soSpec, primarySo.Spec); diff != "" {
			err = retry.RetryOnConflict(retry.DefaultBackoff, func() error {
				primarySo, err := sor.flaggerClient.KedaV1alpha1().ScaledObjects(cd.Namespace).Get(context.TODO(), primarySoName, metav1.GetOptions{})
				if err != nil {
					return err
				}
				primarySoClone := primarySo.DeepCopy()
				primarySoClone.Spec = soSpec
				// Carry over only annotations/labels matching the configured prefixes.
				filteredAnnotations := includeLabelsByPrefix(primarySo.Annotations, sor.includeLabelPrefix)
				primarySoClone.Annotations = filteredAnnotations
				filteredLabels := includeLabelsByPrefix(primarySo.ObjectMeta.Labels, sor.includeLabelPrefix)
				primarySoClone.Labels = filteredLabels
				_, err = sor.flaggerClient.KedaV1alpha1().ScaledObjects(cd.Namespace).Update(context.TODO(), primarySoClone, metav1.UpdateOptions{})
				return err
			})
			if err != nil {
				return fmt.Errorf("updating ScaledObject %s.%s failed: %w", primarySoName, cd.Namespace, err)
			}
		}
	}
	return nil
}
// PauseTargetScaler sets KEDA's paused-replicas annotation to "0" on the
// canary's ScaledObject, so autoscaling of the target workload is suspended
// for the duration of the analysis. Conflicts are retried.
func (sor *ScaledObjectReconciler) PauseTargetScaler(cd *flaggerv1.Canary) error {
	return retry.RetryOnConflict(retry.DefaultBackoff, func() error {
		scaledObject, err := sor.flaggerClient.KedaV1alpha1().ScaledObjects(cd.Namespace).Get(context.TODO(), cd.Spec.AutoscalerRef.Name, metav1.GetOptions{})
		if err != nil {
			return fmt.Errorf("Keda ScaledObject %s.%s get query error: %w",
				cd.Spec.AutoscalerRef.Name, cd.Namespace, err)
		}
		clone := scaledObject.DeepCopy()
		if clone.ObjectMeta.Annotations == nil {
			clone.ObjectMeta.Annotations = map[string]string{}
		}
		clone.ObjectMeta.Annotations[keda.PausedReplicasAnnotation] = "0"
		_, err = sor.flaggerClient.KedaV1alpha1().ScaledObjects(cd.Namespace).Update(context.TODO(), clone, metav1.UpdateOptions{})
		return err
	})
}
// ResumeTargetScaler removes KEDA's paused-replicas annotation from the
// canary's ScaledObject, letting KEDA resume autoscaling of the target
// workload. Conflicts are retried.
func (sor *ScaledObjectReconciler) ResumeTargetScaler(cd *flaggerv1.Canary) error {
	return retry.RetryOnConflict(retry.DefaultBackoff, func() error {
		scaledObject, err := sor.flaggerClient.KedaV1alpha1().ScaledObjects(cd.Namespace).Get(context.TODO(), cd.Spec.AutoscalerRef.Name, metav1.GetOptions{})
		if err != nil {
			return fmt.Errorf("Keda ScaledObject %s.%s get query error: %w",
				cd.Spec.AutoscalerRef.Name, cd.Namespace, err)
		}
		clone := scaledObject.DeepCopy()
		// delete is a no-op on a nil map or a missing key, so no guards are needed.
		delete(clone.ObjectMeta.Annotations, keda.PausedReplicasAnnotation)
		_, err = sor.flaggerClient.KedaV1alpha1().ScaledObjects(cd.Namespace).Update(context.TODO(), clone, metav1.UpdateOptions{})
		return err
	})
}
var letters = []rune("abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ")

// seededRand is seeded once at startup. The previous implementation called
// rand.Seed(time.Now().UnixNano()) on every invocation, which is deprecated
// since Go 1.20 and can produce identical sequences for two calls that land
// in the same nanosecond — defeating the placeholder's collision avoidance.
var seededRand = rand.New(rand.NewSource(time.Now().UnixNano()))

// randSeq returns a 10-character random string of ASCII letters, used as a
// collision-unlikely placeholder while rewriting scaler queries.
func randSeq() string {
	b := make([]rune, 10)
	for i := range b {
		b[i] = letters[seededRand.Intn(len(letters))]
	}
	return string(b)
}
// setPrimaryScaledObjectQueries accepts a list of ScaleTriggers and modifies the query
// for each of them so that it targets the primary workload instead of the canary.
//
// If .spec.autoscalerRef.primaryScalerQueries is specified, the triggers must be named,
// otherwise it might lead to unexpected behaviour; each named trigger's "query"
// metadata entry is overwritten with the user-provided query.
func setPrimaryScaledObjectQueries(cd *flaggerv1.Canary, triggers []keda.ScaleTriggers) {
	for i := range triggers {
		// Index into the slice rather than ranging by value, so that the
		// nil-map guard below persists on the caller's trigger.
		trigger := &triggers[i]
		if cd.Spec.AutoscalerRef.PrimaryScalerQueries != nil {
			for name, query := range cd.Spec.AutoscalerRef.PrimaryScalerQueries {
				if trigger.Name == name {
					if trigger.Metadata == nil {
						// Guard against a trigger declared without metadata;
						// assigning into a nil map would panic.
						trigger.Metadata = map[string]string{}
					}
					trigger.Metadata["query"] = query
				}
			}
		} else {
			for key, val := range trigger.Metadata {
				if key == "query" {
					// We could've used regex with negative look-arounds to avoid using a placeholder, but Go does
					// not support them. We need them because, we need to replace both "podinfo" and "podinfo-canary"
					// (assuming "podinfo" to be the targetRef name), with "podinfo-primary". This placeholder makes
					// sure that we don't end up with a query which contains terms like "podinfo-primary-canary" or
					// "podinfo-primary-primary". This is a best effort approach, and users should be encouraged to
					// check the generated query and opt for using `autoscalerRef.primaryScalerQuery` if the former
					// doesn't look correct.
					placeholder := randSeq()
					replaced := strings.ReplaceAll(val, fmt.Sprintf("%s-canary", cd.Spec.TargetRef.Name), placeholder)
					replaced = strings.ReplaceAll(replaced, cd.Spec.TargetRef.Name, fmt.Sprintf("%s-primary", cd.Spec.TargetRef.Name))
					replaced = strings.ReplaceAll(replaced, placeholder, fmt.Sprintf("%s-primary", cd.Spec.TargetRef.Name))
					trigger.Metadata[key] = replaced
				}
			}
		}
	}
}

View File

@ -0,0 +1,156 @@
package canary
import (
"context"
"fmt"
"testing"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
flaggerv1 "github.com/fluxcd/flagger/pkg/apis/flagger/v1beta1"
keda "github.com/fluxcd/flagger/pkg/apis/keda/v1alpha1"
)
// Test_reconcilePrimaryScaledObject verifies that the primary ScaledObject is
// created from the canary's ScaledObject (with the Prometheus query rewritten
// to target the primary workload), and that later changes to the canary's
// ScaledObject are propagated to the primary on a non-init reconcile.
func Test_reconcilePrimaryScaledObject(t *testing.T) {
	mocks := newScalerReconcilerFixture(scalerConfig{
		targetName: "podinfo",
		scaler:     "ScaledObject",
	})
	soReconciler := mocks.scalerReconciler.(*ScaledObjectReconciler)
	so, err := mocks.flaggerClient.KedaV1alpha1().ScaledObjects("default").Get(context.TODO(), "podinfo", metav1.GetOptions{})
	require.NoError(t, err)
	err = soReconciler.reconcilePrimaryScaler(mocks.canary, true)
	require.NoError(t, err)
	// Initial reconcile: the primary targets "<target>-primary" and inherits
	// the fixture's polling interval / min replicas; "podinfo-canary" in the
	// query is rewritten to "podinfo-primary".
	primarySO, err := mocks.flaggerClient.KedaV1alpha1().ScaledObjects("default").Get(context.TODO(), "podinfo-primary", metav1.GetOptions{})
	require.NoError(t, err)
	assert.Equal(t, primarySO.Spec.ScaleTargetRef.Name, fmt.Sprintf("%s-primary", mocks.canary.Spec.TargetRef.Name))
	assert.Equal(t, int(*primarySO.Spec.PollingInterval), 10)
	assert.Equal(t, int(*primarySO.Spec.MinReplicaCount), 1)
	assert.Equal(t, primarySO.Spec.Triggers[0].Metadata["query"], `sum(rate(http_requests_total{app="podinfo-primary"}[2m]))`)
	// Mutate the canary's ScaledObject and reconcile with init=false: the
	// drift must be mirrored onto the primary ScaledObject.
	so.Spec.PollingInterval = int32p(20)
	so.Spec.Triggers[0].Metadata["query"] = `sum(rate(http_requests_total{app="podinfo-canary"}[10m]))`
	_, err = mocks.flaggerClient.KedaV1alpha1().ScaledObjects("default").Update(context.TODO(), so, metav1.UpdateOptions{})
	require.NoError(t, err)
	err = soReconciler.reconcilePrimaryScaler(mocks.canary, false)
	require.NoError(t, err)
	primarySO, err = mocks.flaggerClient.KedaV1alpha1().ScaledObjects("default").Get(context.TODO(), "podinfo-primary", metav1.GetOptions{})
	require.NoError(t, err)
	assert.Equal(t, int(*primarySO.Spec.PollingInterval), 20)
	assert.Equal(t, primarySO.Spec.Triggers[0].Metadata["query"], `sum(rate(http_requests_total{app="podinfo-primary"}[10m]))`)
}
// Test_pauseScaledObject verifies that pausing sets KEDA's paused-replicas
// annotation to "0" on the canary's ScaledObject.
func Test_pauseScaledObject(t *testing.T) {
	mocks := newScalerReconcilerFixture(scalerConfig{
		targetName: "podinfo",
		scaler:     "ScaledObject",
	})
	soReconciler := mocks.scalerReconciler.(*ScaledObjectReconciler)
	err := soReconciler.PauseTargetScaler(mocks.canary)
	require.NoError(t, err)
	so, err := mocks.flaggerClient.KedaV1alpha1().ScaledObjects("default").Get(context.TODO(), "podinfo", metav1.GetOptions{})
	require.NoError(t, err)
	assert.Equal(t, so.Annotations[keda.PausedReplicasAnnotation], "0")
}
// Test_resumeScaledObject verifies that resuming removes KEDA's
// paused-replicas annotation from the canary's ScaledObject.
func Test_resumeScaledObject(t *testing.T) {
	mocks := newScalerReconcilerFixture(scalerConfig{
		targetName: "podinfo",
		scaler:     "ScaledObject",
	})
	soReconciler := mocks.scalerReconciler.(*ScaledObjectReconciler)
	err := soReconciler.ResumeTargetScaler(mocks.canary)
	require.NoError(t, err)
	so, err := mocks.flaggerClient.KedaV1alpha1().ScaledObjects("default").Get(context.TODO(), "podinfo", metav1.GetOptions{})
	require.NoError(t, err)
	_, exists := so.Annotations[keda.PausedReplicasAnnotation]
	assert.False(t, exists)
}
// Test_setPrimaryScaledObjectQueries covers both query-rewriting modes:
// automatic replacement of "<target>"/"<target>-canary" with "<target>-primary"
// (table-driven cases), and explicit per-trigger queries supplied via
// .spec.autoscalerRef.primaryScalerQueries (trailing section).
func Test_setPrimaryScaledObjectQueries(t *testing.T) {
	cd := &flaggerv1.Canary{
		Spec: flaggerv1.CanarySpec{
			TargetRef: flaggerv1.LocalObjectReference{
				Name: "podinfo",
			},
			AutoscalerRef: &flaggerv1.AutoscalerRefernce{
				Name: "podinfo",
			},
		},
	}
	tests := []struct {
		name      string
		query     string
		wantQuery string
	}{
		{
			name:      "query only has 'podinfo'",
			query:     `sum(rate(http_requests_total{app="podinfo"}[2m]))`,
			wantQuery: `sum(rate(http_requests_total{app="podinfo-primary"}[2m]))`,
		},
		{
			name:      "query only has 'podinfo-canary'",
			query:     `sum(rate(http_requests_total{app="podinfo-canary"}[2m]))`,
			wantQuery: `sum(rate(http_requests_total{app="podinfo-primary"}[2m]))`,
		},
		{
			// Exercises the placeholder logic: must not yield
			// "podinfo-primary-canary" or "podinfo-primary-primary".
			name:      "query has both 'podinfo-canary' and 'podinfo'",
			query:     `sum(rate(http_requests_total{app="podinfo-canary", svc="podinfo"}[2m]))`,
			wantQuery: `sum(rate(http_requests_total{app="podinfo-primary", svc="podinfo-primary"}[2m]))`,
		},
	}
	for _, test := range tests {
		t.Run(test.name, func(t *testing.T) {
			triggers := make([]keda.ScaleTriggers, 0)
			triggers = append(triggers, keda.ScaleTriggers{
				Metadata: map[string]string{
					"query": test.query,
				},
			})
			setPrimaryScaledObjectQueries(cd, triggers)
			assert.Equal(t, triggers[0].Metadata["query"], test.wantQuery)
		})
	}
	// With primaryScalerQueries set, the named triggers' queries are taken
	// verbatim from the canary spec — no automatic rewriting happens.
	pq1 := `sum(rate(envoy_cluster_upstream_rq{ envoy_cluster_name="test_podinfo-primary_80" }[30s]))`
	pq2 := `sum(rate(envoy_cluster_upstream_rq{ envoy_cluster_name="test_podinfo" }[30s]))`
	triggers := make([]keda.ScaleTriggers, 0)
	triggers = append(triggers, keda.ScaleTriggers{
		Name: "trigger1",
		Metadata: map[string]string{
			"query": pq1,
		},
	})
	triggers = append(triggers, keda.ScaleTriggers{
		Name: "trigger2",
		Metadata: map[string]string{
			"query": pq2,
		},
	})
	cd.Spec.AutoscalerRef.PrimaryScalerQueries = map[string]string{
		"trigger1": pq1,
		"trigger2": pq2,
	}
	setPrimaryScaledObjectQueries(cd, triggers)
	for _, trigger := range triggers {
		if trigger.Name == "trigger1" {
			assert.Equal(t, pq1, trigger.Metadata["query"])
		}
		if trigger.Name == "trigger2" {
			assert.Equal(t, pq2, trigger.Metadata["query"])
		}
	}
}

View File

@ -9,6 +9,8 @@ import (
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/kubernetes/fake"
keda "github.com/fluxcd/flagger/pkg/apis/keda/v1alpha1"
flaggerv1 "github.com/fluxcd/flagger/pkg/apis/flagger/v1beta1"
clientset "github.com/fluxcd/flagger/pkg/client/clientset/versioned"
fakeFlagger "github.com/fluxcd/flagger/pkg/client/clientset/versioned/fake"
@ -31,7 +33,10 @@ type scalerConfig struct {
func newScalerReconcilerFixture(cfg scalerConfig) scalerReconcilerFixture {
canary := newDeploymentControllerTestCanary(canaryConfigs{targetName: cfg.targetName})
flaggerClient := fakeFlagger.NewSimpleClientset(canary)
flaggerClient := fakeFlagger.NewSimpleClientset(
canary,
newScaledObject(),
)
kubeClient := fake.NewSimpleClientset(
newScalerReconcilerTestHPAV2(),
@ -55,10 +60,18 @@ func newScalerReconcilerFixture(cfg scalerConfig) scalerReconcilerFixture {
}
logger, _ := logger.NewLogger("debug")
var hpaReconciler HPAReconciler
var scalerReconciler ScalerReconciler
if cfg.scaler == "HorizontalPodAutoscaler" {
hpaReconciler = HPAReconciler{
scalerReconciler = &HPAReconciler{
kubeClient: kubeClient,
flaggerClient: flaggerClient,
logger: logger,
includeLabelPrefix: []string{"app.kubernetes.io"},
}
}
if cfg.scaler == "ScaledObject" {
scalerReconciler = &ScaledObjectReconciler{
kubeClient: kubeClient,
flaggerClient: flaggerClient,
logger: logger,
@ -70,7 +83,7 @@ func newScalerReconcilerFixture(cfg scalerConfig) scalerReconcilerFixture {
canary: canary,
kubeClient: kubeClient,
flaggerClient: flaggerClient,
scalerReconciler: &hpaReconciler,
scalerReconciler: scalerReconciler,
logger: logger,
}
}
@ -134,3 +147,34 @@ func newScalerReconcilerTestHPAV2() *hpav2.HorizontalPodAutoscaler {
return h
}
// newScaledObject returns a test fixture ScaledObject named "podinfo" in the
// "default" namespace, with a single Prometheus trigger whose query targets
// the canary workload — the input to the query-rewriting tests.
func newScaledObject() *keda.ScaledObject {
	so := &keda.ScaledObject{
		TypeMeta: metav1.TypeMeta{APIVersion: keda.SchemeGroupVersion.String()},
		ObjectMeta: metav1.ObjectMeta{
			Namespace: "default",
			Name:      "podinfo",
		},
		Spec: keda.ScaledObjectSpec{
			ScaleTargetRef: &keda.ScaleTarget{
				Name: "podinfo",
			},
			PollingInterval: int32p(10),
			MinReplicaCount: int32p(1),
			MaxReplicaCount: int32p(4),
			Triggers: []keda.ScaleTriggers{
				{
					Type: "prometheus",
					Metadata: map[string]string{
						"serverAddress": "http://flagger-prometheus.projectcontour:9090",
						"metricName":    "http_requests_total",
						"query":         `sum(rate(http_requests_total{app="podinfo-canary"}[2m]))`,
						"threshold":     "100",
					},
				},
			},
		},
	}
	return so
}

View File

@ -29,6 +29,7 @@ import (
gatewayapiv1alpha2 "github.com/fluxcd/flagger/pkg/client/clientset/versioned/typed/gatewayapi/v1alpha2"
gloov1 "github.com/fluxcd/flagger/pkg/client/clientset/versioned/typed/gloo/v1"
networkingv1alpha3 "github.com/fluxcd/flagger/pkg/client/clientset/versioned/typed/istio/v1alpha3"
kedav1alpha1 "github.com/fluxcd/flagger/pkg/client/clientset/versioned/typed/keda/v1alpha1"
kumav1alpha1 "github.com/fluxcd/flagger/pkg/client/clientset/versioned/typed/kuma/v1alpha1"
projectcontourv1 "github.com/fluxcd/flagger/pkg/client/clientset/versioned/typed/projectcontour/v1"
splitv1alpha1 "github.com/fluxcd/flagger/pkg/client/clientset/versioned/typed/smi/v1alpha1"
@ -49,6 +50,7 @@ type Interface interface {
GatewayapiV1alpha2() gatewayapiv1alpha2.GatewayapiV1alpha2Interface
GlooV1() gloov1.GlooV1Interface
NetworkingV1alpha3() networkingv1alpha3.NetworkingV1alpha3Interface
KedaV1alpha1() kedav1alpha1.KedaV1alpha1Interface
KumaV1alpha1() kumav1alpha1.KumaV1alpha1Interface
ProjectcontourV1() projectcontourv1.ProjectcontourV1Interface
SplitV1alpha1() splitv1alpha1.SplitV1alpha1Interface
@ -68,6 +70,7 @@ type Clientset struct {
gatewayapiV1alpha2 *gatewayapiv1alpha2.GatewayapiV1alpha2Client
glooV1 *gloov1.GlooV1Client
networkingV1alpha3 *networkingv1alpha3.NetworkingV1alpha3Client
kedaV1alpha1 *kedav1alpha1.KedaV1alpha1Client
kumaV1alpha1 *kumav1alpha1.KumaV1alpha1Client
projectcontourV1 *projectcontourv1.ProjectcontourV1Client
splitV1alpha1 *splitv1alpha1.SplitV1alpha1Client
@ -111,6 +114,11 @@ func (c *Clientset) NetworkingV1alpha3() networkingv1alpha3.NetworkingV1alpha3In
return c.networkingV1alpha3
}
// KedaV1alpha1 retrieves the KedaV1alpha1Client
func (c *Clientset) KedaV1alpha1() kedav1alpha1.KedaV1alpha1Interface {
return c.kedaV1alpha1
}
// KumaV1alpha1 retrieves the KumaV1alpha1Client
func (c *Clientset) KumaV1alpha1() kumav1alpha1.KumaV1alpha1Interface {
return c.kumaV1alpha1
@ -213,6 +221,10 @@ func NewForConfigAndClient(c *rest.Config, httpClient *http.Client) (*Clientset,
if err != nil {
return nil, err
}
cs.kedaV1alpha1, err = kedav1alpha1.NewForConfigAndClient(&configShallowCopy, httpClient)
if err != nil {
return nil, err
}
cs.kumaV1alpha1, err = kumav1alpha1.NewForConfigAndClient(&configShallowCopy, httpClient)
if err != nil {
return nil, err
@ -265,6 +277,7 @@ func New(c rest.Interface) *Clientset {
cs.gatewayapiV1alpha2 = gatewayapiv1alpha2.New(c)
cs.glooV1 = gloov1.New(c)
cs.networkingV1alpha3 = networkingv1alpha3.New(c)
cs.kedaV1alpha1 = kedav1alpha1.New(c)
cs.kumaV1alpha1 = kumav1alpha1.New(c)
cs.projectcontourV1 = projectcontourv1.New(c)
cs.splitV1alpha1 = splitv1alpha1.New(c)

View File

@ -34,6 +34,8 @@ import (
fakegloov1 "github.com/fluxcd/flagger/pkg/client/clientset/versioned/typed/gloo/v1/fake"
networkingv1alpha3 "github.com/fluxcd/flagger/pkg/client/clientset/versioned/typed/istio/v1alpha3"
fakenetworkingv1alpha3 "github.com/fluxcd/flagger/pkg/client/clientset/versioned/typed/istio/v1alpha3/fake"
kedav1alpha1 "github.com/fluxcd/flagger/pkg/client/clientset/versioned/typed/keda/v1alpha1"
fakekedav1alpha1 "github.com/fluxcd/flagger/pkg/client/clientset/versioned/typed/keda/v1alpha1/fake"
kumav1alpha1 "github.com/fluxcd/flagger/pkg/client/clientset/versioned/typed/kuma/v1alpha1"
fakekumav1alpha1 "github.com/fluxcd/flagger/pkg/client/clientset/versioned/typed/kuma/v1alpha1/fake"
projectcontourv1 "github.com/fluxcd/flagger/pkg/client/clientset/versioned/typed/projectcontour/v1"
@ -138,6 +140,11 @@ func (c *Clientset) NetworkingV1alpha3() networkingv1alpha3.NetworkingV1alpha3In
return &fakenetworkingv1alpha3.FakeNetworkingV1alpha3{Fake: &c.Fake}
}
// KedaV1alpha1 retrieves the KedaV1alpha1Client
func (c *Clientset) KedaV1alpha1() kedav1alpha1.KedaV1alpha1Interface {
return &fakekedav1alpha1.FakeKedaV1alpha1{Fake: &c.Fake}
}
// KumaV1alpha1 retrieves the KumaV1alpha1Client
func (c *Clientset) KumaV1alpha1() kumav1alpha1.KumaV1alpha1Interface {
return &fakekumav1alpha1.FakeKumaV1alpha1{Fake: &c.Fake}

View File

@ -26,6 +26,7 @@ import (
gatewayv1 "github.com/fluxcd/flagger/pkg/apis/gloo/gateway/v1"
gloov1 "github.com/fluxcd/flagger/pkg/apis/gloo/gloo/v1"
networkingv1alpha3 "github.com/fluxcd/flagger/pkg/apis/istio/v1alpha3"
kedav1alpha1 "github.com/fluxcd/flagger/pkg/apis/keda/v1alpha1"
kumav1alpha1 "github.com/fluxcd/flagger/pkg/apis/kuma/v1alpha1"
projectcontourv1 "github.com/fluxcd/flagger/pkg/apis/projectcontour/v1"
splitv1alpha1 "github.com/fluxcd/flagger/pkg/apis/smi/v1alpha1"
@ -50,6 +51,7 @@ var localSchemeBuilder = runtime.SchemeBuilder{
gatewayapiv1alpha2.AddToScheme,
gloov1.AddToScheme,
networkingv1alpha3.AddToScheme,
kedav1alpha1.AddToScheme,
kumav1alpha1.AddToScheme,
projectcontourv1.AddToScheme,
splitv1alpha1.AddToScheme,

View File

@ -26,6 +26,7 @@ import (
gatewayv1 "github.com/fluxcd/flagger/pkg/apis/gloo/gateway/v1"
gloov1 "github.com/fluxcd/flagger/pkg/apis/gloo/gloo/v1"
networkingv1alpha3 "github.com/fluxcd/flagger/pkg/apis/istio/v1alpha3"
kedav1alpha1 "github.com/fluxcd/flagger/pkg/apis/keda/v1alpha1"
kumav1alpha1 "github.com/fluxcd/flagger/pkg/apis/kuma/v1alpha1"
projectcontourv1 "github.com/fluxcd/flagger/pkg/apis/projectcontour/v1"
splitv1alpha1 "github.com/fluxcd/flagger/pkg/apis/smi/v1alpha1"
@ -50,6 +51,7 @@ var localSchemeBuilder = runtime.SchemeBuilder{
gatewayapiv1alpha2.AddToScheme,
gloov1.AddToScheme,
networkingv1alpha3.AddToScheme,
kedav1alpha1.AddToScheme,
kumav1alpha1.AddToScheme,
projectcontourv1.AddToScheme,
splitv1alpha1.AddToScheme,

View File

@ -0,0 +1,20 @@
/*
Copyright 2020 The Flux authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by client-gen. DO NOT EDIT.
// This package has the automatically generated typed clients.
package v1alpha1

View File

@ -0,0 +1,20 @@
/*
Copyright 2020 The Flux authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by client-gen. DO NOT EDIT.
// Package fake has the automatically generated clients.
package fake

View File

@ -0,0 +1,40 @@
/*
Copyright 2020 The Flux authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by client-gen. DO NOT EDIT.
package fake
import (
v1alpha1 "github.com/fluxcd/flagger/pkg/client/clientset/versioned/typed/keda/v1alpha1"
rest "k8s.io/client-go/rest"
testing "k8s.io/client-go/testing"
)
// FakeKedaV1alpha1 implements KedaV1alpha1Interface on top of a testing.Fake
// action tracker, for use in unit tests.
type FakeKedaV1alpha1 struct {
	*testing.Fake
}
// ScaledObjects returns a fake namespaced client for ScaledObject resources.
func (c *FakeKedaV1alpha1) ScaledObjects(namespace string) v1alpha1.ScaledObjectInterface {
	return &FakeScaledObjects{c, namespace}
}
// RESTClient returns a RESTClient that is used to communicate
// with API server by this client implementation.
func (c *FakeKedaV1alpha1) RESTClient() rest.Interface {
	// Fakes never contact a server; the returned client is a typed nil.
	var ret *rest.RESTClient
	return ret
}

View File

@ -0,0 +1,142 @@
/*
Copyright 2020 The Flux authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by client-gen. DO NOT EDIT.
package fake
import (
"context"
v1alpha1 "github.com/fluxcd/flagger/pkg/apis/keda/v1alpha1"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
labels "k8s.io/apimachinery/pkg/labels"
schema "k8s.io/apimachinery/pkg/runtime/schema"
types "k8s.io/apimachinery/pkg/types"
watch "k8s.io/apimachinery/pkg/watch"
testing "k8s.io/client-go/testing"
)
// FakeScaledObjects implements ScaledObjectInterface
type FakeScaledObjects struct {
	Fake *FakeKedaV1alpha1
	ns   string // namespace all operations are scoped to
}
// GroupVersionResource/Kind used to route fake actions for ScaledObjects.
var scaledobjectsResource = schema.GroupVersionResource{Group: "keda.sh", Version: "v1alpha1", Resource: "scaledobjects"}

var scaledobjectsKind = schema.GroupVersionKind{Group: "keda.sh", Version: "v1alpha1", Kind: "ScaledObject"}
// Get takes name of the scaledObject, and returns the corresponding scaledObject object, and an error if there is any.
func (c *FakeScaledObjects) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.ScaledObject, err error) {
	obj, err := c.Fake.
		Invokes(testing.NewGetAction(scaledobjectsResource, c.ns, name), &v1alpha1.ScaledObject{})
	// obj is nil when the reaction chain produced no object; propagate the error.
	if obj == nil {
		return nil, err
	}
	return obj.(*v1alpha1.ScaledObject), err
}
// List takes label and field selectors, and returns the list of ScaledObjects that match those selectors.
func (c *FakeScaledObjects) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.ScaledObjectList, err error) {
	obj, err := c.Fake.
		Invokes(testing.NewListAction(scaledobjectsResource, scaledobjectsKind, c.ns, opts), &v1alpha1.ScaledObjectList{})
	if obj == nil {
		return nil, err
	}
	// Apply the label selector client-side; the fake tracker returns all objects.
	label, _, _ := testing.ExtractFromListOptions(opts)
	if label == nil {
		label = labels.Everything()
	}
	list := &v1alpha1.ScaledObjectList{ListMeta: obj.(*v1alpha1.ScaledObjectList).ListMeta}
	for _, item := range obj.(*v1alpha1.ScaledObjectList).Items {
		if label.Matches(labels.Set(item.Labels)) {
			list.Items = append(list.Items, item)
		}
	}
	return list, err
}
// Watch returns a watch.Interface that watches the requested scaledObjects.
func (c *FakeScaledObjects) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) {
	return c.Fake.
		InvokesWatch(testing.NewWatchAction(scaledobjectsResource, c.ns, opts))
}
// Create takes the representation of a scaledObject and creates it. Returns the server's representation of the scaledObject, and an error, if there is any.
func (c *FakeScaledObjects) Create(ctx context.Context, scaledObject *v1alpha1.ScaledObject, opts v1.CreateOptions) (result *v1alpha1.ScaledObject, err error) {
	obj, err := c.Fake.
		Invokes(testing.NewCreateAction(scaledobjectsResource, c.ns, scaledObject), &v1alpha1.ScaledObject{})
	if obj == nil {
		return nil, err
	}
	return obj.(*v1alpha1.ScaledObject), err
}
// Update takes the representation of a scaledObject and updates it. Returns the server's representation of the scaledObject, and an error, if there is any.
func (c *FakeScaledObjects) Update(ctx context.Context, scaledObject *v1alpha1.ScaledObject, opts v1.UpdateOptions) (result *v1alpha1.ScaledObject, err error) {
	obj, err := c.Fake.
		Invokes(testing.NewUpdateAction(scaledobjectsResource, c.ns, scaledObject), &v1alpha1.ScaledObject{})
	if obj == nil {
		return nil, err
	}
	return obj.(*v1alpha1.ScaledObject), err
}
// UpdateStatus was generated because the type contains a Status member.
// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
func (c *FakeScaledObjects) UpdateStatus(ctx context.Context, scaledObject *v1alpha1.ScaledObject, opts v1.UpdateOptions) (*v1alpha1.ScaledObject, error) {
	obj, err := c.Fake.
		Invokes(testing.NewUpdateSubresourceAction(scaledobjectsResource, "status", c.ns, scaledObject), &v1alpha1.ScaledObject{})
	if obj == nil {
		return nil, err
	}
	return obj.(*v1alpha1.ScaledObject), err
}
// Delete takes name of the scaledObject and deletes it. Returns an error if one occurs.
func (c *FakeScaledObjects) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error {
	_, err := c.Fake.
		Invokes(testing.NewDeleteActionWithOptions(scaledobjectsResource, c.ns, name, opts), &v1alpha1.ScaledObject{})
	return err
}
// DeleteCollection deletes a collection of objects.
func (c *FakeScaledObjects) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error {
	action := testing.NewDeleteCollectionAction(scaledobjectsResource, c.ns, listOpts)
	_, err := c.Fake.Invokes(action, &v1alpha1.ScaledObjectList{})
	return err
}
// Patch applies the patch and returns the patched scaledObject.
func (c *FakeScaledObjects) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.ScaledObject, err error) {
	obj, err := c.Fake.
		Invokes(testing.NewPatchSubresourceAction(scaledobjectsResource, c.ns, name, pt, data, subresources...), &v1alpha1.ScaledObject{})
	if obj == nil {
		return nil, err
	}
	return obj.(*v1alpha1.ScaledObject), err
}

View File

@ -0,0 +1,21 @@
/*
Copyright 2020 The Flux authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by client-gen. DO NOT EDIT.
package v1alpha1
// ScaledObjectExpansion is client-gen's expansion hook: hand-written methods
// added to this interface become part of ScaledObjectInterface.
type ScaledObjectExpansion interface{}

View File

@ -0,0 +1,107 @@
/*
Copyright 2020 The Flux authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by client-gen. DO NOT EDIT.
package v1alpha1
import (
"net/http"
v1alpha1 "github.com/fluxcd/flagger/pkg/apis/keda/v1alpha1"
"github.com/fluxcd/flagger/pkg/client/clientset/versioned/scheme"
rest "k8s.io/client-go/rest"
)
// KedaV1alpha1Interface exposes the typed clients for every resource in the
// keda.sh/v1alpha1 API group (currently only ScaledObjects).
type KedaV1alpha1Interface interface {
    RESTClient() rest.Interface
    ScaledObjectsGetter
}

// KedaV1alpha1Client is used to interact with features provided by the keda.sh group.
type KedaV1alpha1Client struct {
    restClient rest.Interface
}

// ScaledObjects returns a client for ScaledObject resources scoped to the
// given namespace.
func (c *KedaV1alpha1Client) ScaledObjects(namespace string) ScaledObjectInterface {
    return newScaledObjects(c, namespace)
}

// NewForConfig creates a new KedaV1alpha1Client for the given config.
// NewForConfig is equivalent to NewForConfigAndClient(c, httpClient),
// where httpClient was generated with rest.HTTPClientFor(c).
func NewForConfig(c *rest.Config) (*KedaV1alpha1Client, error) {
    // Work on a copy so the caller's config is never mutated.
    config := *c
    if err := setConfigDefaults(&config); err != nil {
        return nil, err
    }
    httpClient, err := rest.HTTPClientFor(&config)
    if err != nil {
        return nil, err
    }
    return NewForConfigAndClient(&config, httpClient)
}

// NewForConfigAndClient creates a new KedaV1alpha1Client for the given config and http client.
// Note the http client provided takes precedence over the configured transport values.
func NewForConfigAndClient(c *rest.Config, h *http.Client) (*KedaV1alpha1Client, error) {
    config := *c
    if err := setConfigDefaults(&config); err != nil {
        return nil, err
    }
    client, err := rest.RESTClientForConfigAndClient(&config, h)
    if err != nil {
        return nil, err
    }
    return &KedaV1alpha1Client{client}, nil
}

// NewForConfigOrDie creates a new KedaV1alpha1Client for the given config and
// panics if there is an error in the config.
func NewForConfigOrDie(c *rest.Config) *KedaV1alpha1Client {
    client, err := NewForConfig(c)
    if err != nil {
        panic(err)
    }
    return client
}

// New creates a new KedaV1alpha1Client for the given RESTClient.
func New(c rest.Interface) *KedaV1alpha1Client {
    return &KedaV1alpha1Client{c}
}

// setConfigDefaults fills in the group/version, API path, serializer and user
// agent a REST client needs to reach the keda.sh/v1alpha1 endpoints.
func setConfigDefaults(config *rest.Config) error {
    gv := v1alpha1.SchemeGroupVersion
    config.GroupVersion = &gv
    config.APIPath = "/apis"
    config.NegotiatedSerializer = scheme.Codecs.WithoutConversion()

    if config.UserAgent == "" {
        config.UserAgent = rest.DefaultKubernetesUserAgent()
    }

    return nil
}

// RESTClient returns a RESTClient that is used to communicate
// with API server by this client implementation.
func (c *KedaV1alpha1Client) RESTClient() rest.Interface {
    // Nil-receiver safe so embedding clients can forward without checks.
    if c == nil {
        return nil
    }
    return c.restClient
}

View File

@ -0,0 +1,195 @@
/*
Copyright 2020 The Flux authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by client-gen. DO NOT EDIT.
package v1alpha1
import (
"context"
"time"
v1alpha1 "github.com/fluxcd/flagger/pkg/apis/keda/v1alpha1"
scheme "github.com/fluxcd/flagger/pkg/client/clientset/versioned/scheme"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
types "k8s.io/apimachinery/pkg/types"
watch "k8s.io/apimachinery/pkg/watch"
rest "k8s.io/client-go/rest"
)
// ScaledObjectsGetter has a method to return a ScaledObjectInterface.
// A group's client should implement this interface.
type ScaledObjectsGetter interface {
    ScaledObjects(namespace string) ScaledObjectInterface
}

// ScaledObjectInterface has methods to work with ScaledObject resources.
type ScaledObjectInterface interface {
    Create(ctx context.Context, scaledObject *v1alpha1.ScaledObject, opts v1.CreateOptions) (*v1alpha1.ScaledObject, error)
    Update(ctx context.Context, scaledObject *v1alpha1.ScaledObject, opts v1.UpdateOptions) (*v1alpha1.ScaledObject, error)
    UpdateStatus(ctx context.Context, scaledObject *v1alpha1.ScaledObject, opts v1.UpdateOptions) (*v1alpha1.ScaledObject, error)
    Delete(ctx context.Context, name string, opts v1.DeleteOptions) error
    DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error
    Get(ctx context.Context, name string, opts v1.GetOptions) (*v1alpha1.ScaledObject, error)
    List(ctx context.Context, opts v1.ListOptions) (*v1alpha1.ScaledObjectList, error)
    Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error)
    Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.ScaledObject, err error)
    ScaledObjectExpansion
}

// scaledObjects implements ScaledObjectInterface
type scaledObjects struct {
    client rest.Interface
    ns     string
}

// newScaledObjects returns a ScaledObjects
func newScaledObjects(c *KedaV1alpha1Client, namespace string) *scaledObjects {
    return &scaledObjects{
        client: c.RESTClient(),
        ns:     namespace,
    }
}

// Get takes name of the scaledObject, and returns the corresponding scaledObject object, and an error if there is any.
func (c *scaledObjects) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.ScaledObject, err error) {
    result = &v1alpha1.ScaledObject{}
    err = c.client.Get().
        Namespace(c.ns).
        Resource("scaledobjects").
        Name(name).
        VersionedParams(&options, scheme.ParameterCodec).
        Do(ctx).
        Into(result)
    return
}

// List takes label and field selectors, and returns the list of ScaledObjects that match those selectors.
func (c *scaledObjects) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.ScaledObjectList, err error) {
    // Propagate the caller's server-side timeout to the client request.
    var timeout time.Duration
    if opts.TimeoutSeconds != nil {
        timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
    }
    result = &v1alpha1.ScaledObjectList{}
    err = c.client.Get().
        Namespace(c.ns).
        Resource("scaledobjects").
        VersionedParams(&opts, scheme.ParameterCodec).
        Timeout(timeout).
        Do(ctx).
        Into(result)
    return
}

// Watch returns a watch.Interface that watches the requested scaledObjects.
func (c *scaledObjects) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) {
    var timeout time.Duration
    if opts.TimeoutSeconds != nil {
        timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
    }
    // Force watch semantics regardless of what the caller set.
    opts.Watch = true
    return c.client.Get().
        Namespace(c.ns).
        Resource("scaledobjects").
        VersionedParams(&opts, scheme.ParameterCodec).
        Timeout(timeout).
        Watch(ctx)
}

// Create takes the representation of a scaledObject and creates it.  Returns the server's representation of the scaledObject, and an error, if there is any.
func (c *scaledObjects) Create(ctx context.Context, scaledObject *v1alpha1.ScaledObject, opts v1.CreateOptions) (result *v1alpha1.ScaledObject, err error) {
    result = &v1alpha1.ScaledObject{}
    err = c.client.Post().
        Namespace(c.ns).
        Resource("scaledobjects").
        VersionedParams(&opts, scheme.ParameterCodec).
        Body(scaledObject).
        Do(ctx).
        Into(result)
    return
}

// Update takes the representation of a scaledObject and updates it. Returns the server's representation of the scaledObject, and an error, if there is any.
func (c *scaledObjects) Update(ctx context.Context, scaledObject *v1alpha1.ScaledObject, opts v1.UpdateOptions) (result *v1alpha1.ScaledObject, err error) {
    result = &v1alpha1.ScaledObject{}
    err = c.client.Put().
        Namespace(c.ns).
        Resource("scaledobjects").
        Name(scaledObject.Name).
        VersionedParams(&opts, scheme.ParameterCodec).
        Body(scaledObject).
        Do(ctx).
        Into(result)
    return
}

// UpdateStatus was generated because the type contains a Status member.
// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
func (c *scaledObjects) UpdateStatus(ctx context.Context, scaledObject *v1alpha1.ScaledObject, opts v1.UpdateOptions) (result *v1alpha1.ScaledObject, err error) {
    result = &v1alpha1.ScaledObject{}
    err = c.client.Put().
        Namespace(c.ns).
        Resource("scaledobjects").
        Name(scaledObject.Name).
        SubResource("status").
        VersionedParams(&opts, scheme.ParameterCodec).
        Body(scaledObject).
        Do(ctx).
        Into(result)
    return
}

// Delete takes name of the scaledObject and deletes it. Returns an error if one occurs.
func (c *scaledObjects) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error {
    return c.client.Delete().
        Namespace(c.ns).
        Resource("scaledobjects").
        Name(name).
        Body(&opts).
        Do(ctx).
        Error()
}

// DeleteCollection deletes a collection of objects.
func (c *scaledObjects) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error {
    var timeout time.Duration
    if listOpts.TimeoutSeconds != nil {
        timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second
    }
    return c.client.Delete().
        Namespace(c.ns).
        Resource("scaledobjects").
        VersionedParams(&listOpts, scheme.ParameterCodec).
        Timeout(timeout).
        Body(&opts).
        Do(ctx).
        Error()
}

// Patch applies the patch and returns the patched scaledObject.
// When subresources are given, the patch targets that subresource (e.g. "status").
func (c *scaledObjects) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.ScaledObject, err error) {
    result = &v1alpha1.ScaledObject{}
    err = c.client.Patch(pt).
        Namespace(c.ns).
        Resource("scaledobjects").
        Name(name).
        SubResource(subresources...).
        VersionedParams(&opts, scheme.ParameterCodec).
        Body(data).
        Do(ctx).
        Into(result)
    return
}

View File

@ -31,6 +31,7 @@ import (
gloo "github.com/fluxcd/flagger/pkg/client/informers/externalversions/gloo"
internalinterfaces "github.com/fluxcd/flagger/pkg/client/informers/externalversions/internalinterfaces"
istio "github.com/fluxcd/flagger/pkg/client/informers/externalversions/istio"
keda "github.com/fluxcd/flagger/pkg/client/informers/externalversions/keda"
kuma "github.com/fluxcd/flagger/pkg/client/informers/externalversions/kuma"
projectcontour "github.com/fluxcd/flagger/pkg/client/informers/externalversions/projectcontour"
smi "github.com/fluxcd/flagger/pkg/client/informers/externalversions/smi"
@ -187,6 +188,7 @@ type SharedInformerFactory interface {
Gatewayapi() gatewayapi.Interface
Gloo() gloo.Interface
Networking() istio.Interface
Keda() keda.Interface
Kuma() kuma.Interface
Projectcontour() projectcontour.Interface
Split() smi.Interface
@ -217,6 +219,10 @@ func (f *sharedInformerFactory) Networking() istio.Interface {
return istio.New(f, f.namespace, f.tweakListOptions)
}
func (f *sharedInformerFactory) Keda() keda.Interface {
return keda.New(f, f.namespace, f.tweakListOptions)
}
func (f *sharedInformerFactory) Kuma() kuma.Interface {
return kuma.New(f, f.namespace, f.tweakListOptions)
}

View File

@ -28,7 +28,8 @@ import (
v1 "github.com/fluxcd/flagger/pkg/apis/gloo/gateway/v1"
gloov1 "github.com/fluxcd/flagger/pkg/apis/gloo/gloo/v1"
v1alpha3 "github.com/fluxcd/flagger/pkg/apis/istio/v1alpha3"
v1alpha1 "github.com/fluxcd/flagger/pkg/apis/kuma/v1alpha1"
v1alpha1 "github.com/fluxcd/flagger/pkg/apis/keda/v1alpha1"
kumav1alpha1 "github.com/fluxcd/flagger/pkg/apis/kuma/v1alpha1"
projectcontourv1 "github.com/fluxcd/flagger/pkg/apis/projectcontour/v1"
smiv1alpha1 "github.com/fluxcd/flagger/pkg/apis/smi/v1alpha1"
smiv1alpha2 "github.com/fluxcd/flagger/pkg/apis/smi/v1alpha2"
@ -100,8 +101,12 @@ func (f *sharedInformerFactory) ForResource(resource schema.GroupVersionResource
case gloov1.SchemeGroupVersion.WithResource("upstreams"):
return &genericInformer{resource: resource.GroupResource(), informer: f.Gloo().V1().Upstreams().Informer()}, nil
// Group=keda.sh, Version=v1alpha1
case v1alpha1.SchemeGroupVersion.WithResource("scaledobjects"):
return &genericInformer{resource: resource.GroupResource(), informer: f.Keda().V1alpha1().ScaledObjects().Informer()}, nil
// Group=kuma.io, Version=v1alpha1
case v1alpha1.SchemeGroupVersion.WithResource("trafficroutes"):
case kumav1alpha1.SchemeGroupVersion.WithResource("trafficroutes"):
return &genericInformer{resource: resource.GroupResource(), informer: f.Kuma().V1alpha1().TrafficRoutes().Informer()}, nil
// Group=networking.istio.io, Version=v1alpha3

View File

@ -0,0 +1,46 @@
/*
Copyright 2020 The Flux authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by informer-gen. DO NOT EDIT.
package keda
import (
internalinterfaces "github.com/fluxcd/flagger/pkg/client/informers/externalversions/internalinterfaces"
v1alpha1 "github.com/fluxcd/flagger/pkg/client/informers/externalversions/keda/v1alpha1"
)
// Interface provides access to each of this group's versions.
type Interface interface {
    // V1alpha1 provides access to shared informers for resources in V1alpha1.
    V1alpha1() v1alpha1.Interface
}

// group carries the informer factory together with the namespace scope and
// list-option tweaks that are threaded into every per-version interface.
type group struct {
    factory          internalinterfaces.SharedInformerFactory
    namespace        string
    tweakListOptions internalinterfaces.TweakListOptionsFunc
}

// New returns a new Interface.
func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) Interface {
    return &group{factory: f, namespace: namespace, tweakListOptions: tweakListOptions}
}

// V1alpha1 returns a new v1alpha1.Interface.
func (g *group) V1alpha1() v1alpha1.Interface {
    return v1alpha1.New(g.factory, g.namespace, g.tweakListOptions)
}

View File

@ -0,0 +1,45 @@
/*
Copyright 2020 The Flux authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by informer-gen. DO NOT EDIT.
package v1alpha1
import (
internalinterfaces "github.com/fluxcd/flagger/pkg/client/informers/externalversions/internalinterfaces"
)
// Interface provides access to all the informers in this group version.
type Interface interface {
    // ScaledObjects returns a ScaledObjectInformer.
    ScaledObjects() ScaledObjectInformer
}

// version holds the factory plus namespace scope and list-option tweaks that
// each informer constructor in this group version needs.
type version struct {
    factory          internalinterfaces.SharedInformerFactory
    namespace        string
    tweakListOptions internalinterfaces.TweakListOptionsFunc
}

// New returns a new Interface.
func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) Interface {
    return &version{factory: f, namespace: namespace, tweakListOptions: tweakListOptions}
}

// ScaledObjects returns a ScaledObjectInformer.
func (v *version) ScaledObjects() ScaledObjectInformer {
    return &scaledObjectInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions}
}

View File

@ -0,0 +1,90 @@
/*
Copyright 2020 The Flux authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by informer-gen. DO NOT EDIT.
package v1alpha1
import (
"context"
time "time"
kedav1alpha1 "github.com/fluxcd/flagger/pkg/apis/keda/v1alpha1"
versioned "github.com/fluxcd/flagger/pkg/client/clientset/versioned"
internalinterfaces "github.com/fluxcd/flagger/pkg/client/informers/externalversions/internalinterfaces"
v1alpha1 "github.com/fluxcd/flagger/pkg/client/listers/keda/v1alpha1"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
runtime "k8s.io/apimachinery/pkg/runtime"
watch "k8s.io/apimachinery/pkg/watch"
cache "k8s.io/client-go/tools/cache"
)
// ScaledObjectInformer provides access to a shared informer and lister for
// ScaledObjects.
type ScaledObjectInformer interface {
    Informer() cache.SharedIndexInformer
    Lister() v1alpha1.ScaledObjectLister
}

// scaledObjectInformer builds namespace-scoped shared informers for
// ScaledObject resources via the shared informer factory.
type scaledObjectInformer struct {
    factory          internalinterfaces.SharedInformerFactory
    tweakListOptions internalinterfaces.TweakListOptionsFunc
    namespace        string
}

// NewScaledObjectInformer constructs a new informer for ScaledObject type.
// Always prefer using an informer factory to get a shared informer instead of getting an independent
// one. This reduces memory footprint and number of connections to the server.
func NewScaledObjectInformer(client versioned.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer {
    return NewFilteredScaledObjectInformer(client, namespace, resyncPeriod, indexers, nil)
}

// NewFilteredScaledObjectInformer constructs a new informer for ScaledObject type.
// Always prefer using an informer factory to get a shared informer instead of getting an independent
// one. This reduces memory footprint and number of connections to the server.
// tweakListOptions, when non-nil, mutates the ListOptions before every list/watch.
func NewFilteredScaledObjectInformer(client versioned.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer {
    return cache.NewSharedIndexInformer(
        &cache.ListWatch{
            ListFunc: func(options v1.ListOptions) (runtime.Object, error) {
                if tweakListOptions != nil {
                    tweakListOptions(&options)
                }
                return client.KedaV1alpha1().ScaledObjects(namespace).List(context.TODO(), options)
            },
            WatchFunc: func(options v1.ListOptions) (watch.Interface, error) {
                if tweakListOptions != nil {
                    tweakListOptions(&options)
                }
                return client.KedaV1alpha1().ScaledObjects(namespace).Watch(context.TODO(), options)
            },
        },
        &kedav1alpha1.ScaledObject{},
        resyncPeriod,
        indexers,
    )
}

// defaultInformer is the constructor handed to the factory so all callers
// share one informer instance with a namespace index.
func (f *scaledObjectInformer) defaultInformer(client versioned.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer {
    return NewFilteredScaledObjectInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions)
}

// Informer returns the shared informer for ScaledObjects from the factory.
func (f *scaledObjectInformer) Informer() cache.SharedIndexInformer {
    return f.factory.InformerFor(&kedav1alpha1.ScaledObject{}, f.defaultInformer)
}

// Lister returns a lister backed by the shared informer's indexer.
func (f *scaledObjectInformer) Lister() v1alpha1.ScaledObjectLister {
    return v1alpha1.NewScaledObjectLister(f.Informer().GetIndexer())
}

View File

@ -0,0 +1,27 @@
/*
Copyright 2020 The Flux authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by lister-gen. DO NOT EDIT.
package v1alpha1
// ScaledObjectListerExpansion allows custom methods to be added to
// ScaledObjectLister. It is intentionally empty in the generated code.
type ScaledObjectListerExpansion interface{}

// ScaledObjectNamespaceListerExpansion allows custom methods to be added to
// ScaledObjectNamespaceLister. It is intentionally empty in the generated code.
type ScaledObjectNamespaceListerExpansion interface{}

View File

@ -0,0 +1,99 @@
/*
Copyright 2020 The Flux authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by lister-gen. DO NOT EDIT.
package v1alpha1
import (
v1alpha1 "github.com/fluxcd/flagger/pkg/apis/keda/v1alpha1"
"k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/client-go/tools/cache"
)
// ScaledObjectLister helps list ScaledObjects.
// All objects returned here must be treated as read-only.
type ScaledObjectLister interface {
    // List lists all ScaledObjects in the indexer.
    // Objects returned here must be treated as read-only.
    List(selector labels.Selector) (ret []*v1alpha1.ScaledObject, err error)
    // ScaledObjects returns an object that can list and get ScaledObjects.
    ScaledObjects(namespace string) ScaledObjectNamespaceLister
    ScaledObjectListerExpansion
}

// scaledObjectLister implements the ScaledObjectLister interface.
type scaledObjectLister struct {
    indexer cache.Indexer
}

// NewScaledObjectLister returns a new ScaledObjectLister.
func NewScaledObjectLister(indexer cache.Indexer) ScaledObjectLister {
    return &scaledObjectLister{indexer: indexer}
}

// List lists all ScaledObjects in the indexer.
func (s *scaledObjectLister) List(selector labels.Selector) (ret []*v1alpha1.ScaledObject, err error) {
    err = cache.ListAll(s.indexer, selector, func(m interface{}) {
        ret = append(ret, m.(*v1alpha1.ScaledObject))
    })
    return ret, err
}

// ScaledObjects returns an object that can list and get ScaledObjects.
func (s *scaledObjectLister) ScaledObjects(namespace string) ScaledObjectNamespaceLister {
    return scaledObjectNamespaceLister{indexer: s.indexer, namespace: namespace}
}

// ScaledObjectNamespaceLister helps list and get ScaledObjects.
// All objects returned here must be treated as read-only.
type ScaledObjectNamespaceLister interface {
    // List lists all ScaledObjects in the indexer for a given namespace.
    // Objects returned here must be treated as read-only.
    List(selector labels.Selector) (ret []*v1alpha1.ScaledObject, err error)
    // Get retrieves the ScaledObject from the indexer for a given namespace and name.
    // Objects returned here must be treated as read-only.
    Get(name string) (*v1alpha1.ScaledObject, error)
    ScaledObjectNamespaceListerExpansion
}

// scaledObjectNamespaceLister implements the ScaledObjectNamespaceLister
// interface.
type scaledObjectNamespaceLister struct {
    indexer   cache.Indexer
    namespace string
}

// List lists all ScaledObjects in the indexer for a given namespace.
func (s scaledObjectNamespaceLister) List(selector labels.Selector) (ret []*v1alpha1.ScaledObject, err error) {
    err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) {
        ret = append(ret, m.(*v1alpha1.ScaledObject))
    })
    return ret, err
}

// Get retrieves the ScaledObject from the indexer for a given namespace and name.
func (s scaledObjectNamespaceLister) Get(name string) (*v1alpha1.ScaledObject, error) {
    // Indexer keys are "<namespace>/<name>" for namespaced objects.
    obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name)
    if err != nil {
        return nil, err
    }
    if !exists {
        return nil, errors.NewNotFound(v1alpha1.Resource("scaledobject"), name)
    }
    return obj.(*v1alpha1.ScaledObject), nil
}

View File

@ -224,6 +224,13 @@ func (c *Controller) advanceCanary(name string, namespace string) {
c.recordEventWarningf(cd, "%v", err)
return
}
if cd.Status.Phase == "" || cd.Status.Phase == flaggerv1.CanaryPhaseInitializing {
err = scalerReconciler.PauseTargetScaler(cd)
if err != nil {
c.recordEventWarningf(cd, "%v", err)
return
}
}
}
// change the apex service pod selector to primary
@ -278,7 +285,7 @@ func (c *Controller) advanceCanary(name string, namespace string) {
c.recorder.SetWeight(cd, primaryWeight, canaryWeight)
// check if canary analysis should start (canary revision has changes) or continue
if ok := c.checkCanaryStatus(cd, canaryController, shouldAdvance); !ok {
if ok := c.checkCanaryStatus(cd, canaryController, scalerReconciler, shouldAdvance); !ok {
return
}
@ -328,7 +335,7 @@ func (c *Controller) advanceCanary(name string, namespace string) {
if ok := c.runRollbackHooks(cd, cd.Status.Phase); ok {
c.recordEventWarningf(cd, "Rolling back %s.%s manual webhook invoked", cd.Name, cd.Namespace)
c.alert(cd, "Rolling back manual webhook invoked", false, flaggerv1.SeverityWarn)
c.rollback(cd, canaryController, meshRouter)
c.rollback(cd, canaryController, meshRouter, scalerReconciler)
return
}
}
@ -336,8 +343,7 @@ func (c *Controller) advanceCanary(name string, namespace string) {
// route traffic back to primary if analysis has succeeded
if cd.Status.Phase == flaggerv1.CanaryPhasePromoting {
if scalerReconciler != nil {
err = scalerReconciler.ReconcilePrimaryScaler(cd, false)
if err != nil {
if err := scalerReconciler.ReconcilePrimaryScaler(cd, false); err != nil {
c.recordEventWarningf(cd, "%v", err)
return
}
@ -348,6 +354,12 @@ func (c *Controller) advanceCanary(name string, namespace string) {
// scale canary to zero if promotion has finished
if cd.Status.Phase == flaggerv1.CanaryPhaseFinalising {
if scalerReconciler != nil {
if err := scalerReconciler.PauseTargetScaler(cd); err != nil {
c.recordEventWarningf(cd, "%v", err)
return
}
}
if err := canaryController.ScaleToZero(cd); err != nil {
c.recordEventWarningf(cd, "%v", err)
return
@ -375,7 +387,7 @@ func (c *Controller) advanceCanary(name string, namespace string) {
c.alert(cd, fmt.Sprintf("Progress deadline exceeded %v", err),
false, flaggerv1.SeverityError)
}
c.rollback(cd, canaryController, meshRouter)
c.rollback(cd, canaryController, meshRouter, scalerReconciler)
return
}
@ -723,7 +735,7 @@ func (c *Controller) shouldSkipAnalysis(canary *flaggerv1.Canary, canaryControll
if !retriable {
c.recordEventWarningf(canary, "Rolling back %s.%s progress deadline exceeded %v", canary.Name, canary.Namespace, err)
c.alert(canary, fmt.Sprintf("Progress deadline exceeded %v", err), false, flaggerv1.SeverityError)
c.rollback(canary, canaryController, meshRouter)
c.rollback(canary, canaryController, meshRouter, scalerReconciler)
return true
}
@ -750,6 +762,10 @@ func (c *Controller) shouldSkipAnalysis(canary *flaggerv1.Canary, canaryControll
c.recordEventWarningf(canary, "%v", err)
return true
}
if err := scalerReconciler.PauseTargetScaler(canary); err != nil {
c.recordEventWarningf(canary, "%v", err)
return true
}
}
// shutdown canary
@ -810,7 +826,7 @@ func (c *Controller) shouldAdvance(canary *flaggerv1.Canary, canaryController ca
}
func (c *Controller) checkCanaryStatus(canary *flaggerv1.Canary, canaryController canary.Controller, shouldAdvance bool) bool {
func (c *Controller) checkCanaryStatus(canary *flaggerv1.Canary, canaryController canary.Controller, scalerReconciler canary.ScalerReconciler, shouldAdvance bool) bool {
c.recorder.SetStatus(canary, canary.Status.Phase)
if canary.Status.Phase == flaggerv1.CanaryPhaseProgressing ||
canary.Status.Phase == flaggerv1.CanaryPhaseWaitingPromotion ||
@ -845,6 +861,13 @@ func (c *Controller) checkCanaryStatus(canary *flaggerv1.Canary, canaryControlle
c.alert(canaryPhaseProgressing, "New revision detected, progressing canary analysis.",
true, flaggerv1.SeverityInfo)
if scalerReconciler != nil {
err = scalerReconciler.ResumeTargetScaler(canary)
if err != nil {
c.recordEventWarningf(canary, "%v", err)
return false
}
}
if err := canaryController.ScaleFromZero(canary); err != nil {
c.recordEventErrorf(canary, "%v", err)
return false
@ -872,7 +895,8 @@ func (c *Controller) hasCanaryRevisionChanged(canary *flaggerv1.Canary, canaryCo
return false
}
func (c *Controller) rollback(canary *flaggerv1.Canary, canaryController canary.Controller, meshRouter router.Interface) {
func (c *Controller) rollback(canary *flaggerv1.Canary, canaryController canary.Controller,
meshRouter router.Interface, scalerReconciler canary.ScalerReconciler) {
if canary.Status.FailedChecks >= canary.GetAnalysisThreshold() {
c.recordEventWarningf(canary, "Rolling back %s.%s failed checks threshold reached %v",
canary.Name, canary.Namespace, canary.Status.FailedChecks)
@ -895,6 +919,12 @@ func (c *Controller) rollback(canary *flaggerv1.Canary, canaryController canary.
c.recorder.SetWeight(canary, primaryWeight, canaryWeight)
if scalerReconciler != nil {
if err := scalerReconciler.PauseTargetScaler(canary); err != nil {
c.recordEventWarningf(canary, "%v", err)
return
}
}
// shutdown canary
if err := canaryController.ScaleToZero(canary); err != nil {
c.recordEventWarningf(canary, "%v", err)

View File

@ -294,7 +294,7 @@ func newDeploymentTestCanary() *flaggerv1.Canary {
APIVersion: "apps/v1",
Kind: "Deployment",
},
AutoscalerRef: &flaggerv1.LocalObjectReference{
AutoscalerRef: &flaggerv1.AutoscalerRefernce{
Name: "podinfo",
APIVersion: "autoscaling/v2beta2",
Kind: "HorizontalPodAutoscaler",
@ -356,7 +356,7 @@ func newDeploymentTestCanaryAB() *flaggerv1.Canary {
APIVersion: "apps/v1",
Kind: "Deployment",
},
AutoscalerRef: &flaggerv1.LocalObjectReference{
AutoscalerRef: &flaggerv1.AutoscalerRefernce{
Name: "podinfo",
APIVersion: "autoscaling/v2beta2",
Kind: "HorizontalPodAutoscaler",

22
test/keda/install.sh Executable file
View File

@ -0,0 +1,22 @@
#!/usr/bin/env bash

# Installs KEDA (via Helm) and Flagger (via kustomize) into the current
# cluster for the KEDA ScaledObject e2e tests.
#
# Fixes over the original:
#  - quote all ${REPO_ROOT} expansions (paths with spaces would break)
#  - idempotent namespace creation (`kubectl create ns` fails on re-runs)
#  - `helm upgrade --install` so re-running the script does not fail on an
#    already-installed release

set -o errexit

REPO_ROOT=$(git rev-parse --show-toplevel)

mkdir -p "${REPO_ROOT}/bin"

echo '>>> Installing KEDA'
helm repo add kedacore https://kedacore.github.io/charts
# create-or-update the namespace instead of plain `create`, which errors
# if the namespace already exists
kubectl create ns keda --dry-run=client -o yaml | kubectl apply -f -
helm upgrade --install keda kedacore/keda --namespace keda --wait
kubectl -n keda get all

echo '>>> Installing Flagger'
kubectl apply -k "${REPO_ROOT}/kustomize/kubernetes"
kubectl -n flagger-system set image deployment/flagger flagger=test/flagger:latest
kubectl -n flagger-system rollout status deployment/flagger
kubectl -n flagger-system rollout status deployment/flagger-prometheus

11
test/keda/run.sh Executable file
View File

@ -0,0 +1,11 @@
#!/usr/bin/env bash

# Entry point for the KEDA e2e suite: install prerequisites, deploy the
# test workloads, then exercise the ScaledObject integration.

set -o errexit

script_dir="$(cd "$(dirname "$0")" && pwd)"
repo_root="$(git rev-parse --show-toplevel)"

"$script_dir"/install.sh
"$repo_root"/test/workloads/init.sh
"$script_dir"/test-scaledobject.sh

208
test/keda/test-scaledobject.sh Executable file
View File

@ -0,0 +1,208 @@
#!/usr/bin/env bash

# This script runs e2e tests targeting Flagger's integration with KEDA ScaledObjects:
# it creates a ScaledObject-backed Canary, verifies that Flagger reconciles a
# primary ScaledObject (rewriting the Prometheus query for the primary workload),
# pauses/unpauses autoscaling on the target ScaledObject around the rollout,
# and promotes the canary successfully.

set -o errexit

REPO_ROOT=$(git rev-parse --show-toplevel)

# Target ScaledObject: scales podinfo on request rate via Flagger's Prometheus.
cat <<EOF | kubectl apply -f -
apiVersion: keda.sh/v1alpha1
kind: ScaledObject
metadata:
  name: podinfo-so
  namespace: test
spec:
  scaleTargetRef:
    name: podinfo
  pollingInterval: 10
  cooldownPeriod: 20
  minReplicaCount: 1
  maxReplicaCount: 3
  triggers:
  - type: prometheus
    metadata:
      serverAddress: http://flagger-prometheus.flagger-system:9090
      metricName: http_requests_total
      query: sum(rate(http_requests_total{ app="podinfo" }[30s]))
      threshold: '5'
EOF

# Canary referencing the ScaledObject through autoscalerRef.
cat <<EOF | kubectl apply -f -
apiVersion: v1
kind: Service
metadata:
  name: podinfo-svc
  namespace: test
spec:
  type: ClusterIP
  selector:
    app: podinfo
  ports:
    - name: http
      port: 9898
      protocol: TCP
      targetPort: http
---
apiVersion: flagger.app/v1beta1
kind: Canary
metadata:
  name: podinfo
  namespace: test
spec:
  provider: kubernetes
  targetRef:
    apiVersion: apps/v1
    kind: Deployment
    name: podinfo
  autoscalerRef:
    apiVersion: keda.sh/v1alpha1
    kind: ScaledObject
    name: podinfo-so
  progressDeadlineSeconds: 60
  service:
    port: 80
    targetPort: 9898
    name: podinfo-svc
    portDiscovery: true
  analysis:
    interval: 15s
    threshold: 10
    iterations: 8
    metrics:
    - name: request-success-rate
      interval: 1m
      thresholdRange:
        min: 99
    - name: request-duration
      interval: 30s
      thresholdRange:
        max: 500
    webhooks:
      - name: load-test
        url: http://flagger-loadtester.test/
        timeout: 5s
        metadata:
          type: cmd
          cmd: "hey -z 2m -q 20 -c 2 http://podinfo-svc-canary.test/"
EOF

echo '>>> Waiting for primary to be ready'
retries=50
count=0
ok=false
until ${ok}; do
    kubectl -n test get canary/podinfo | grep 'Initialized' && ok=true || ok=false
    sleep 5
    count=$((count + 1))
    if [[ ${count} -eq ${retries} ]]; then
        kubectl -n flagger-system logs deployment/flagger
        echo "No more retries left"
        exit 1
    fi
done

echo '✔ Canary initialization test passed'

# Flagger must rewrite the trigger query so the primary ScaledObject
# scales on the primary workload's metrics, not the canary's.
expectedQuery='sum(rate(http_requests_total{ app="podinfo-primary" }[30s]))'
if kubectl -n test get scaledobjects podinfo-so-primary; then
    query=$(kubectl -n test get scaledobjects podinfo-so-primary -o=jsonpath='{.spec.triggers[0].metadata.query}')
    if [[ "$query" = "$expectedQuery" ]]; then
        echo '✔ Primary ScaledObject successfully reconciled'
    else
        kubectl -n test get scaledobjects podinfo-so-primary -oyaml
        echo '✗ Primary ScaledObject query does not match expected query'
        exit 1
    fi
else
    echo '✗ Primary ScaledObject not found'
    exit 1
fi

# While no rollout is in progress, the target ScaledObject must be paused
# at zero replicas via KEDA's paused-replicas annotation.
val=$(kubectl -n test get scaledobject podinfo-so -o=jsonpath='{.metadata.annotations.autoscaling\.keda\.sh\/paused-replicas}' | xargs)
if [[ "$val" = "0" ]]; then
    echo '✔ Successfully paused autoscaling for target ScaledObject'
else
    echo '✗ Could not pause autoscaling for target ScaledObject'
    exit 1
fi

echo '>>> Triggering canary deployment'
kubectl -n test set image deployment/podinfo podinfod=ghcr.io/stefanprodan/podinfo:6.0.1

echo '>>> Waiting for ScaledObject autoscaling to get unpaused'
retries=20
count=0
ok=false
until ${ok}; do
    # During a rollout Flagger removes the paused-replicas annotation.
    val=$(kubectl -n test get scaledobject podinfo-so -o=jsonpath='{.metadata.annotations.autoscaling\.keda\.sh\/paused-replicas}' | xargs)
    if [[ "$val" = "" ]]; then
        ok=true
    fi
    sleep 2
    kubectl -n flagger-system logs deployment/flagger --tail 1
    count=$((count + 1))
    if [[ ${count} -eq ${retries} ]]; then
        kubectl -n flagger-system logs deployment/flagger
        kubectl -n test get scaledobject podinfo-so -oyaml
        echo "No more retries left"
        exit 1
    fi
done

echo '>>> Waiting for canary deployment to be scaled up'
retries=20
count=0
ok=false
until ${ok}; do
    # The load test should push the canary to the ScaledObject's maxReplicaCount.
    kubectl -n test get deployment/podinfo -oyaml | grep 'replicas: 3' && ok=true || ok=false
    sleep 5
    kubectl -n flagger-system logs deployment/flagger --tail 1
    count=$((count + 1))
    if [[ ${count} -eq ${retries} ]]; then
        kubectl -n flagger-system logs deployment/flagger
        kubectl -n test get deploy/podinfo -oyaml
        echo "No more retries left"
        exit 1
    fi
done

echo '>>> Waiting for canary promotion'
retries=50
count=0
ok=false
until ${ok}; do
    kubectl -n test describe deployment/podinfo-primary | grep '6.0.1' && ok=true || ok=false
    sleep 10
    kubectl -n flagger-system logs deployment/flagger --tail 1
    count=$((count + 1))
    if [[ ${count} -eq ${retries} ]]; then
        kubectl -n flagger-system logs deployment/flagger
        # Dump the canary state (this suite uses the kubernetes provider; no HTTPProxy exists here).
        kubectl -n test get canary/podinfo -oyaml
        echo "No more retries left"
        exit 1
    fi
done

echo '✔ Canary promotion test passed'

echo '>>> Waiting for canary finalization'
retries=50
count=0
ok=false
until ${ok}; do
    kubectl -n test get canary/podinfo | grep 'Succeeded' && ok=true || ok=false
    sleep 5
    count=$((count + 1))
    if [[ ${count} -eq ${retries} ]]; then
        kubectl -n flagger-system logs deployment/flagger
        echo "No more retries left"
        exit 1
    fi
done

# After finalization the target ScaledObject must be paused again.
val=$(kubectl -n test get scaledobject podinfo-so -o=jsonpath='{.metadata.annotations.autoscaling\.keda\.sh\/paused-replicas}' | xargs)
if [[ "$val" = "0" ]]; then
    echo '✔ Successfully paused autoscaling for target ScaledObject'
else
    echo '✗ Could not pause autoscaling for target ScaledObject'
    exit 1
fi

View File

@ -1,5 +1,7 @@
#!/usr/bin/env bash
# This script runs E2E tests targetting Flagger's integration with HPAs.
set -o errexit
REPO_ROOT=$(git rev-parse --show-toplevel)